diff --git "a/3149.jsonl" "b/3149.jsonl" new file mode 100644--- /dev/null +++ "b/3149.jsonl" @@ -0,0 +1,81 @@ +{"seq_id":"26435031030","text":"'''\ndelete_tweets.py\n\nHelps deletes all your tweets 1000 at a time.\n\nNOTE: WARNING THIS WILL DELETE APPROXIMATELY 750 TWEETS OF THE AUTHORISED\nACCOUNT THAT IS ACCESSING THE API. THIS CANNOT BE UNDONE.\n\nNOTE: Requires app authentication from https://apps.twitter.com/ with read -\nwrite privileges. You set your authentication tokens as enviroment varibles\nthat the app provides.\n\nEnviroment varibles need to be set. this can be done by creating a .env file\nas follows:\n\n TWITTER_CONSUMER_KEY=your_consumer_key\n TWITTER_CONSUMER_SECRET=your_consumer_secret\n TWITTER_ACCESS_TOKEN=your_access_Token\n TWITTER_ACCESS_TOKEN_SECRET=your_access_Token_secret\n\nYou can then set the env varibles with:\n\n $ for line in $(cat .env); do export $line; done\n\nAlternatively just set your env varibles directly. Search 'EDIT_HERE' to see\nwhere to do this.\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport argparse\n\nimport tweepy\n\n# python 2/3 cover.\ntry:\n input = raw_input\nexcept:\n pass\n\n### EDIT_HERE\n# You can just replace all of this by just setting your env varibles directly.\nconsumer_key = os.environ[\"TWITTER_CONSUMER_KEY\"]\nconsumer_secret = os.environ[\"TWITTER_CONSUMER_SECRET\"]\naccess_token = os.environ[\"TWITTER_ACCESS_TOKEN\"]\naccess_token_secret = os.environ[\"TWITTER_ACCESS_TOKEN_SECRET\"]\n\n\ndef main():\n # init api.\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n\n # get user account info via the api.\n args = parse_args()\n me = api.me()\n total = 750\n message_args = (total, me.statuses_count, me.name, me.screen_name, me.id_str)\n\n # Check if user actually wants to do this.\n if input(message.format(*message_args)) != \"y\":\n print(\"Action Aborted\")\n return\n\n kwargs = {'count': total, 'max_id': args.max_id, 'since_id': args.max_id}\n # DANGER: WARNING THIS WILL DELETE 750 TWEETS OF THE AUTHORISED ACCOUNT THAT\n # IS ACCESSING THE API. THIS CANNOT BE UNDONE.\n for tweet in api.user_timeline(me.id, **kwargs):\n if args.verbose:\n print(\"id: {}\\ntext: {}\".format(tweet.id, tweet.text))\n if input(\"Skip tweet (y/n): \") == \"y\":\n continue\n api.destroy_status(tweet.id)\n print(\"Deleted tweet: {}. From: {}\".format(tweet.id, tweet.created_at))\n\n\ndef parse_args():\n \"Parse args and return formatted args.\"\n help_link = \"https://goo.gl/89ikje\"\n parser = argparse.ArgumentParser(description=\"Delete unwanted tweets\")\n parser.add_argument('-v', '--verbose',\n type=bool, help='check each Tweet before deleting', default=True)\n parser.add_argument('-m', '--max_id',\n type=int, help='Newest tweet by incremental id. Info: %s' % help_link, default=None)\n parser.add_argument('-s', '--since_id',\n type=int, help='Oldest tweet by incremental id. Info: %s' % help_link, default=None)\n return parser.parse_args()\n\nmessage = '''WARNING:\n\nTHIS TRY WILL DELETE {}/{} TWEETS OF THE AUTHORISED ACCOUNT:\n name: {}\n screen name: {}\n id: {}\n\nTHIS CANNOT BE UNDONE. Are you sure want to continue? 
(y/n): '''\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"winstonjay/digitalcitizens","sub_path":"api_tools/twitter_py/delete_tweets.py","file_name":"delete_tweets.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"14997758884","text":"#You are given a binary tree:\n# Your task is to return the list with elements from tree sorted by levels,\n# which means the root element goes first, then root children\n# (from left to right) are second and third, and so on.\n# Return empty array if root is nil.\ndef tree_by_levels(node):\n p, q = [], [node]\n while q:\n v = q.pop(0)\n if v is not None:\n p.append(v.value)\n q += [v.left,v.right]\n return p if not node is None else []\n","repo_name":"DelStez/code-for-codewars","sub_path":"4kyu/Sort.py","file_name":"Sort.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"72929689937","text":"from django.urls import path, re_path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('album/', views.album_main, name='album_main'),\n path('album/', views.album, name='album'),\n path('artist/', views.artist, name='artist'),\n path('song/', views.song, name='song'),\n path('user/', views.UserDetailView.as_view(), name='user'),\n path('login/', views.login, name='login'),\n path('user/', views.user, name='user'),\n path('user/edit_trackspotter', views.edit_trackspotter.as_view(), name='edit_trackspotter'),\n path('user/edit_critic', views.edit_critic.as_view(), name='edit_critic'),\n\tre_path(r'^search/$', views.search, name='search'),\n]\n\nurlpatterns += [\n\tpath('album/create/', views.AlbumCreate.as_view(), name='album_create'),\n\tpath('album//update/', views.AlbumUpdate.as_view(), name='album_update'),\n\tpath('album//delete/', views.AlbumDelete.as_view(), name='album_delete'),\n\tpath('song/create/', views.SongCreate.as_view(), name='song_create'),\n\tpath('song//update/', views.SongUpdate.as_view(), name='song_update'),\n\tpath('song//delete/', views.SongDelete.as_view(), name='song_delete'),\n\tpath('artist/create/', views.ArtistCreate.as_view(), name='artist_create'),\n\tpath('artist//update/', views.ArtistUpdate.as_view(), name='artist_update'),\n\tpath('artist//delete/', views.ArtistDelete.as_view(), name='artist_delete'),\n\tpath('album//reviewcreate/', views.create_album_review, name = 'album_review_create'),\n\tpath('song//reviewcreate/', views.create_song_review, name = 'review_form'),\n]\n\n","repo_name":"jswny/trackspot","sub_path":"trackspot/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"1169114387","text":"import asyncio\nimport errno\nimport fcntl\nimport os\nimport shutil\nfrom contextlib import asynccontextmanager\nfrom pathlib import Path\nfrom types import TracebackType\nfrom typing import AsyncGenerator, Optional, Type, Union\n\nfrom rich.console import Console\nfrom rich.live import Live\nfrom rich.spinner import Spinner as RichSpinner\n\nfrom greenbone.feed.sync.errors import FileLockingError, GreenboneFeedSyncError\n\nDEFAULT_FLOCK_WAIT_INTERVAL = 5 # in seconds\n\n\ndef is_root() -> bool:\n \"\"\"\n Checks if the current user is root\n \"\"\"\n return os.geteuid() == 0\n\n\n@asynccontextmanager\nasync def flock_wait(\n path: Union[str, Path],\n *,\n 
console: Optional[Console] = None,\n wait_interval: Optional[int] = DEFAULT_FLOCK_WAIT_INTERVAL,\n) -> AsyncGenerator[None, None]:\n \"\"\"\n Try to lock a file and wait if it is already locked\n\n Arguments:\n path: File to lock\n console: A console to print messages to or None to keep the function\n quiet.\n wait_interval: Time to wait in seconds after failed lock attempt before\n re-trying to lock the file. Set to None to raise a FileLockingError\n instead of re-trying to acquire the lock. Default is 5 seconds.\n \"\"\"\n # ensure path is a Path\n path = Path(path)\n\n # ensure parent directories exist\n try:\n path.parent.mkdir(parents=True, exist_ok=True, mode=0o770)\n except OSError as e:\n raise FileLockingError(\n f\"Could not create parent directories for {path}\"\n ) from e\n\n with path.open(\"w\", encoding=\"utf8\") as fd0:\n has_lock = False\n while not has_lock:\n try:\n if console:\n console.print(\n f\"Trying to acquire lock on {path.absolute()}\"\n )\n\n fcntl.flock(fd0, fcntl.LOCK_EX | fcntl.LOCK_NB)\n\n if console:\n console.print(f\"Acquired lock on {path.absolute()}\")\n\n has_lock = True\n path.chmod(mode=0o660)\n except OSError as e:\n if e.errno in (errno.EAGAIN, errno.EACCES):\n if wait_interval is None:\n raise FileLockingError(\n f\"{path.absolute()} is locked. Another process \"\n \"related to the feed update may already running.\"\n ) from None\n\n if console:\n console.print(\n f\"{path.absolute()} is locked by another process. \"\n f\"Waiting {wait_interval} seconds before next try.\"\n )\n await asyncio.sleep(wait_interval)\n else:\n raise\n\n try:\n yield\n finally:\n try:\n # free the lock\n if console:\n console.print(f\"Releasing lock on {path.absolute()}\")\n\n fcntl.flock(fd0, fcntl.LOCK_UN)\n except OSError:\n pass\n\n\nclass Spinner:\n def __init__(self, console: Console, status: str) -> None:\n self._spinner = RichSpinner(\n \"dots\", text=status, style=\"status.spinner\", speed=1.0\n )\n self._live = Live(\n self._spinner,\n console=console,\n refresh_per_second=12.5,\n transient=False,\n )\n\n def __enter__(self) -> \"Spinner\":\n self._live.start()\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n self._live.stop()\n\n\ndef change_user_and_group(\n user: Union[str, int], group: Union[str, int]\n) -> None:\n \"\"\"\n Change effective user or group of the current running process\n\n Args:\n user: User name or ID\n group: Group name or ID\n \"\"\"\n if isinstance(user, str):\n user_id = shutil._get_uid( # type: ignore[attr-defined] # pylint: disable=protected-access # noqa: E501\n user\n )\n if user_id is None:\n raise GreenboneFeedSyncError(\n f\"Can't run as user '{user}'. User '{user}' is unknown.\"\n )\n user = user_id\n if isinstance(group, str):\n group_id = shutil._get_gid(group) # type: ignore[attr-defined] # pylint: disable=protected-access # noqa: E501\n if group_id is None:\n raise GreenboneFeedSyncError(\n f\"Can't run as group '{group}'. 
Group '{group}' is unknown.\"\n )\n group = group_id\n\n os.setegid(group) # type: ignore[arg-type]\n os.seteuid(user) # type: ignore[arg-type]\n","repo_name":"greenbone/greenbone-feed-sync","sub_path":"greenbone/feed/sync/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"13"} +{"seq_id":"16893154300","text":"#!interpreter [optional-arg]\r\n# -*- coding: utf-8 -*-\r\n#\r\n\"\"\"\r\nroutes.py: All Routes \r\n\r\n\"\"\"\r\n\r\n#Built-in/Generic\r\nimport datetime\r\n\r\n#Libs\r\nfrom flask import Flask, g, redirect, render_template, request, url_for, session, flash\r\n\r\nfrom flask_sqlalchemy import SQLAlchemy\r\n\r\nfrom sqlalchemy import (\r\n\t\tTable, Column, Integer, String, MetaData, ForeignKey, Boolean\r\n\t)\r\nfrom sqlalchemy import or_\r\n\r\n#Modules\r\nfrom flask_app import db, app\r\n\r\nfrom models import User, Msg\r\n\r\nimport msg_routes, user_routes\r\n\r\nfrom functools import wraps\r\n\r\n#for some reason I need to also import all from each of these. \r\n#especially list_routes for find all from list\r\nfrom msg_routes import *\r\nfrom user_routes import *\r\n\r\n#needs to be above all functions that use it???\r\ndef admin_login_required(f):\r\n\r\n\t@wraps(f)\r\n\tdef decorated_function():\r\n\t\tif g.user is None or \"user_id\" not in session:\r\n\t\t\treturn redirect(url_for(\"dashboard_forbidden\"))\r\n\t\telif user_read(session['user_id']).type != \"admin\":\r\n\t\t\treturn redirect(url_for(\"dashboard_forbidden\"))\r\n\t\treturn f()\r\n\treturn decorated_function\r\n\t\r\n\t\r\n@app.route(\"/get_msgs_and_current_msg\")\r\ndef get_msgs_and_current_msg():\r\n\tuser = user_read(session['user_id'])\r\n\t\r\n\t\r\n\tmsgs = Msg.query.all()\r\n\t#current_msg = find_current_msg(msgs)\r\n\treturn msgs, None#, current_msg\r\n\t\r\n@app.route(\"/\")\r\ndef index():\r\n\tnum_users = db.session.query(User).count()\r\n\tnum_msgs = db.session.query(Msg).count()\r\n\tif 'user_id' not in session or not user_read(session['user_id']):\r\n\t\treturn render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs)\r\n\telse:\r\n\t\tmsgs, current_msg = get_msgs_and_current_msg()\r\n\t\treturn render_template('auth/index.html', msgs=msgs, current_msg=current_msg, \r\n\t\tnum_users=num_users, num_msgs=num_msgs)\r\n\r\n#Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\r\n\r\n@app.route(\"/dashboard_show_msgs\")\r\n@admin_login_required\r\ndef dashboard_show_msgs():\r\n\t#get all msgs for the user\r\n\t\r\n\tmsgs, current_msg = get_msgs_and_current_msg()\r\n\treturn render_template('list/dashboard_msgs.html', msgs=msgs)\r\n\t\r\n\r\n@app.route(\"/dashboard_show_live_msgs\")\r\n@admin_login_required\r\ndef dashboard_show_live_msgs():\r\n\t#get all msgs for the user\r\n\t\r\n\tmsgs = db.session.query(Msg).filter(Msg.type==\"live\")\r\n\treturn render_template('list/dashboard_msgs.html', msgs=msgs)\r\n\t\r\n@app.route(\"/dashboard_show_waiting_msgs\")\r\n@admin_login_required\r\ndef dashboard_show_waiting_msgs():\r\n\t#get all msgs for the user\r\n\t\r\n\tmsgs = db.session.query(Msg).filter(Msg.type==\"waiting\")\r\n\treturn render_template('list/dashboard_msgs.html', msgs=msgs)\r\n\t\r\n@app.route(\"/dashboard_show_reported_msgs\")\r\n@admin_login_required\r\ndef dashboard_show_reported_msgs():\r\n\t#get all msgs for the user\r\n\t\r\n\tmsgs = db.session.query(Msg).filter(Msg.type==\"reported\")\r\n\treturn 
render_template('list/dashboard_msgs.html', msgs=msgs)\r\n\t\r\n\t\r\n#Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\r\n\r\n\r\n@app.route(\"/dashboard_show_all_users\")\r\n@admin_login_required\r\ndef dashboard_show_all_users():\r\n\t\r\n\tusers = User.query.all()\r\n\ttitle = \"All Users\"\r\n\treturn render_template('list/dashboard_users.html', users=users, title=title)\r\n\t\r\n@app.route(\"/dashboard_show_online_users\")\r\n@admin_login_required\r\ndef dashboard_show_online_users():\r\n\t\r\n\tusers = db.session.query(User).filter(User.online==True)\r\n\ttitle = \"Online Users\"\r\n\treturn render_template('list/dashboard_users.html', users=users, title=title)\r\n\t\r\n@app.route(\"/dashboard_show_admin_users\")\r\n@admin_login_required\r\ndef dashboard_show_admin_users():\r\n\t\r\n\tusers = db.session.query(User).filter(User.type==\"admin\")\r\n\ttitle = \"Admin Users\"\r\n\treturn render_template('list/dashboard_users.html', users=users, title=title)\r\n\t\r\n@app.route(\"/dashboard_show_premium_users\")\r\n@admin_login_required\r\ndef dashboard_show_premium_users():\r\n\t\r\n\tusers = db.session.query(User).filter(User.type==\"premium\")\r\n\ttitle = \"Premium Users\"\r\n\treturn render_template('list/dashboard_users.html', users=users, title=title)\r\n\r\n@app.route(\"/dashboard_show_reported_users\")\r\n@admin_login_required\r\ndef dashboard_show_reported_users():\r\n\t\r\n\tusers = db.session.query(User).filter(User.type==\"reported\")\r\n\ttitle = \"Reported Users\"\r\n\treturn render_template('list/dashboard_users.html', users=users, title=title)\r\n\r\n@app.route(\"/dashboard_show_banned_users\")\r\n@admin_login_required\r\ndef dashboard_show_banned_users():\r\n\t\r\n\tusers = db.session.query(User).filter(User.type==\"banned\")\r\n\ttitle = \"Banned Users\"\r\n\treturn render_template('list/dashboard_users.html', users=users, title=title)\r\n\t\r\n\r\n# Main ########################################################################################\r\n\r\n\r\n\t\r\n@app.route(\"/dashboard_forbidden\")\r\ndef dashboard_forbidden():\r\n\t\r\n\treturn render_template('list/forbidden.html')\r\n\t\r\n@app.route(\"/dashboard_error_404\")\r\ndef dashboard_error_404():\r\n\t\r\n\treturn render_template('list/error_404.html')\r\n\r\n@app.route(\"/dashboard\")\r\n@admin_login_required\r\ndef dashboard():\r\n\t\r\n\t#get all msgs for the user\r\n\tmsgs, current_msg = get_msgs_and_current_msg()\r\n\t\r\n\t#if not current_msg:\r\n\t#-->\r\n\treturn render_template('list/dashboard_msgs.html', msgs=msgs)\r\n\t\r\n\t#current, completed, deleted = [], [], []\r\n\t#tasks = Task.query.filter_by(parent_list=current_msg.id)\r\n\t#tasks = sorted(list(tasks), key=lambda x:(\r\n\t#-x.important,\r\n\t#x.state==\"deleted\",\r\n\t#x.state==\"current\",\r\n\t#x.state==\"completed\",\r\n\t#x.id))\r\n\t\r\n\t#for task in tasks:\r\n\t#\tif task.state == \"current\":\r\n\t#\t\tcurrent.append(task)\r\n\t#\telif task.state == \"completed\":\r\n\t#\t\tcompleted.append(task)\r\n\t#\telse:\r\n\t#\t\tdeleted.append(task)\r\n\t\t\t\r\n\t#current = sorted(current, key=lambda x:(-x.important, x.sort_value))\r\n\t\r\n\t#return render_template('list/dashboard_filter_all.html', \r\n\t#msgs=msgs, \r\n\t#current=current, \r\n\t#filter=\"All\")\r\n\t\r\n#find the list with current=True\r\n#@app.route(\"/find_current_msg\")\r\n#def find_current_msg(msgs):#\r\n#\tfor msg in msgs:\r\n#\t\tif msg.current == True:\r\n#\t\t\treturn msg\r\n#\treturn 
None","repo_name":"Yusef28/flattery","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"21829831642","text":"# %%\n#######################################\ndef pilresize_imageobject_keep_aspect_ratio(image_object: PIL.Image.Image, width=None, height=None):\n \"\"\"Takes a given PIL Image object and will resize the image while keeping the aspect ration. Either the \"width\" parameter or \"height\" parameter must receive an integer value for the desired 'anchor' property to be used when doing the resize (the function will automatically calculate the the other length, so only use one of the two, either \"width\" or \"height\"; do not specify both).\n\n Examples:\n >>> ##### EXAMPLE 1 #####\n >>> import PIL\\n\n >>> from PIL import Image\\n\n >>> pic6 = Image.open('6.jpg')\\n\n >>> pic6.size\\n\n (600, 800)\n\n >>> resize_pic6_width200 = pilresize_imageobject_keep_aspect_ratio(pic6, width=200)\\n\n >>> resize_pic6_width200.size\\n\n (200, 266)\n\n >>> ##### EXAMPLE 2 #####\n >>> resize_pic6_height200 = pilresize_imageobject_keep_aspect_ratio(pic6, height=200)\\n\n >>> resize_pic6_height200.size\\n\n (150, 200)\n \n >>> ##### EXAMPLE 3 #####\n >>> pilresize_imageobject_keep_aspect_ratio(pic6, 200, 200)\\n\n\n Either reference the \"width\" or the \"height\" parameter to be used as the 'anchor' for the resizing operation. Do not supply arguments to both.\\n\n\n >>> ##### EXAMPLE 4 #####\n >>> pilresize_imageobject( pic6, (200, 200) )\\n\n \\n\n\n\n Args:\n image_object (PIL.Image.Image): Reference an existig PIL Image object\n width (int, optional): Reference the desired width for the resize. Defaults to None.\n height (int, optional): Reference the desired height for the resize. Defaults to None.\n \"\"\"\n from PIL import Image\n# \n orig_width, orig_height = image_object.size\n#\n if width and height:\n print('\\nEither reference the \"width\" or the \"height\" parameter to be used as the \\'anchor\\' for the resizing operation. Do not supply arguments to both.\\n')\n return\n elif width:\n ratio_change = width / orig_width\n new_width = int( orig_width * ratio_change )\n new_height = int( orig_height * ratio_change )\n new_image = image_object.resize( (new_width, new_height ) , Image.ANTIALIAS)\n elif height:\n ratio_change = height / orig_height\n new_width = int( orig_width * ratio_change )\n new_height = int( orig_height * ratio_change )\n new_image = image_object.resize( (new_width, new_height ) , Image.ANTIALIAS)\n return new_image\n\n","repo_name":"SecTraversl/python_pil-image-analysis_Tools","sub_path":"pilresize_imageobject_keep_aspect_ratio.py","file_name":"pilresize_imageobject_keep_aspect_ratio.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"35382159145","text":"nums = [0 for _ in range(10001)]\n\ndef make_dr_num(num):\n dr_num = num\n for num_p in str(num):\n dr_num += int(num_p)\n return dr_num\n\n\nfor num in range(1, 10001):\n if nums[num] == 0:\n print(num)\n while num <= 10000:\n if nums[num] == 1:\n break\n else:\n nums[num] = 1\n num = make_dr_num(num)","repo_name":"GitJaehyeonLee/Algorithm_Study_With_Python","sub_path":"002. Basic Code/002. 백준 STEP/005. 함수 (3 Clear)/002. 셀프 넘버.py","file_name":"002. 
셀프 넘버.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"25529168068","text":"# 10-Escribe un programa que pida al usuario una cadena de texto y luego imprima\r\n# la misma cadena pero con todas las vocales en mayúscula.\r\n\r\ntexto = input('Ingrese un texto: ')\r\ntexto_nuevo = ''\r\nfor letra in texto:\r\n if letra== \"a\"or letra==\"e\"or letra==\"i\"or letra==\"o\"or letra==\"u\":\r\n texto_nuevo += letra.upper()\r\n else:\r\n texto_nuevo += letra\r\nprint(texto_nuevo)\r\n\r\n","repo_name":"LangBraian/Informatorio2023","sub_path":"Semana 3/Ejercicios/Ejercicio10.py","file_name":"Ejercicio10.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"73910026579","text":"import os\n\n\nclass rt_sw2sw:\n def __init__(self, filePath:str) -> None:\n self.slot_num = 0 # 时间片的个数\n self.rt_sw2sw_slot = dict() # 所有时间片的数据\n # self.rt_sw2sw_slot0 = rt_db2db.load_db2db(filePath + \"/s2s_0\")\n self.rt_sw2sw_diff = dict() # 所有时间片的数据\n self.start(filePath)\n\n def start(self, filePath:str):\n # 读取分布式数据库之间的默认路由信息\n self.slot_num = len(os.listdir(filePath)) # 获取时间片个数\n for index in range(self.slot_num):\n self.rt_sw2sw_slot[index] = rt_sw2sw.load_sw2sw(filePath + \"/s2s_\" + str(index))\n for index in range(self.slot_num):\n self.rt_sw2sw_diff[index] = rt_sw2sw.diff_sw2sw(self.rt_sw2sw_slot[index], \\\n self.rt_sw2sw_slot[(index+1)%self.slot_num])\n\n @staticmethod\n def load_sw2sw(filename:str):\n # 从文件当中加载一个时间片的默认路由信息\n data = dict()\n with open(file=filename) as file:\n lines = file.read().splitlines()\n for line in lines:\n line_list = line.strip(' ').split(' ')\n sw1 = int(line_list[0])\n if len(line_list)==1:\n data[sw1] = dict()\n continue\n sw2 = int(line_list[1])\n if sw2 not in data[sw1]:\n data[sw1][sw2] = list()\n # flow = int(line_list[2])\n # sw = int(flow/1000)\n # port = flow - sw*1000\n # data[sw1][sw2].append((sw, port+1000))\n for i in range(len(line_list)-2):\n flow = int(line_list[i+2])\n sw = int(flow/1000)\n port = flow - sw*1000\n data[sw1][sw2].append((sw, port+1000))\n\n return data\n \n @staticmethod\n def diff_sw2sw(dslot_b:dict, dslot_n:dict):\n # 找出两个时间片的路由的增删\n data = dict()\n for sw in dslot_b:\n data[sw] = list()\n for adj_sw in dslot_b[sw]:\n for rt in dslot_b[sw][adj_sw]:\n if rt not in dslot_n[sw][adj_sw]:\n data[sw].append((-1, adj_sw, rt[0], rt[1]))\n for adj_sw in dslot_n[sw]:\n for rt in dslot_n[sw][adj_sw]:\n if rt not in dslot_b[sw][adj_sw]:\n data[sw].append((1, adj_sw, rt[0], rt[1]))\n return data\n\n","repo_name":"AkyuC/topo_ovs","sub_path":"route_default/rt_sw2sw.py","file_name":"rt_sw2sw.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"21411049758","text":"class WordDictionary:\n \"\"\"Solution 1\n [MEMO+1] Use dict to implement trie, use \"$\" to indicate is end. 
Memorize how \"$\" is added and how \"$\" is checked.\n    My original passing code took a similar approach (with a customized data structure instead of just using a dict, and more code...)\n    This solution is not faster than my own code.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.trie = {}\n\n    def addWord(self, word: str) -> None:\n        \"\"\"\n        Adds a word into the data structure.\n        \"\"\"\n        node = self.trie\n\n        for ch in word:\n            if ch not in node:\n                node[ch] = {}\n            node = node[ch]\n        node[\"$\"] = True\n\n    def search(self, word: str) -> bool:\n        \"\"\"\n        Returns whether the word is in the data structure. A word could contain the dot character '.' to represent any letter.\n        \"\"\"\n\n        def search_in_node(word, node) -> bool:\n            for i, ch in enumerate(word):\n                if ch not in node:\n                    # if the current character is '.'\n                    # check all possible nodes at this level\n                    if ch == \".\":\n                        for x in node:\n                            if x != \"$\" and search_in_node(word[i + 1 :], node[x]):\n                                return True\n                    # if no nodes lead to answer\n                    # or the current character != '.'\n                    return False\n                # if the character is found\n                # go down to the next level in trie\n                else:\n                    node = node[ch]\n            return \"$\" in node\n\n        return search_in_node(word, self.trie)","repo_name":"stevenjst0121/leetcode","sub_path":"211_word_dictionary_trie.py","file_name":"211_word_dictionary_trie.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"33175563007","text":"from glob import glob\nfrom PIL import Image\nfrom config import * # my dropbox API key and Push bullet API key\nimport subprocess\n\ndef the_cropper():\n    # Setting the points for the cropped image \n    left = 0\n    upper = 0\n    right = 3200\n    lower = 1800\n    print(\".........................................................\")\n    print(f\"Cropping files to new res: {right} by {lower}\")\n    file_list = glob(\"/home/pi/sunrise300/images/*.JPG\")\n    for names in file_list:\n        im = Image.open(names)\n        im = im.crop((left, upper, right, lower))\n        im.save(names)\n\nif __name__ == \"__main__\":\n    the_cropper()","repo_name":"llewmihs/sunrise300","sub_path":"cropper4000.py","file_name":"cropper4000.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"22517223386","text":"import numpy as np\nimport math\n\nENV_NAME = \"CartPole\" # Task name\nFILE_NAME_LOG = \"log.csv\" # Log file\n\nclass CartPole:\n    \"\"\"\n    Description:\n        A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum starts upright, and the goal is to prevent it from falling over by increasing and reducing the cart's velocity.\n\n    Source:\n        This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson\n\n    Observation:\n        Type: Box(4)\n        Num\tObservation Min Max\n        0\tCart Position -4.8 4.8\n        1\tCart Velocity -Inf Inf\n        2\tPole Angle -24 deg 24 deg\n        3\tPole Velocity At Tip -Inf Inf\n\n    Actions:\n        Type: Discrete(2)\n        Num\tAction\n        0\tPush cart to the left\n        1\tPush cart to the right\n\n        Note: The amount by which the velocity is reduced or increased is not fixed; it depends on the angle the pole is pointing. 
This is because the center of gravity of the pole increases the amount of energy needed to move the cart underneath it\n\n    Reward:\n        Reward is 1 for every step taken, including the termination step\n\n    Starting State:\n        All observations are assigned a uniform random value in [-0.05..0.05]\n\n    Episode Termination:\n        Pole Angle is more than 12 degrees\n        Cart Position is more than 2.4 (center of the cart reaches the edge of the display)\n        Episode length is greater than 200\n        Solved Requirements\n        Considered solved when the average reward is greater than or equal to 195.0 over 100 consecutive trials.\n    \"\"\"\n\n    def __init__(self, balance_start = 0):\n        self.action_space = 2 # number of outputs\n        self.timesteps = 0 # data window for the LSTM\n        self.observation_space = 4 # number of inputs\n        self.balance_start = balance_start # starting balance\n        self.balance = self.balance_start # balance\n        self.state = None # current state\n        self.log = [] # logs for this task\n\n        self.gravity = 9.8\n        self.masscart = 1.0\n        self.masspole = 0.1\n        self.total_mass = (self.masspole + self.masscart)\n        self.length = 0.5 # actually half the pole's length\n        self.polemass_length = (self.masspole * self.length)\n        self.force_mag = 10.0\n        self.tau = 0.02  # seconds between state updates\n\n        # Angle at which to fail the episode\n        self.theta_threshold_radians = 12 * 2 * math.pi / 360\n        self.x_threshold = 2.4\n        self.steps_beyond_done = None\n\n    def step(self, action, num = 0):\n        \"\"\"\n        Calculates the next step of the game, the reward for the action taken,\n        and a flag indicating whether the current game has finished.\n        :param int num: Index of the current training example.\n        :param int action: Action.\n        :return: tuple(ndarray, float, bool, dict)\n        \"\"\"\n        # compute the next state\n        (x, x_dot, theta, theta_dot) = self.state\n        force = self.force_mag if action == 1 else -self.force_mag\n        costheta = math.cos(theta)\n        sintheta = math.sin(theta)\n        temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass\n        thetaacc = (self.gravity * sintheta - costheta * temp) / (self.length * (4.0 / 3.0 - self.masspole * costheta * costheta / self.total_mass))\n        xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n        x += self.tau * x_dot\n        x_dot += self.tau * xacc\n        theta += self.tau * theta_dot\n        theta_dot += self.tau * thetaacc\n        self.state = (x, x_dot, theta, theta_dot)\n\n        # compute the reward\n        done = self.game_over(x, theta)\n        if not done:\n            reward = 1.0\n        elif self.steps_beyond_done is None:\n            self.steps_beyond_done = 0\n            reward = 1.0\n        else:\n            self.steps_beyond_done += 1\n            reward = 0.0\n\n        self.balance += reward\n        if done:\n            self.log.append({\"Balance\": self.balance, \"Reward\": reward, \"Num\": num})\n        return (np.array(self.state), reward, done, {})\n\n    def reset(self, num = 0):\n        \"\"\"\n        Resets the game to its initial state.\n        :param int num: Index of the current training example.\n        :return: ndarray\n        \"\"\"\n        self.balance = self.balance_start\n        self.state = np.random.uniform(low=-0.05, high=0.05, size=(self.observation_space,))\n        self.steps_beyond_done = None\n        return np.array(self.state)\n\n    def game_over(self, x, theta):\n        \"\"\"\n        Condition for ending the current game.\n        :return: bool\n        \"\"\"\n        done = x < -self.x_threshold \\\n            or x > self.x_threshold \\\n            or theta < -self.theta_threshold_radians \\\n            or theta > self.theta_threshold_radians\n        done = bool(done)\n        return done\n\n    def getBalance(self):\n        \"\"\"\n        The current game balance.\n        :return: float\n        \"\"\"\n        return self.balance\n\n    def 
getStartNum(self):\n        \"\"\"\n        Index of the example from which to start.\n        :return: int\n        \"\"\"\n        return self.timesteps\n","repo_name":"the-lans/ml_course","sub_path":"Reinforce/scores/cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":5867,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"13"} +{"seq_id":"18713786653","text":"class Solution:\n    def canPartition(self, nums: [int]) -> bool:\n        n = len(nums)\n        if n < 2:\n            return False\n        sumn = sum(nums)\n        if sumn % 2 != 0:\n            return False\n        target = sumn >> 1\n        maxn = max(nums)\n        if maxn > target:\n            return False\n        dp = [False] * (target + 1)\n        dp[0] = True\n        for i in range(n):\n            v = nums[i]\n            for j in range(target, v - 1, -1):\n                dp[j] = dp[j] or dp[j - v]\n        return dp[target]\n\n\n\nif __name__ == \"__main__\":\n    s = Solution()\n    a = s.canPartition([1,2,5])\n    print(a)\n\n","repo_name":"russellgao/algorithm","sub_path":"dailyQuestion/2020/2020-10/10-11/python/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"13"} +{"seq_id":"10191479295","text":"def solution(s):\n    answer = ''\n    arr = s.split(' ')\n\n    for x in arr:\n        if x != '':\n            first = x[0]\n            left = x[1:]\n            answer += first.upper()\n            answer += left.lower()\n        answer += ' '\n\n    answer = answer[:len(answer) - 1]\n    return answer\n","repo_name":"Jinnie-J/Algorithm-study","sub_path":"programmers/JadenCase문자열만들기.py","file_name":"JadenCase문자열만들기.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"40711856139","text":"import numpy as np\nimport random \nimport cv2\nimport math\nfrom scipy import signal\n\nfrom settings import params\n\n\n'''\nSELECTIVE ATTENTION SCHEME\n\nA video image can be categorized into\n    1. background regions -> sparse attention/scanning\n    2. unimportant dynamic scene regions -> sparse attention/scanning\n    3. regions where important moving objects appear -> attention on these regions\nGOAL: focus on the foreground area --> the necessary calculation would be reduced significantly\n\nTo get the active sampling mask we use 3 properties: temporal, spatial, frequency.\nBased on these properties we make a fg probability map P_FG.\n\nThe active sampling strategy is updated every frame according to (P_FG)^(t-1)\n3 sampling strategies:\n    1. randomly scattered sampling (RSS)\n    2. spatially expanding importance sampling (SEIS)\n    3. 
surprise pixel sampling\nUsing these sampling methods we build the sampling mask M_t.\n\nUsing the mask M_t, selective pixel-wise background subtraction is performed only for\nthe pixels of M_t(n)=1\n\n\n'''\n\n\n#### FOREGROUND PROBABILITY MAP GENERATION\n#### ESTIMATION OF FOREGROUND PROPERTIES\n# alpha_t = 0.1, alpha_f = 0.01, alpha_s = 0.05, phi = 0.05, k=sqrt(3) in section 6\nclass FgMap():\n\n    def __init__(self, frame_gray) -> None:\n        self.frame = frame_gray\n\n        self.temporal_prop = np.zeros_like(frame_gray)\n        self.spatial_prop = np.zeros_like(frame_gray)\n        self.freq_prop = np.zeros_like(frame_gray)\n    \n        self.detection_mask = np.zeros_like(frame_gray)\n        self.mask_t_1 = np.zeros_like(frame_gray)\n        self.mask_t_2 = np.zeros_like(frame_gray)\n    \n        self.fg_map = np.zeros_like(frame_gray)\n\n    def update_temporal(self):\n        alpha_t = params.ALPHA_T # defined in section 6 of the paper\n        self.temporal_prop = (1-alpha_t)* self.temporal_prop + alpha_t * self.detection_mask\n\n    def compute_s_t(self):\n        s_t = np.zeros_like(self.frame)\n        omega = params.OMEGA\n        kernel = np.ones((omega,omega))\n        s_t = (1/omega**2)*signal.correlate2d(self.detection_mask,kernel,mode='same')\n        return s_t\n\n\n    def update_spatial(self):\n        alpha_s = params.ALPHA_S\n        s_t = self.compute_s_t()\n        self.spatial_prop = (1-alpha_s)*self.spatial_prop + alpha_s * s_t\n    \n    def compute_f_t(self):\n        blinking = (self.mask_t_1 !=self.mask_t_2)&(self.mask_t_1 !=self.detection_mask)\n        f_t = blinking.astype(int)\n        return f_t\n\n    def update_freq(self):\n        alpha_f = params.ALPHA_F\n        f_t = self.compute_f_t()\n        self.freq_prop = (1-alpha_f)*self.freq_prop + alpha_f*f_t \n\n    def update_properties(self):\n        self.update_temporal()\n        self.update_spatial()\n        self.update_freq()\n    \n    def calc_fg_map(self,frame,detection_mask):\n        self.frame = frame\n        self.detection_mask = detection_mask \n        self.update_properties()\n        self.mask_t_2 = self.mask_t_1\n        self.mask_t_1 = detection_mask\n        self.fg_map = self.temporal_prop * self.spatial_prop * (1-self.temporal_prop)\n\n        # let's try this and see if it works\n        return self.fg_map\n\n\n\nclass ActiveSamplingMask():\n    def __init__(self,frame) -> None:\n\n        # Params to update every frame\n        self.frame = frame\n        self.detection_mask = np.zeros_like(frame)\n        self.fg_map = np.zeros_like(frame)\n\n        # the active sampling mask is obtained by a combination of 3 masks by a \n        # pixel-wise OR as M_t = M_RS_t or M_SEI_t or M_SP_t\n        self.samp_mask = np.zeros_like(frame)\n\n        # The sampling mask is obtained by a combination of 3 masks\n        self.rand_scattered_mask = np.zeros_like(frame)\n        self.spatial_exp_imp_mask = np.zeros_like(frame)\n        self.surprise_pix_samp_mask = np.zeros_like(frame)\n\n\n        self.phi = params.PHI # for Random Scattered sampling, usually between [0.05,0.1]\n        self.k_sei = params.K_SEI\n\n    def calc_rand_scattered_sampling(self):\n        '''\n        phi% of all pixels are selected through randomly\n        scattered sampling\n        '''\n    \n        # TODO check whether the reused pixels should also be added\n    \n        frame = self.frame\n        self.rand_scattered_mask = np.zeros_like(frame)\n        D_t = self.detection_mask\n        N = np.size(frame) # nb of pixels in frame \n        N_reuse = np.sum(D_t)\n        # TODO problem here\n        nb_rand_samp = np.round(self.phi*N - N_reuse).astype(int)\n    \n        if nb_rand_samp > 0:\n            indices = [(i,j) for i in range(frame.shape[0]) for j in range(frame.shape[1]) \n                        if D_t[i,j] < 0.5]\n\n            # randomly select pixels that are not in the detection map \n            rand_ind = np.array(random.sample(indices,nb_rand_samp))\n            self.rand_scattered_mask[rand_ind[:,0],rand_ind[:,1]] = 
1\n\n\n if params.DEBUG:\n debg = np.expand_dims(frame,axis=2)\n debg = np.repeat(debg,3,axis=2)\n debg[self.rand_scattered_mask>0,2]=255\n cv2.imshow(\"random scattered sampling\", debg)\n cv2.waitKey(1)\n\n\n def calc_spatially_expanding_importance_sampling(self):\n ''' \n RS is too sparse to construct a complete fg region and might miss small\n objects. necessary to fill the space between sparse points in the fg region\n \n M_t_SEI = S_t_SEI(M_t_RS,P_t-1_FG)\n \n The importance weight of each randomly scattered sample i becomes r_t(i) = P_FG(i)\n Proportional to r_t(i) we expand the sampling region N(i) with size of E_t(i) x E_t(i)\n \n '''\n frame = self.frame\n self.spatial_exp_imp_mask = np.zeros_like(frame)\n \n P_FG = self.fg_map\n N = np.size(frame)\n Ns = np.sum(self.rand_scattered_mask)\n omega_s = self.k_sei*math.sqrt(N/Ns)\n for i in range(frame.shape[0]):\n for j in range(frame.shape[1]):\n if self.rand_scattered_mask[i,j] == 1:\n phi_t = np.round(P_FG[i,j]*omega_s)\n top = (i-phi_t) if (i-phi_t) >= 0 else 0\n bottom = (i+phi_t) if (i+phi_t) < frame.shape[0] else frame.shape[0]-1\n top, bottom = int(np.round(top)), int(np.round(bottom)) \n left = (j-phi_t) if (j-phi_t) >= 0 else 0\n right = (j+phi_t) if (j+phi_t) < frame.shape[1] else frame.shape[1]-1\n left, right = int(np.round(left)), int(np.round(right)) \n \n self.spatial_exp_imp_mask[top:bottom,left:right]=1\n \n print(\"c'est bon\")\n \n # visualization\n if params.DEBUG:\n debg = np.expand_dims(frame,axis=2)\n debg = np.repeat(debg,3, axis=2)\n debg[self.spatial_exp_imp_mask>0,1] = 255\n cv2.imshow(\"spatially expanding importance sampling\", debg)\n cv2.waitKey(1)\n \n\n def calc_surprise_pixel_sampling(self):\n pass\n\n def calc_sampling_mask(self,frame,detection_mask,fg_map):\n '''\n to keep the efficiency of the additional load (computation of fg map)\n restrict the SEARCH SPACE\n extract the candidate pixel positions to run the background subtraction\n and model update\n\n Also extract the positions randomly as 5% of entire pixels to detect the\n newly appeared objects\n '''\n self.frame = frame\n self.detection_mask = detection_mask\n self.fg_map = fg_map\n\n self.calc_rand_scattered_sampling()\n self.calc_spatially_expanding_importance_sampling()\n self.calc_surprise_pixel_sampling()\n tmp = np.bitwise_or(self.rand_scattered_mask, self.surprise_pix_samp_mask)\n self.samp_mask = np.bitwise_or(tmp, self.spatial_exp_imp_mask).astype(int)\n return self.samp_mask","repo_name":"smbensi/project2","sub_path":"bg_sub/utils/active_samp/sampling_map_gen.py","file_name":"sampling_map_gen.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"24553655598","text":"from numpy import array\nimport numpy as np\nfrom keras.utils import to_categorical\nimport string\n\nMAX_LENGTH = 280 #max lenght of a tweet\nCHARS = list(string.printable) #100\n\ndef deal_with_unknown_characters(tweet):\n\n for char in tweet[:]:\n\n if char not in list(string.printable):\n\n\n tweet = tweet.replace(char,\"\")\n\n\n return tweet\n\ndef map_to_ints(a_string):\n\n integer_mapping = {x: i for i, x in enumerate(CHARS)}\n\n try:\n\n return [integer_mapping[char] for char in a_string]\n\n except:\n\n\n cleaned_up = deal_with_unknown_characters(a_string)\n\n return [integer_mapping[char] for char in cleaned_up]\n\n\n\n#currently not used\n# def get_one_hot(a_string):\n# integer_mapping = {x: i for i, x in enumerate(CHARS)}\n# string_vec = 
[integer_mapping[char] for char in a_string]\n#     return to_categorical(string_vec, num_classes=len(CHARS))\n#\n# def pad_tweet(one_hot_encoded_tweet):\n#     if len(one_hot_encoded_tweet) < MAX_LENGTH:\n#         difference = MAX_LENGTH - len(one_hot_encoded_tweet)\n#         zeros = np.zeros(shape=(difference, len(CHARS)))\n#         return np.concatenate((one_hot_encoded_tweet,zeros))\n\n","repo_name":"neilmizzi/TMD_HateSpeech","sub_path":"predicting/onehotencoding.py","file_name":"onehotencoding.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"38642094819","text":"import json\r\nimport time\r\nimport datetime\r\nimport requests\r\nfrom get_weekContent import GetTableContent\r\nfrom apscheduler.schedulers.blocking import BlockingScheduler\r\n\r\ncount_machine = {\"resource_response_num\": 0, # responded today\r\n                 \"bid_partner_num\": 0, # partner configuration in progress today\r\n                 \"bid_ongoing_num\": 0, # trial bidding in progress today\r\n                 \"resource_allocate_num\": 0, # resource allocation in progress today\r\n                 \"program_confirm_num\": 0, #\r\n                 \"create_n\": 0, # total submitted this week\r\n                 \"finish_n\": 0 # total configurations finished this week\r\n                 }\r\n\r\n\r\ndef process():\r\n    now = datetime.datetime.now()\r\n    monday = (now - datetime.timedelta(days=now.weekday()))\r\n    biz_params = {\r\n        \"form_code\": \"ae0f610bcc9f4fedb468784aab2366a4\",\r\n        \"start_date\": time.strftime(\"%Y-%m-%d\"),\r\n        \"end_date\": time.strftime(\"%Y-%m-%d\")}\r\n\r\n    mon_params = {\r\n        \"form_code\": \"ae0f610bcc9f4fedb468784aab2366a4\",\r\n        \"start_date\": monday.strftime(\"%Y-%m-%d\"),\r\n        \"end_date\": time.strftime(\"%Y-%m-%d\")}\r\n\r\n    biz_params = json.dumps(biz_params)\r\n    mon_params = json.dumps(mon_params)\r\n\r\n    msg_id = \"world\"\r\n    app_id = \"cli_9f1cc7f544f6500c\"\r\n    secret = \"85aae6f1-2fb8-11eb-bd92-df3459788b11\"\r\n\r\n    weektest = GetTableContent(app_id=app_id, secret=secret, biz_params=mon_params, msg_id=msg_id)\r\n    weekresponse = weektest.run()\r\n\r\n    for item in weekresponse['data']:\r\n        tmp = item['current_node_name']\r\n        count_machine['create_n'] += 1\r\n        if tmp == \"资源配置\" or tmp == \"资源配置,资源配置\" or \"配置方案审批-项目测\" in tmp or \"配置方案审批-资源测\" in tmp or \"资源配置方案审批-项目\" in tmp or \"资源配置方案审批-资源\" in tmp or \"资源使用确认\" in tmp or \\\r\n                item['process_status'] == \"APPROVED\":\r\n            count_machine['finish_n'] += 1\r\n\r\n    test = GetTableContent(app_id=app_id, secret=secret, biz_params=biz_params, msg_id=msg_id)\r\n\r\n    response = test.run()\r\n    # print(response['data'][0])\r\n    for item in response['data']:\r\n\r\n        tmp = item['current_node_name']\r\n        if item['process_status']:\r\n            count_machine[\"resource_response_num\"] += 1\r\n        if \"试标合作商配置\" in tmp or \"BPO方式\" in tmp:\r\n            count_machine[\"bid_partner_num\"] += 1\r\n        if \"试标资源确认\" in tmp or \"试标结论同步\" in tmp or \"试标效果确认\" in tmp:\r\n            count_machine[\"bid_ongoing_num\"] += 1\r\n        if tmp == \"资源配置\" or tmp == \"资源配置,资源配置\":\r\n            count_machine[\"resource_allocate_num\"] += 1\r\n\r\n    res = {\"time\": time.strftime(\"%Y.%m.%d\"), \"start\": len(response['data'])}\r\n    res.update(count_machine)\r\n    return res\r\n\r\nprocess_res = process()\r\n\r\ndef send1():\r\n    url = \"\" # deleted\r\n\r\n    payload = {\"events\": [process_res]}\r\n    payload = json.dumps(payload)\r\n    # print(payload)\r\n    headers = {'Content-Type': 'application/json'}\r\n    requests.request(\"POST\", url, headers=headers, data=payload)\r\n\r\n\r\n\r\ndef send2():\r\n    url = \"\" # deleted\r\n\r\n    payload = {\"events\": [process_res]}\r\n    payload = json.dumps(payload)\r\n    # print(payload)\r\n    headers = {'Content-Type': 'application/json'}\r\n    
requests.request(\"POST\", url, headers=headers, data=payload)\r\n\r\n\r\nsend1()\r\nsend2()\r\n# schedule = BlockingScheduler()\r\n# schedule.add_job(send, trigger=\"cron\", hour=9, minute=55)\r\n# schedule.start()\r\n","repo_name":"damon46kai/Tiktok_project","sub_path":"pythonProject1/invokeWeek.py","file_name":"invokeWeek.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"69982766418","text":"# Anonymizes participant details to hide PII\n\nimport json\nimport pickle\nimport copy\nimport numpy as np\nfrom pkl2json import pkl2jsonRecursive\nfrom api import User\n\ndef anonymize():\n users: dict[str, User] = pickle.load( open('./study-utils/users.p', 'rb') )\n with open('./study-utils/users.json', 'r') as f:\n users_json = json.load(f)\n anonymized_users: dict[str, User] = dict()\n for i, (email, user) in enumerate(users.items()):\n print(email)\n if '@anonymized.com' not in email and 'test' not in users_json[email]['background']:\n new_email = 'participant' + str(i+1) + '@anonymized.com'\n for run in user.runs:\n with open('./docker-out/' + run + '/project_info.json', 'r') as f:\n project_info = json.load(f)\n project_info['email'] = new_email\n with open('./docker-out/' + run + '/project_info.json', 'w') as f:\n json.dump(project_info, f)\n anonymized_users[new_email] = copy.deepcopy(users[email])\n anonymized_users[new_email].background = users_json[email]['background']\n # del users[email]\n pickle.dump( anonymized_users, open('./study-utils/users.p', 'wb') )\n pkl2jsonRecursive('real')\n\nif __name__ == '__main__':\n anonymize()","repo_name":"blazerzero/duo-clean","sub_path":"server/anonymize.py","file_name":"anonymize.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"6627420476","text":"# HumanDissensus.py\n# Takes training data produced by five different people and\n# measures their agreement with the consensus model that was\n# produced by collating their data.\n\nfrom zipfile import ZipFile\nimport sys, os\nimport SonicScrewdriver as utils\n\nrootpath = \"/Users/tunder/Dropbox/pagedata/deprecated/\"\nfolderlist = [\"Jonathan\", \"Lea\", \"Nicole\", \"Shawn\", \"Ted\"]\n\ndef addgenre(agenre, thedictionary):\n\tif agenre in thedictionary:\n\t\tthedictionary[agenre] += 1\n\telse:\n\t\tthedictionary[agenre] = 1\n\n\treturn thedictionary\n\n# translator = {'subsc' : 'front', 'argum': 'non', 'pref': 'non', 'aut': 'non', 'bio': 'non', 'toc': 'front', 'title': 'front', 'bookp': 'front', 'bibli': 'back', 'gloss': 'back', 'epi': 'fic', 'errat': 'non', 'notes': 'non', 'ora': 'non', 'let': 'non', 'trv': 'non', 'lyr': 'poe', 'nar': 'poe', 'vdr': 'dra', 'pdr': 'dra', 'clo': 'dra', 'impri': 'front', 'libra': 'back', 'index': 'back'}\n\ntranslator = {'subsc' : 'front', 'argum': 'non', 'pref': 'non', 'aut': 'non', 'bio': 'non', 'toc': 'front', 'title': 'front', 'bookp': 'front', 'bibli': 'back', 'gloss': 'back', 'epi': 'fic', 'errat': 'non', 'notes': 'non', 'ora': 'non', 'let': 'non', 'trv': 'non', 'lyr': 'poe', 'nar': 'poe', 'vdr': 'dra', 'pdr': 'dra', 'clo': 'dra', 'impri': 'front', 'libra': 'back', 'index': 'back'}\n\nsecondtranslate = {'front': 'paratext', 'back': 'paratext', 'ads': 'paratext'}\n\ndef translate(agenre):\n\tglobal translator\n\n\tif agenre in translator:\n\t\tagenre = translator[agenre]\n\n\treturn agenre\n\ndef effectively_equal(genreA, genreB):\n\tglobal 
secondtranslate\n\n\tif genreA in secondtranslate:\n\t\tgenreA = secondtranslate[genreA]\n\n\tif genreB in secondtranslate:\n\t\tgenreB = secondtranslate[genreB]\n\n\tif genreA == genreB:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\n\ngenrecounts = dict()\n\nvolumesread = dict()\n\nfor folder in folderlist:\n\tthispath = os.path.join(rootpath, folder)\n\tfilelist = os.listdir(thispath)\n\tfor afile in filelist:\n\t\tif afile.endswith(\"maps.zip\"):\n\t\t\tfilepath = os.path.join(thispath, afile)\n\t\t\twith ZipFile(filepath, mode='r') as zf:\n\t\t\t\tfor member in zf.infolist():\n\n\t\t\t\t\tif not member.filename.endswith('/') and not member.filename.endswith(\"_Store\") and not member.filename.startswith(\"_\"):\n\t\t\t\t\t\tdatafile = ZipFile.open(zf, name=member, mode='r')\n\t\t\t\t\t\tfilelines = datafile.readlines()\n\t\t\t\t\t\tfilelines[0] = filelines[0].rstrip()\n\t\t\t\t\t\thtid = filelines[0].decode(encoding=\"UTF-8\")\n\t\t\t\t\t\tthismap = list()\n\t\t\t\t\t\tcounter = 0\n\t\t\t\t\t\tfor line in filelines[1:]:\n\t\t\t\t\t\t\tline = line.decode(encoding=\"UTF-8\")\n\t\t\t\t\t\t\tline = line.rstrip()\n\t\t\t\t\t\t\tfields = line.split(\"\\t\")\n\t\t\t\t\t\t\tif int(fields[0]) != counter:\n\t\t\t\t\t\t\t\tprint(\"error\\a\")\n\t\t\t\t\t\t\tcounter += 1\n\t\t\t\t\t\t\tthisgenre = fields[1]\n\t\t\t\t\t\t\tthismap.append(thisgenre)\n\t\t\t\t\t\t\tgeneralized = translate(thisgenre)\n\t\t\t\t\t\t\tgenrecounts = addgenre(generalized, genrecounts)\n\n\n\t\t\t\t\t\tif htid in volumesread:\n\t\t\t\t\t\t\tvolumesread[htid].append((folder,thismap))\n\t\t\t\t\t\t\t# Note that we append a twotuple, of which the first element is the folder string\n\t\t\t\t\t\t\t# and the second, the map itself. We will use the folder ID to give preference to\n\t\t\t\t\t\t\t# ratings by me (Ted).\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tvolumesread[htid] = [(folder, thismap)]\n\ndef comparelists(firstmap, secondmap, genremistakes, correctbygenre, wordcounts):\n\tif len(firstmap) > len(secondmap):\n\t\tlength = len(secondmap)\n\telif len(firstmap) == len(secondmap):\n\t\tlength = len(firstmap)\n\telse:\n\t\tprint(\"Error, Will Robinson. 
There are occasions where the consensus version is shorter but no valid reason for it to be longer.\")\n\n\tdivergence = 0.0\n\n\tfor i in range(length):\n\n\t\tgeneralizedfirst = translate(firstmap[i])\n\t\tgeneralizedsecond = translate(secondmap[i])\n\n\t\tif effectively_equal(generalizedfirst, generalizedsecond):\n\t\t\tutils.addtodict(generalizedsecond, wordcounts[i], correctbygenre)\n\t\telse:\n\t\t\tdivergence += wordcounts[i]\n\t\t\tutils.addtodict((generalizedsecond, generalizedfirst), wordcounts[i], genremistakes)\n\n\treturn divergence\n\ngenremistakes = dict()\ncorrectbygenre = dict()\nvolumepercents = dict()\noverallcomparisons = 0\noverallagreement = 0\n\ncountwords = True\n\nif countwords:\n\tfilewordcounts = dict()\n\twith open(\"/Users/tunder/Dropbox/pagedata/pagelevelwordcounts.tsv\", mode=\"r\", encoding=\"utf-8\") as f:\n\t\tfilelines = f.readlines()\n\n\tfor line in filelines[1:]:\n\t\tline = line.rstrip()\n\t\tfields = line.split('\\t')\n\t\thtid = fields[0]\n\t\tpagenum = int(fields[1])\n\t\tcount = int(fields[2])\n\n\t\tif htid in filewordcounts:\n\t\t\tfilewordcounts[htid].append((pagenum, count))\n\t\telse:\n\t\t\tfilewordcounts[htid] = [(pagenum, count)]\n\n\tfor key, value in filewordcounts.items():\n\t\tvalue.sort()\n\t\t# This just makes sure tuples are sorted in pagenum order.\nelse:\n\tfilewordcounts = dict()\n\nbadvols = [\"njp.32101072911116\", \"nyp.33433069339749\", \"hvd.hwjsgk\"]\nconsensuspath = \"/Users/tunder/Dropbox/pagedata/mixedtraining/genremaps/\"\nconsensusversions = dict()\n\nficcounter = 0\n\nfor htid, listoftuples in volumesread.items():\n\tif htid in badvols:\n\t\tcontinue\n\tfilepath = consensuspath + htid + \".map\"\n\n\ttry:\n\t\twith open(filepath, mode=\"r\", encoding=\"utf-8\") as f:\n\t\t\tfilelines = f.readlines()\n\texcept:\n\t\tcontinue\n\n\tthismap = list()\n\tfor line in filelines:\n\t\tline = line.rstrip()\n\t\tfields = line.split(\"\\t\")\n\t\tgenre = translate(fields[1])\n\t\tthismap.append(genre)\n\t\tif genre == \"fic\":\n\t\t\tficcounter += 1\n\n\tconsensusversions[htid] = thismap\n\nfor key, listoftuples in volumesread.items():\n\n\thtid = key\n\tif htid in badvols:\n\t\tcontinue\n\n\ttruegenres = consensusversions[htid]\n\n\tnummaps = len(listoftuples)\n\n\tlengthofvolume = len(listoftuples[0][1])\n\n\tif nummaps == 1:\n\t\tcontinue\n\n\t# We don't check agreement when there was only one rater, because\n\t# it's a foregone conclusion that one person will agree with herself.\n\n\tif countwords:\n\t\twordcounts = [x[1] for x in filewordcounts[htid]]\n\telse:\n\t\twordcounts = [1] * lengthofvolume\n\n\tpotentialcomparisons = nummaps * sum(wordcounts)\n\ttotaldivergence = 0\n\n\tfor reading in listoftuples:\n\t\treadera = reading[0]\n\t\tpredictedgenres = reading[1]\n\n\t\tdivergence = comparelists(predictedgenres, truegenres, genremistakes, correctbygenre, wordcounts)\n\t\ttotaldivergence += divergence\n\n\tagreement = (potentialcomparisons - totaldivergence)\n\tagreementpercent = agreement / potentialcomparisons\n\tvolumepercents[htid] = agreementpercent\n\toverallcomparisons += potentialcomparisons\n\toverallagreement += agreement\n\nprint(\"Average human agreement: \" + str(overallagreement / overallcomparisons))\n\nwith open(\"/Users/tunder/Dropbox/pagedata/interrater/HumanDissensus.tsv\", mode=\"w\", encoding = \"utf-8\") as f:\n\tf.write(\"htid\\tagreement\\n\")\n\tfor key, value in volumepercents.items():\n\t\toutline = utils.pairtreelabel(key) + \"\\t\" + str(value) + \"\\n\"\n\t\tf.write(outline)\n\nimport 
ConfusionMatrix\nConfusionMatrix.confusion_matrix(correctbygenre, genremistakes)\n\n\n\n\n\n\n\n","repo_name":"tedunderwood/genre","sub_path":"munging/HumanDissensus.py","file_name":"HumanDissensus.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"13"} +{"seq_id":"73471124496","text":"from keras.layers import Input, Embedding, LSTM, Dense, Activation, GRU,Convolution1D,Dropout\nfrom keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Flatten\nfrom keras.layers import merge, Reshape, Activation\nfrom keras.layers.pooling import GlobalAveragePooling1D,GlobalMaxPooling1D, MaxPooling1D\n\nfrom keras.models import Model, Sequential\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils.np_utils import to_categorical\nfrom keras.optimizers import SGD\n\nfrom keras.regularizers import l2, activity_l2\n\nfrom keras.callbacks import CSVLogger, ReduceLROnPlateau\nfrom keras.callbacks import CSVLogger, ReduceLROnPlateau,ModelCheckpoint\n\nfrom pandas import HDFStore\nimport pandas as pd\nimport numpy as np\nimport os\n\nmax_features = 20000\nmaxlen=100\nbatch_size = 64\n\nhidden_dims = 250\nnb_epoch = 100\n\nEMBEDDING_DIM = 300\ndelta = 1.0\n\n# Convolution\nfilter_length = 5\nnb_filter = 64\npool_length = 4\n\nGLOVE_DIR = \"comments/glove/\"\n\n\n \ndef getDistribution(dataframe):\n ratings_matrix = dataframe.ix[:,:10]\n sum_of_ratings = (dataframe.ix[:,:10]).sum(axis=1)\n normalized_score_distribution = ratings_matrix.div(sum_of_ratings,axis='index')\n return normalized_score_distribution.as_matrix()\n\ndef getBinaryDistribution(dataframe):\n ratings_matrix = pd.concat([dataframe.ix[:,:5].sum(axis=1),dataframe.ix[:,5:10].sum(axis=1)], axis=1)\n # ratings_matrix = dataframe.ix[:,:10]\n sum_of_ratings = (dataframe.ix[:,:10]).sum(axis=1)\n normalized_score_distribution = ratings_matrix.div(sum_of_ratings,axis='index')\n return normalized_score_distribution.as_matrix()\n\n\nif __name__ == \"__main__\":\n store = HDFStore('../dataset/labels.h5')\n\n ava_table = store['labels_train']\n # ava_table = ava_table[( abs(ava_table.score - 5) >= delta)]\n\n ava_table = ava_table.sort_values(by=\"score\")\n comments_train = ava_table.ix[:,'comments'].as_matrix()\n\n\n Y_train = getDistribution(ava_table)\n\n\n\n ava_test = store['labels_test']\n comments_test = ava_test.ix[:,'comments'].as_matrix()\n\n Y_test = getDistribution(ava_test)\n\n X_train, X_test, word_index = tokenizeAndGenerateIndex(comments_train, comments_test)\n\n embeddings_index = generateIndexMappingToEmbedding()\n\n embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\n for word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n\n\n embedding_layer = Embedding(len(word_index) + 1,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=maxlen,\n trainable=False)\n\n comment_input = Input(shape=(100,), dtype='int32')\n embedded_sequences = embedding_layer(comment_input)\n # x = GRU(EMBEDDING_DIM)(embedded_sequences) # 0.8013\n # x = GRU(EMBEDDING_DIM,dropout_W = 0.3,dropout_U = 0.3)(embedded_sequences) #0.8109\n\n\n # x = Convolution1D(128, 5, activation='relu')(embedded_sequences)\n # x = MaxPooling1D(5)(x)\n # x = Convolution1D(128, 5, activation='relu')(x)\n # x = 
MaxPooling1D(5)(x)\n # x = Convolution1D(128, 5, activation='relu')(x)\n # x = MaxPooling1D(35)(x) # global max pooling\n x = GRU(EMBEDDING_DIM,dropout_W = 0.3,dropout_U = 0.3)(embedded_sequences)\n # x = Flatten()(x)\n # x = Dense(128, activation='relu')(x)\n # x = Dropout(0.5)(x)\n\n # x = Flatten()(embedded_sequences)\n preds = Dense(10, activation='softmax')(x)\n\n # question_input = Input(shape=(maxlen,), dtype='int32')\n # x = Embedding(input_dim=max_features,\n # output_dim=EMBEDDING_DIM, input_length=maxlen,\n # dropout=0.25)(question_input)\n # # x = Convolution1D(nb_filter=nb_filter,\n # # filter_length=filter_length,\n # # border_mode='valid',\n # # activation='relu',\n # # subsample_length=1)(x)\n # # x = MaxPooling1D(pool_length=pool_length)(x)\n # x = GRU(EMBEDDING_DIM,dropout_W = 0.3,dropout_U = 0.3)(x)\n # output = Dense(2, activation='softmax')(x)\n\n model = Model(input=comment_input, output=preds)\n # sgd = SGD(lr=0.01, decay=5e-4, momentum=0.9, nesterov=True, clipnorm=1., clipvalue=0.5)\n model.compile(loss='kld',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n\n time_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n checkpointer = ModelCheckpoint(filepath=\"text_distribution_weights{}.h5\".format(time_now), verbose=1, save_best_only=True)\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,patience=2)\n csv_logger = CSVLogger('training_distribution_text{}.log'.format(time_now))\n\n model.fit(X_train, Y_train,\n batch_size=128,\n nb_epoch=20,\n validation_data=(X_test, Y_test)\n , callbacks=[checkpointer, reduce_lr, csv_logger])\n\n\n out = model.predict(X_test)\n weights = np.array([1,2,3,4,5,6,7,8,9,10])\n score = (out * weights).sum(axis=1)\n\n good = [ 1 if row >= 5 else 0 for row in score]\n\n\n truth_good = ava_test.ix[:, \"good\"].as_matrix()\n\n np.sum(good == truth_good) / len(good)\n\n\n\n\n\n\n\n good = np.argmax(out,axis=1)\n good_truth = ava_test.ix[:, \"good\"].as_matrix()\n\n np.sum(good == truth_good) / len(good)","repo_name":"HiiYL/PiQual","sub_path":"comments/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"13"} +{"seq_id":"19588808875","text":"from typing import List, Union\n\nimport pandas as pd\nfrom xfeat.base import TransformerMixin\nfrom xfeat.types import XDataFrame\n\nclass DateTimeEncoder(TransformerMixin):\n def __init__(self, input_cols: List[str]):\n self.input_cols = input_cols\n \n def fit(self, input_df: XDataFrame):\n return self\n \n def transform(self, input_df: XDataFrame) -> XDataFrame:\n new_df = input_df.copy()\n \n for col in self.input_cols:\n new_df[col] = pd.to_datetime(input_df[col])\n \n return new_df[self.input_cols]\n\nclass DateTimeTransformEncoder(TransformerMixin):\n def __init__(\n self,\n input_cols:List[str],\n to:List[str] = ['dayofweek', 'is_weekend', 'hour', 'minute', 'second'],\n ) -> None:\n self.input_cols = input_cols\n self.to = to\n\n def fit(self, input_df: XDataFrame):\n return self\n\n def transform(self, input_df: XDataFrame) -> XDataFrame:\n new_df = input_df[self.input_cols].copy()\n \n transformed_cols = []\n for col in self.input_cols:\n for t in self.to:\n col_name = f'{col}_{t}'\n transformed_cols.append(col_name)\n\n if t == 'is_weekend':\n new_df[col_name] = (input_df[col].dt.dayofweek >= 5).astype(int)\n else:\n new_df[col_name] = getattr(input_df[col].dt, t)\n\n return 
new_df\n","repo_name":"ktrw1011/probspace-minpaku-service","sub_path":"racoon/racoon/encoder/_custom/_datetime.py","file_name":"_datetime.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"13"} +{"seq_id":"35792656019","text":"# -*- coding:utf-8 -*-\nfrom sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema\nfrom sqlalchemy.dialects.mysql import MEDIUMTEXT\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float,Numeric,DECIMAL\nfrom sqlalchemy.orm import relationship, backref, object_mapper\nfrom oslo.config import cfg\n\nfrom collecter.db.sqlalchemy import models\nfrom collecter.common import timeutils\n\n\nCONF = cfg.CONF\nBASE = declarative_base()\n\ndef MediumText():\n return Text().with_variant(MEDIUMTEXT(), 'mysql')\n\n\nclass BillingBase(\n# models.SoftDeleteMixin,\n# models.TimestampMixin,\n models.ModelBase):\n metadata = None\n \nclass BillingResource(BASE,BillingBase,models.TimestampMixin):\n __tablename__ = 'billing_resource'\n __table_args__ = ()\n resource_id=Column(String(64), primary_key=True)\n resource_name=Column(String(64))\n billing_item=Column(String(64),nullable=False)\n region_id=Column(String(64),nullable=False)\n sum=Column(Integer)\n parent_id=Column(String(64))\n status=Column(String(32))\n resource_type=Column(String(32),nullable=False)\n user_id=Column(String(64))\n tenant_id=Column(String(64))\n deleted_at=Column(DateTime)\n\nclass Using(BASE,BillingBase):\n __tablename__ = 'using'\n __table_args__ = ()\n using_id=Column(String(64), primary_key=True)\n resource_id=Column(String(64), ForeignKey('billing_resource.resource_id'),nullable=False)\n created_at=Column(DateTime, default=timeutils.utcnow)\n started_at=Column(DateTime)\n ended_at=Column(DateTime)\n tran_status=Column(String(32))\n billingresource=relationship('BillingResource',enable_typechecks=False)\n \n \n#\n#class User(BASE,SearchBase,models.TimetampDefault):\n# \"\"\"Represents a running service on a host.\"\"\"\n#\n# __tablename__ = 'user'\n# __table_args__ = ()\n#\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# username = Column(String(100),nullable=False)\n# password = Column(String(100))\n# realname = Column(String(255))\n# phone = Column(String(50))\n# telephone = Column(Integer)\n# email = Column(String(100))\n# comment = Column(String(4000))\n# type=Column(String(50))\n# state=Column(String(50))\n# isAdmin=Column(Boolean)\n## createTime = Column(DateTime, default=timeutils.utcnow,\n## nullable=False)\n## updateTime = Column(DateTime, default=timeutils.utcnow,\n## nullable=False)\n#\n#\n#class Core(BASE,SearchBase,models.TimetampDefault):\n# __tablename__ = 'core'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# core=Column(String(255),nullable=False)\n# corePath=Column(String(255))\n#\n#\n#class Corporation(BASE,SearchBase,models.TimetampDefault):\n# __tablename__ = 'corporation'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# name=Column(String(255),nullable=False)\n# info=Column(MediumText())\n# \n# \n#class Role(BASE,SearchBase,models.TimetampDefault): \n# __tablename__ = 'role'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# roleName=Column(String(100),nullable=False)\n# roleId=Column(String(100),nullable=False)\n#\n#\n#class 
CoreCorporation(BASE,SearchBase,models.TimetampDefault):\n# __tablename__ = 'core_corporation'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# coreId=Column(Integer,ForeignKey('core.id'),nullable=False)\n# corporationId=Column(Integer,ForeignKey('corporation.id'),nullable=False)\n# \n#class UserCorporation(BASE,SearchBase,models.TimetampDefault):\n# __tablename__ = 'user_corporation'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# userId=Column(Integer,ForeignKey('user.id'),nullable=False)\n# corporationId=Column(Integer,ForeignKey('corporation.id'),nullable=False)\n# \n#class UserRole(BASE,SearchBase,models.TimetampDefault):\n# __tablename__ = 'user_role'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# userId=Column(Integer,nullable=False)\n# roleId=Column(String(100),nullable=False)\n# \n#class Resource(BASE,SearchBase,models.TimetampDefault):\n# __tablename__ = 'resource'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# name=Column(String(255))\n# desc=Column(MediumText())\n# type=Column(String(50))\n# coreId=Column(Integer)\n# state=Column(String(50))\n# docId=Column(String(100))\n# scanSum=Column(Integer,default=0)\n# downloadSum=Column(Integer,default=0)\n# searchSum=Column(Integer,default=0)\n#\n#class LogAction(BASE,SearchBase,models.TimetampDefault):\n# __tablename__ = 'log_action'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# action=Column(String(255))\n# userId=Column(Integer)\n# username=Column(String(100))\n# hostIp=Column(String(100))\n# info=Column(MediumText())\n#\n#class log_data(BASE,SearchBase,models.TimetampDefault):\n# __tablename__ = 'log_data'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# docId=Column(String(100),nullable=False)\n# coreId=Column(Integer)\n# action=Column(String(100))\n# info=Column(MediumText())\n# userId=Column(Integer)\n# username=Column(String(100))\n# \n#class Token(BASE,SearchBase,models.TimetampDefault):\n# __tablename__ = 'token'\n# __table_args__ = ()\n# id = Column(Integer, primary_key=True,autoincrement=True)\n# token=Column(String(50),nullable=False)\n# userId=Column(Integer)\n# username=Column(String(100))","repo_name":"greshem/develop_python","sub_path":"billing_collecter_bao_guodong/collecter/db/object/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"25603070440","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\n\nfrom monitoring.organs.models import Organ\n\n\nUser = get_user_model()\n\nclass Host(models.Model):\n name = models.CharField(max_length=255)\n description = models.TextField()\n organ = models.ForeignKey(\n Organ,\n on_delete=models.CASCADE,\n related_name=\"hosts\",\n )\n host_admin = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name=\"hosts\",\n )\n\n def __str__(self):\n return f\"{self.name} -> {self.organ}\"\n","repo_name":"Amirbapiri/carpo-task","sub_path":"monitoring/hosts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"14672917175","text":"\nimport time\nimport pathlib\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom constants import *\nfrom constants 
import COVAR_DIR\n\n'''\nCompute covariance of on-policy F from saved samples.\n'''\n\ndef compute_covariances(F, samples_dir, target_n_samples):\n p = pathlib.Path(samples_dir)\n As_list = []\n file_count = 0\n stime = time.time()\n for samples_file in tqdm(p.iterdir()):\n As = np.load(str(samples_file))['arr_0']\n assert np.all(np.isclose(np.sum(As, axis=1), F.sum()))\n As_list.append(As)\n file_count += 1\n if target_n_samples is not None and file_count == int(target_n_samples / 1000):\n break\n rtime = time.time() - stime\n print(f\"{file_count} files loaded, time {rtime}\")\n\n stime = time.time()\n A_flat_all = np.concatenate(As_list, axis=0)\n del As_list\n rtime = time.time() - stime\n print(f\"A_flat_all.shape={A_flat_all.shape}, concat time {rtime}\")\n\n n_samples, _ = A_flat_all.shape \n assert n_samples == target_n_samples\n\n # rowvar=False => each col represents a variable\n stime = time.time()\n A_cov = np.cov(A_flat_all, rowvar=False, bias=True) \n rtime = time.time() - stime\n print(f\"|A_cov| = {A_cov.shape}, covar time {rtime}\")\n\n return A_cov, n_samples\n\n\nif __name__ == '__main__':\n stage = 0\n print(f'Stage {stage}')\n F = np.load(ON_POLICY_FILE[stage])['F']\n target_n_samples = 1000000\n if stage == 0:\n samples_dir = COVAR_DIR + \"samples/\"\n elif stage == 1:\n samples_dir = COVAR_DIR + \"samples_s2/\"\n A_cov, n_samples = compute_covariances(F, samples_dir, target_n_samples)\n if stage == 0:\n out_file = COVAR_DIR + f\"cov{n_samples}.npz\"\n elif stage == 1:\n out_file = COVAR_DIR + f\"cov{n_samples}_s2.npz\"\n np.savez_compressed(out_file, Cov=A_cov, n_samples=n_samples)\n","repo_name":"offpolicypeerreview/ope","sub_path":"aaai/covariance_computation.py","file_name":"covariance_computation.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"20438066080","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.colors import Normalize\nfrom matplotlib import cm\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom matplotlib.ticker import MultipleLocator\n\nlims=[-0.01,1.01]\n\ndef add_plot_pr_scatter(p_correct, r_correct, ax, title=None):\n \"\"\"\n \"\"\"\n \n #ax.plot(r_correct, p_correct, 'o', alpha=0.5, markersize=8, markeredgecolor=\"None\")\n binw=0.02\n bins=np.arange(0.0,1.0+binw,binw)\n\n cmap = cm.inferno\n norm = Normalize()\n\n h = ax.hist2d(np.array(r_correct), np.array(p_correct), \\\n bins=bins, norm=norm, cmin=binw, cmap=cmap, alpha=0.5, edgecolor=None)\n \n axins1 = inset_axes(ax, width=\"2%\", height=\"90%\", loc='center left')\n cb = plt.colorbar(h[3], cax=axins1)\n \n\n ax.set_xlim(lims)\n ax.xaxis.set_major_locator(MultipleLocator(0.1)) \n ax.xaxis.set_minor_locator(MultipleLocator(binw)) \n\n ax.set_ylim(np.array(lims))\n ax.yaxis.set_major_locator(MultipleLocator(0.1)) \n ax.yaxis.set_minor_locator(MultipleLocator(binw)) \n\n ax.set_xlabel(\"precision\")\n ax.set_ylabel(\"recall\")\n if title:\n ax.set_title(title)\n ax.grid()\n \n return None\n\n\ndef add_plot_pr_hist(bins, hist_p, hist_r, ax, title=None):\n \"\"\"\n \"\"\"\n binw=abs(bins[1]-bins[0])\n \n _ = ax.bar(bins[:-1], hist_p, alpha=0.5, width=binw, align='center', label=\"precision\")\n _ = ax.bar(bins[:-1], hist_r, alpha=0.5, width=binw, align='center', label=\"recall\")\n \n ax.set_xlabel(\"precision, recall\")\n ax.set_ylabel(\"frac.\")\n \n ax.set_xlim(lims)\n ax.xaxis.set_major_locator(MultipleLocator(0.1)) \n 
ax.xaxis.set_minor_locator(MultipleLocator(binw))\n \n ax.set_ylim(np.array(lims))\n ax.yaxis.set_major_locator(MultipleLocator(0.1)) \n \n ax.grid()\n \n if title: ax.set_title(title)\n \n ax.legend()\n \n return None\n\n\ndef add_plot_pr_cumsum(bins, hist_p, hist_r, ax, title=None):\n \"\"\"\n \"\"\"\n binw=abs(bins[1]-bins[0])\n \n cumsum_p = np.cumsum(hist_p)\n cumsum_r = np.cumsum(hist_r)\n \n _ = ax.plot(bins[:-1], cumsum_p, '-.', lw=2, alpha=0.95, label=\"precision\")\n _ = ax.plot(bins[:-1], cumsum_r, ':', lw=2, alpha=0.95, label=\"recall\")\n \n ax.plot([0.9,0.9],[0,1], '--', color='gray', lw=1.2, alpha=0.5)\n ax.plot([0,1],[0.9,0.9], '--', color='gray', lw=1.2, alpha=0.5)\n \n ax.set_xlabel(\"precision, recall\")\n ax.set_ylabel(\"rel. frac.\")\n \n ax.set_xlim(lims)\n ax.xaxis.set_major_locator(MultipleLocator(0.1)) \n ax.xaxis.set_minor_locator(MultipleLocator(binw)) \n \n ax.set_ylim(np.array(lims))\n ax.yaxis.set_major_locator(MultipleLocator(0.1)) \n \n if title:\n ax.set_title(title)\n ax.grid()\n ax.legend()\n \n return None\n\n\ndef add_plot_err_cumsum(bins, hist_c, hist_i, ax, title=None):\n \"\"\"\n \"\"\"\n binw=abs(bins[1]-bins[0])\n\n cumsum_c = np.cumsum(hist_c)\n cumsum_i = np.cumsum(hist_i)\n cumsum_0 = np.cumsum(hist_c + hist_i)\n \n _ = ax.plot(bins[:-1], cumsum_c, '--', alpha=0.95,label=\"Correct\")\n _ = ax.plot(bins[:-1], cumsum_i, '-.', alpha=0.95,label=\"Incorrect\")\n _ = ax.plot(bins[:-1], cumsum_0, '-' , alpha=0.95,label=\"Total\" ) \n \n ax.plot([0.9,0.9],[0,1], '--', color='gray', lw=1.2, alpha=0.5)\n ax.plot([0,1],[0.9,0.9], '--', color='gray', lw=1.2, alpha=0.5)\n \n ax.set_xlabel(\"error\")\n ax.set_ylabel(\"frac.\")\n \n ax.set_xlim(lims)\n ax.xaxis.set_major_locator(MultipleLocator(0.1)) \n ax.xaxis.set_minor_locator(MultipleLocator(binw)) \n \n ax.set_ylim(np.array(lims))\n ax.yaxis.set_major_locator(MultipleLocator(0.1)) \n \n if title:\n ax.set_title(title)\n ax.grid()\n ax.legend()\n \n return None\n\n\ndef plot_scores_stats(p_correct, r_correct, p_incorrect, r_incorrect, title):\n \"\"\"\n \"\"\"\n fig, axs = plt.subplots(2,3, figsize=[20,12])\n axs = axs.flatten()\n\n lims=[-0.01,1.01]\n \n # Scatter plot: recall vs precision (Correct)\n add_plot_pr_scatter(p_correct, r_correct, axs[0])\n \n # Histograms\n binw=0.02\n bins=np.arange(0.0,1.0+binw,binw)\n total = len(p_correct) + len(p_incorrect)\n \n hp, _ = np.histogram(np.array(p_correct), bins=bins, density=False)\n hr, _ = np.histogram(np.array( r_correct), bins=bins, density=False)\n\n ## Histogram plot for absolute fractions\n add_plot_pr_hist(bins, hp/total, hr/total, axs[1], f\"Correct stays\")\n axs[1].set_ylabel('abs. frac.')\n\n ## Cum. Sum for relative fractions\n add_plot_pr_cumsum(bins, hp/len(p_correct), hr/len(r_correct), axs[2])\n axs[2].set_ylabel('rel. frac.')\n \n \n # Scatter plot: recall vs precision (incorrect) \n add_plot_pr_scatter(p_incorrect, r_incorrect, axs[3])\n\n # Histograms\n hp, _ = np.histogram(np.array(p_incorrect), bins=bins, density=False)\n hr, _ = np.histogram(np.array( r_incorrect), bins=bins, density=False)\n\n ## Histogram plot for absolute fractions\n add_plot_pr_hist(bins, hp/total, hr/total, axs[4], f\"Incorrect stays\")\n axs[4].set_ylabel('abs. frac.')\n \n ## Cum. Sum for relative fractions\n add_plot_pr_cumsum(bins, hp/len(p_incorrect), hr/len(r_incorrect), axs[5])\n axs[5].set_ylabel('rel. 
frac.')\n \n fig.suptitle(title, fontsize=16)\n\n return fig, axs\n\n\ndef plot_errs_stats(errs_correct, errs_incorrect, title):\n \"\"\"\n \"\"\"\n fig, axs = plt.subplots(1,3, figsize=[20,6])\n axs = axs.flatten()\n\n lims=[-.01,1.01]\n\n total = len(errs_correct)+len(errs_incorrect)\n correct_frac = (len(errs_correct)/total)\n incorrect_frac = (len(errs_incorrect)/total)\n\n binw=0.01\n bins=np.arange(0.0,1.0+binw,binw)\n \n hpc, _ = np.histogram(np.array(errs_correct), bins=bins, density=False)\n hpi, _ = np.histogram(np.array(errs_incorrect), bins=bins, density=False)\n\n ## Histogram plot for absolute fractions\n add_plot_pr_hist(bins, hpc/total, hpi/total, axs[0], f\"Hist. (abs. frac.)\")\n axs[0].set_xlabel(\"error\")\n axs[0].set_xlabel(f\"error, bin width: {binw:6.2f}\") \n axs[0].legend(['Correct', 'Incorrect'])\n \n add_plot_pr_hist(bins, hpc/len(errs_correct), hpi/len(errs_incorrect), axs[1], f\"Hist. (rel. frac.)\")\n axs[1].set_xlabel(\"error\") \n axs[1].set_xlabel(f\"error, bin width: {binw:6.2f}\")\n axs[1].legend(['Correct', 'Incorrect'])\n\n add_plot_err_cumsum(bins, hpc/total, hpi/total, axs[2], f\"C.-sum. (abs. frac.)\")\n axs[2].set_xlim([9e-4, 1.1e0])\n axs[2].set_xscale('log')\n fig.suptitle(title, fontsize=16)\n \n return fig, axs\n\n\ndef plot_scores_stats_cominbed(p_correct, r_correct, p_incorrect, r_incorrect, title):\n \"\"\"\n \"\"\"\n fig, axs = plt.subplots(2,2, figsize=[20,12])\n axs = axs.flatten()\n\n lims=np.array([-.01,1.01])\n\n total = len(p_correct)+len(p_incorrect)\n correct_frac = (len(p_correct)/total)\n incorrect_frac = (len(p_incorrect)/total)\n\n # histograms \n binw=0.02\n bins=np.arange(0.0,1.0+binw,binw)\n\n hpc = np.histogram(np.array( p_correct), bins=bins, density=False)[0]/total\n hrc = np.histogram(np.array( r_correct), bins=bins, density=False)[0]/total \n hpi = np.histogram(np.array(p_incorrect), bins=bins, density=False)[0]/total\n hri = np.histogram(np.array(r_incorrect), bins=bins, density=False)[0]/total\n\n\n add_plot_pr_hist(bins, hpc, hpi, axs[0], f\"Prec. hist. (abs. frac.)\")\n axs[0].legend(['Correct', 'Incorrect'])\n axs[0].set_xlabel(\"Precision\")\n\n \n add_plot_err_cumsum(bins, hpc, hpi, axs[1], f\"Prec. C.-sum. (abs. frac.)\")\n axs[1].set_xlabel(\"Precision\")\n \n add_plot_pr_hist(bins, hrc, hri, axs[2], f\"Rec. hist. (abs. frac.)\")\n axs[2].legend(['Correct', 'Incorrect'])\n axs[2].set_xlabel(\"Recall\")\n \n add_plot_err_cumsum(bins, hrc, hri, axs[3], f\"Rec. C.-sum. (abs. 
frac.)\")\n axs[3].set_xlabel(\"Recall\")\n\n \n fig.suptitle(title, fontsize=16)\n \n\n return fig, axs\n","repo_name":"m-salewski/stay_classification","sub_path":"src/stay_classification/metrics_plotting.py","file_name":"metrics_plotting.py","file_ext":"py","file_size_in_byte":7919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"4973931086","text":"def solution(answers):\n corr = {1:0,2:0,3:0}\n one, len_one = [1,2,3,4,5], 5\n two, len_two = [2,1,2,3,2,4,2,5], 8\n three, len_three = [3,3,1,1,2,2,4,4,5,5], 10\n\n for i in range(len(answers)):\n if answers[i] == one[int(i%len_one)]:\n corr[1] += 1\n if answers[i] == two[int(i%len_two)]:\n corr[2] += 1\n if answers[i] == three[int(i%len_three)]:\n corr[3] += 1 \n \n maximum = max(corr.values())\n ans = []\n for i in corr:\n if maximum == corr[i]:\n ans.append(i)\n \n return ans\n","repo_name":"yeos60490/algorithm","sub_path":"programmers/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"21631877134","text":"from django.contrib.auth.models import Group\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.management.base import BaseCommand\n\nfrom wafer.talks.models import Talk\n\n\nFROM = 'content@debconf.org'\nSUBJECT = 'DebConf - talk scheduled: %(title)s'\nBODY = '''\\\nDear speaker / BoF convenor,\n\nWe are glad to announce that we have just published the DebConf 18 schedule:\n\nhttps://debconf18.debconf.org/schedule/\n\nYour Talk/BoF titled %(title)s has been scheduled at %(time)s in %(venue)s.\n\nWe kindly request that you check whether you are able to be there at the day\nand time we have allocated to your session. If not, please let us know, by\nreplying to this email, as soon as possible. 
We can either reschedule you at a\nmore convenient time, or if you cannot join us, we will be able to mark the\nslot as available for other activities.\n\nBest regards,\nThe DebConf Content Team\n'''\n\n\nclass Command(BaseCommand):\n    help = \"Notify speakers that their talks have been scheduled\"\n\n    def add_arguments(self, parser):\n        parser.add_argument('--yes', action='store_true',\n                            help='Actually do something'),\n\n    def badger(self, talk, dry_run):\n        try:\n            scheduleitem = talk.scheduleitem_set.get()\n        except ObjectDoesNotExist:\n            return\n\n        kv, created = talk.kv.get_or_create(\n            group=self.content_group,\n            key='notified_speaker',\n            defaults={'value': None},\n        )\n\n        rebadger_key = [scheduleitem.venue.id, scheduleitem.slots.first().id]\n        if kv.value == rebadger_key:\n            return\n\n        to = [user.email for user in talk.authors.all()]\n\n        subst = {\n            'title': talk.title,\n            'time': scheduleitem.get_start_time(),\n            'venue': scheduleitem.venue.name,\n        }\n\n        subject = SUBJECT % subst\n        body = BODY % subst\n\n        if dry_run:\n            print('I would badger speakers of: %s'\n                  % talk.title.encode('utf-8'))\n            return\n        email_message = EmailMultiAlternatives(\n            subject, body, from_email=FROM, to=to)\n        email_message.send()\n\n        kv.value = rebadger_key\n        kv.save()\n\n    def handle(self, *args, **options):\n        dry_run = not options['yes']\n        self.content_group = Group.objects.get_by_natural_key('Talk Mentors')\n\n        if dry_run:\n            print('Not actually doing anything without --yes')\n\n        for talk in Talk.objects.all():\n            self.badger(talk, dry_run)\n","repo_name":"muhammed-ajmal/heroku","sub_path":"dc18/management/commands/badger_speakers_scheduled.py","file_name":"badger_speakers_scheduled.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"38610315166","text":"import os\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_training_loss(mini_batch_loss_list, num_epoch, iter_per_epoch,\n                       result_dir=None, averaging_iteration=100):\n\n    plt.figure()\n    ax1 = plt.subplot(1, 1, 1)\n    ax1.plot(range(len(mini_batch_loss_list)),\n             mini_batch_loss_list, label='Minibatch Loss')\n\n    # only rescale once there are enough iterations to index past the first 1000\n    if len(mini_batch_loss_list) > 1000:\n        ax1.set_ylim([0, np.max(mini_batch_loss_list[1000:])*1.5])\n\n    ax1.set_xlabel('Iterations')\n    ax1.set_ylabel('Loss')\n\n    ax1.plot(np.convolve(mini_batch_loss_list, np.ones(averaging_iteration,)/averaging_iteration,\n                         mode='valid'),\n             label='Running Average')\n\n    ax1.legend()\n\n    ax2 = ax1.twiny()\n    newlabel = list(range(num_epoch+1))\n\n    newpos = [e*iter_per_epoch for e in newlabel]\n\n    ax2.set_xticks(newpos[::10])\n    ax2.set_xticklabels(newlabel[::10])\n\n    ax2.xaxis.set_ticks_position('bottom')\n    ax2.xaxis.set_label_position('bottom')\n    ax2.spines['bottom'].set_position(('outward', 45))\n    ax2.set_xlabel('Epochs')\n    ax2.set_xlim(ax1.get_xlim())\n\n    plt.tight_layout()\n\n\ndef plot_accuracy(train_acc_list, valid_acc_list, results_dir):\n    num_epochs = len(train_acc_list)\n\n    plt.plot(np.arange(1, num_epochs+1),\n             train_acc_list, label='Training')\n    plt.plot(np.arange(1, num_epochs+1),\n             valid_acc_list, label='Validation')\n\n    plt.xlabel('Epoch')\n    plt.ylabel('Accuracy')\n    plt.legend()\n\n    plt.tight_layout()\n\n    if results_dir is not None:\n        img_path = os.path.join(results_dir, 'acc.pdf')\n        plt.savefig(img_path)\n\n\n\ndef plot_confusion_matrix(conf_mat,\n                          hide_spines=False,\n                          hide_ticks=False,\n                          figsize=None,\n                          cmap=None,\n                          colorbar=False,\n                          show_absolute=True,\n                          show_normed=False,\n                          
class_names=None):\n\n    if not (show_absolute or show_normed):\n        raise AssertionError('Both shows are false.')\n\n    if class_names is not None and len(class_names) != len(conf_mat):\n        raise AssertionError('len(class_names) should be equal to number of classes in the dataset.')\n\n    total_samples = conf_mat.sum(axis=1)[:, np.newaxis]\n    normed_conf_mat = conf_mat.astype('float')/total_samples\n\n    # compute the default figure size before it is handed to plt.subplots\n    if figsize is None:\n        figsize = (len(conf_mat)*1.25, len(conf_mat)*1.25)\n\n    fig, ax = plt.subplots(figsize=figsize)\n    ax.grid(False)\n\n    if cmap is None:\n        cmap = plt.cm.Blues\n\n    if show_normed:\n        matshow = ax.matshow(normed_conf_mat, cmap=cmap)\n    else:\n        matshow = ax.matshow(conf_mat, cmap=cmap)\n\n    if colorbar:\n        fig.colorbar(matshow)\n\n    for i in range(conf_mat.shape[0]):\n        for j in range(conf_mat.shape[1]):\n\n            cell_text = \"\"\n            if show_absolute:\n                cell_text += format(conf_mat[i, j], 'd')\n                if show_normed:\n                    cell_text += \"\\n\" + '('\n                    cell_text += format(normed_conf_mat[i, j], '.2f') + ')'\n            else:\n                cell_text += format(normed_conf_mat[i, j], '.2f')\n\n            ax.text(x=j,\n                    y=i,\n                    s=cell_text,\n                    va='center',\n                    ha='center',\n                    color=\"white\" if normed_conf_mat[i,j] > 0.5 else \"black\")\n\n    if class_names is not None:\n        tick_marks = np.arange(len(class_names))\n        plt.xticks(tick_marks, class_names, rotation=90)\n        plt.yticks(tick_marks, class_names)\n\n    if hide_spines:\n        ax.spines['right'].set_visible(False)\n        ax.spines['left'].set_visible(False)\n        ax.spines['top'].set_visible(False)\n        ax.spines['bottom'].set_visible(False)\n\n    ax.yaxis.set_ticks_position('left')\n    ax.xaxis.set_ticks_position('bottom')\n\n    if hide_ticks:\n        ax.axes.get_yaxis().set_ticks([])\n        ax.axes.get_xaxis().set_ticks([])\n\n    plt.xlabel('Predicted Labels')\n    plt.ylabel('True Labels')\n\n    return fig, ax\n\n\n\n","repo_name":"Skanda-Bharadwaj/deepLearning","sub_path":"helper_functions/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"19686461170","text":"from django import forms\nfrom core.models import Orders, OrderDetails, Products, Suppliers\nfrom django.db import models\nfrom django.forms import ModelForm\n\nclass ReportForm(forms.Form):\n\n    supplier = forms.ModelChoiceField(Suppliers.objects.all(), \n            empty_label='-------'\n        )\n\n    DATEPICKER = {\n        'type': 'text',\n        'class': 'form-control',\n        'id': 'datetimepicker1'\n    }\n    \n    startdate = forms.DateField(\n        widget = forms.DateInput(attrs=DATEPICKER)\n    )\n    \n    DATEPICKER2 = {\n        'type': 'text',\n        'class': 'form-control ',\n        'id': 'datetimepicker2'\n    }\n\n    enddate = forms.DateField(\n        widget = forms.DateInput(attrs=DATEPICKER2)\n    )\n\n    def __init__(self, *args, **kwargs):\n        super(ReportForm, self).__init__(*args, **kwargs)\n        for visible in self.visible_fields():\n            visible.field.widget.attrs['class'] = 'form-control'\n        for key in self.fields:\n            self.fields[key].required = True\n","repo_name":"ksiazekm/northwind-python-bazy-danych-4","sub_path":"store/report/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"33938812850","text":"from setuptools import setup, find_packages\nimport codecs\nimport os\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as fh:\n    long_description = fh.read()\n\nsetup(\n    name='nano-keras',\n    version='1.2.1',\n    
packages=find_packages(),\n url='https://github.com/MarcelWinterot/nano-keras',\n license='MIT',\n author='Marcel Winterot',\n author_email='m.winterot1@gmail.com',\n description='Deep learning library made with numpy in the style of Keras API',\n long_description=long_description,\n long_description_content_type='text/markdown',\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: Microsoft :: Windows\",\n ],\n install_requires=['numpy'],\n keywords=['python', 'machine-learning',\n 'machine-learning-library', 'keras', 'numpy']\n)\n","repo_name":"MarcelWinterot/nano-keras","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"13"} +{"seq_id":"8310416196","text":"import requests\n\n# This is a module where we can get all other data\n# from other external services needed\n\ndef get_customer_data(customer_id):\n try:\n response = requests.get(f'https://jsonplaceholder.typicode.com/users/{customer_id}')\n response.raise_for_status() # Raise an exception if response status code is not 200\n data = response.json()\n customer_data = {'id': data['id'], 'name': data['name'], 'email': data['email']}\n return customer_data\n except (requests.exceptions.RequestException, KeyError):\n return None\n","repo_name":"rohteemie/transafe-booking-service","sub_path":"api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"26895984055","text":"\"\"\"App setup and starter\"\"\"\nfrom dash_extensions.enrich import MultiplexerTransform, DashProxy\nfrom dash_bootstrap_components.themes import DARKLY\nfrom strava_viewer import Layout, StravaData, get_callbacks\n\napp = DashProxy(__name__,\n title='Strava Viewer',\n external_stylesheets=[DARKLY],\n transforms=[MultiplexerTransform()],\n prevent_initial_callbacks=True)\n\n# for deployment\nserver = app.server\n\n# data hook\ndata = StravaData(data='redis')\n\n# definition of app layout, instance is callable\napp.layout = Layout(data=data)()\n\n# import callbacks\nget_callbacks(app=app, data=data)\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"MWKSolution/strava-viewer","sub_path":"dash_viewer.py","file_name":"dash_viewer.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"27323352339","text":"#!/usr/bin/env python\n\"\"\"\nStore TSM mounted volumes backup status for easy identification of volumes not being backed up.\n\nPierre Cazenave - pwcazenave@gmail.com\n\nChangeLog\n 14/02/2021 First release.\n\n\"\"\"\n\n# TODO:\n# [done] - Add a simple UI with host/paths/tick|cross for quickly identifying which hosts are correctly configured\n# [done] - Add filter for only showing improperly configured hosts\n# [done] - Add front-end for adding exclusions\n\nimport logging\nimport os\n\nimport flask\nimport flask_wtf\nfrom flask_sqlalchemy import SQLAlchemy\n\nhost = os.environ.get('HOST', '0.0.0.0')\nport = int(os.environ.get('PORT', 8000))\ndebug = 'DEBUG' in os.environ\nuse_reloader = os.environ.get('USE_RELOADER', '1') == '1'\n\nroot_logger = logging.getLogger()\nhandler = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s %(name)-15s %(levelname)-4s %(message)s', '%Y-%m-%d 
%H:%M:%S')\nhandler.setFormatter(formatter)\nroot_logger.addHandler(flask.logging.default_handler)\nif debug:\n root_logger.setLevel(logging.DEBUG)\nelse:\n root_logger.setLevel(logging.INFO)\n\nlogger = logging.getLogger(__name__)\nlogger.info('Starting app')\n\napp = flask.Flask(__name__, static_url_path='')\napp.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///tsm.db'\napp.config['SECRET_KEY'] = os.urandom(32)\n# Create a base database object.\ndb = SQLAlchemy(app)\n\n# Configure CSRF protection.\ncsrf = flask_wtf.csrf.CSRFProtect(app)\napp.config['SECRET_KEY'] = os.urandom(32)\ncsrf.init_app(app)\n\n# Remove unnecessary whitespace in the rendered HTML\napp.jinja_env.trim_blocks = True\napp.jinja_env.lstrip_blocks = True\n\n\nclass HostDirectory(db.Model):\n \"\"\"\n Our database table for the host config. One row per host per mount.\n\n \"\"\"\n\n __tablename__ = 'backup'\n __table_args__ = {'extend_existing': True}\n\n # Set up the database columns\n id_primary = db.Column('id', db.Integer, primary_key=True)\n hostname = db.Column('hostname', db.String(200))\n mountpoint = db.Column('mountpoint', db.String(500))\n backedup = db.Column('backedup', db.Integer, default=0)\n ignore = db.Column('ignore', db.Integer, default=0)\n\n\n@app.route('/')\ndef root():\n \"\"\"\n Show all the hosts in the database in an easy to view manner.\n\n \"\"\"\n\n hostinfo = HostDirectory.query.filter_by().order_by(HostDirectory.hostname, HostDirectory.backedup, HostDirectory.mountpoint).all()\n\n # Group into: not backed up, ignore and backed up, each of which is per host.\n hosts = {'bad': {}, 'good': {}, 'ignored': {}}\n for record in hostinfo:\n config = {'mountpoint': record.mountpoint, 'backedup': record.backedup, 'ignore': record.ignore}\n dest = ''\n if record.ignore == 1:\n dest = 'ignored'\n elif record.backedup == 0:\n dest = 'bad'\n elif record.backedup == 1:\n dest = 'good'\n try:\n hosts[dest][record.hostname].append(config)\n except KeyError:\n hosts[dest][record.hostname] = [config]\n\n return flask.render_template('index.html', hostinfo=hosts)\n\n\n@app.route('/update', methods=['GET', 'POST'])\ndef update():\n \"\"\"\n Add or update a host backup path and its backup status.\n\n \"\"\"\n\n if flask.request.method == 'POST':\n hostname = flask.request.form['hostname']\n mountpoint = flask.request.form['mountpoint'].rstrip('/') # trim trailing slashes\n backedup = flask.request.form['backedup']\n ignore = flask.request.form['ignore']\n redirect = flask.request.form['redirect']\n else:\n hostname = flask.request.args.get('hostname')\n mountpoint = flask.request.args.get('mountpoint').rstrip('/') # trim trailing slashes\n backedup = flask.request.args.get('backedup')\n ignore = flask.request.args.get('ignore')\n redirect = flask.request.args.get('redirect')\n\n if backedup is None:\n backedup = 0\n else:\n backedup = int(backedup)\n if ignore is None:\n ignore = 0\n else:\n ignore = int(ignore)\n if redirect is None:\n redirect = 0\n else:\n redirect = int(redirect)\n\n # Remove the existing entry for this host/path combo and replace it with the new data.\n hostdir = HostDirectory.query.filter_by(hostname=hostname, mountpoint=mountpoint)\n if hostdir:\n hostdir.delete()\n hostdir = HostDirectory(hostname=hostname, mountpoint=mountpoint, backedup=backedup, ignore=ignore)\n\n db.session.add(hostdir)\n db.session.commit()\n\n if redirect == 1:\n return flask.redirect(flask.url_for('root'))\n else:\n response = {'status': True, 'status_code': 200}\n return 
flask.jsonify(response)\n\n\n@app.route('/query', methods=['GET', 'POST'])\ndef query():\n \"\"\"\n Find out the current backup status for a given host.\n\n \"\"\"\n\n if flask.request.method == 'POST':\n hostname = flask.request.form['hostname']\n mountpoint = flask.request.form['mountpoint'].rstrip('/') # trim trailing slashes\n else:\n hostname = flask.request.args.get('hostname')\n mountpoint = flask.request.args.get('mountpoint').rstrip('/') # trim trailing slashes\n\n hostdir = HostDirectory.query.filter_by(hostname=hostname, mountpoint=mountpoint).first()\n\n response = {'status': True, 'status_code': 200}\n if hostdir:\n response['backedup'] = hostdir.backedup\n else:\n response['backedup'] = 0\n\n return flask.jsonify(response)\n\n\n@app.route('/clean', methods=['GET', 'POST'])\ndef remove_all():\n \"\"\"\n Remove all entries in the database.\n\n \"\"\"\n\n HostDirectory.query.delete()\n db.session.commit()\n\n response = {'status': True, 'status_code': 200}\n return flask.jsonify(response)\n\n\ndef main():\n app.run(host=host,\n port=port,\n debug=debug,\n use_reloader=use_reloader,\n extra_files=['./app/templates/index.html',\n './app/static/js/scripts.js',\n './app/static/css/style.css'])\n\n\nif __name__ == '__main__':\n\n db.create_all()\n main()\n","repo_name":"pwcazenave/tsm","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"22829263576","text":"import pygame.gfxdraw\nimport cv2\n\ndef get_list_from_bitmap(bitmap_path):\n img = cv2.imread(bitmap_path, 0)\n _, bit_map_list = cv2.threshold(img, 127, 1, cv2.THRESH_BINARY)\n return bit_map_list.tolist()\n\ndef draw_map (surface, color ,map_data, pos, pixel_size):\n x_coord = pos[0]\n y_coord = pos[1]\n coords = [x_coord,y_coord]\n\n for x in map_data:\n y_coord = pos[1]\n for y in x:\n #print(\"x: \" + str(x_coord) + \", y: \" + str(y_coord))\n\n if y == 0:\n draw_rect_filled(surface,color, (y_coord,x_coord),(pixel_size,pixel_size))\n #print(\"painted pixel!\")\n\n y_coord+= pixel_size\n x_coord += pixel_size\n\ndef draw_single_pixel(surface, color, pos):\n surface.fill(color, (pos, (1, 1)))\n\ndef draw_rect_filled(surface, color, pos, size):\n surface.fill(color, (pos, size))\n\ndef find_spawn_point(map_data,map_origin):\n\n for x in range(1,len(map_data)):\n\n for y in range(1,len(map_data[x])):\n if map_data[x][y] == 1:\n print(\"found spawn, x: \" + str(x) + \", y: \" + str(y))\n return (x,y)\n\ndef draw_player(coords, pixel_magnfication,surface):\n draw_rect_filled(surface,\"red\",coords,(pixel_magnfication,pixel_magnfication))\n\ndef update_player_pos(current_pos,new_pos,surface,pixel_mag):\n draw_rect_filled(surface,\"white\",current_pos,(pixel_mag,pixel_mag))\n draw_rect_filled(surface,\"red\",new_pos,(pixel_mag,pixel_mag))\n\ndef check_player_collision(potential_pos,map_data):\n print(\"checking collision with, X: \"+str(potential_pos))\n if map_data[potential_pos[1]][potential_pos[0]] == 0:\n print(\"COLLISION DETECTED\")\n return True\n\ndef check_user_control_of_player(current_pos,current_pos_relative,pygameobj,movement_distance,surface, event,map_data):\n new_pos_r = current_pos\n new_pos_relative_r = current_pos_relative\n\n keys = pygameobj.key.get_pressed()\n\n if keys[pygameobj.K_DOWN]:\n new_pos = (current_pos[0],current_pos[1]+movement_distance)\n new_pos_relative = (current_pos_relative[0],current_pos_relative[1] + 1)\n if not 
check_player_collision(new_pos_relative,map_data):\n            print(\"moved down\")\n            print(\"relative pos: \" +str(new_pos_relative))\n            update_player_pos(current_pos,new_pos,surface,movement_distance)\n            return (new_pos,new_pos_relative)\n    elif keys[pygameobj.K_UP]:\n        new_pos = (current_pos[0],current_pos[1]-movement_distance)\n        new_pos_relative = (current_pos_relative[0],current_pos_relative[1] - 1)\n        if not check_player_collision(new_pos_relative,map_data):\n            print(\"moved up\")\n            print(\"relative pos: \" +str(new_pos_relative))\n            update_player_pos(current_pos,new_pos,surface,movement_distance)\n            return (new_pos,new_pos_relative)\n    elif keys[pygameobj.K_LEFT]:\n        new_pos = (current_pos[0]-movement_distance,current_pos[1])\n        new_pos_relative = (current_pos_relative[0] - 1,current_pos_relative[1])\n        if not check_player_collision(new_pos_relative,map_data):\n            print(\"left\")\n            update_player_pos(current_pos,new_pos,surface,movement_distance)\n            return (new_pos,new_pos_relative)\n    elif keys[pygameobj.K_RIGHT]:\n        new_pos = (current_pos[0]+movement_distance,current_pos[1])\n        new_pos_relative = (current_pos_relative[0]+1,current_pos_relative[1])\n        if not check_player_collision(new_pos_relative,map_data):\n            print(\"right\")\n            print(\"relative pos: \" +str(new_pos_relative))\n            update_player_pos(current_pos,new_pos,surface,movement_distance)\n            return (new_pos,new_pos_relative)\n\n\n    return (new_pos_r,new_pos_relative_r)\n","repo_name":"ggalisky/Kelp_sim","sub_path":"pygame_copy/kelp_sim/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"43364371179","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar  3 20:47:54 2019\n\n@author: aguec\n\"\"\"\n\nfrom random import randrange\n\nsmall_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] # etc.\n\ndef probably_prime(n, k):\n    \"\"\"Return True if n passes k rounds of the Miller-Rabin primality\n    test (and is probably prime). 
Return False if n is proved to be\n composite.\n\n \"\"\"\n if n < 2: return False\n for p in small_primes:\n if n < p * p: return True\n if n % p == 0: return False\n r, s = 0, n - 1\n while s % 2 == 0:\n r += 1\n s //= 2\n for _ in range(k):\n a = randrange(2, n - 1)\n x = pow(a, s, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(r - 1):\n x = pow(x, 2, n)\n if x == n - 1:\n break\n else:\n return False\n return True\n\n\ndef prime_ratio(n):\n diagonal = [1]\n primes = []\n for i in range(3,n+1,2):\n iv = i**2\n diagonal.append(iv)\n test = probably_prime(iv,10)\n if test == True:\n primes.append(iv)\n \n for j in range(1,4):\n temp = iv - j*(i-1)\n diagonal.append(temp)\n test2 = probably_prime(temp,10)\n if test2 == True:\n primes.append(temp)\n ratio = len(primes)/len(diagonal)\n print(ratio)","repo_name":"aguecig/Project-Euler","sub_path":"Problems 51 - 60/pe_58.py","file_name":"pe_58.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"12069567350","text":"\nimport boto3\nimport pandas as pd\n\n\n\nif False:\n file= open(\"/Users/juanerolon/.aws/credentials\")\n lst = []\n for line in file:\n lst.append(line)\n\n print(lst)\n\n\nif True:\n # Create an S3 client\n s3 = boto3.client('s3')\n # Call S3 to list current buckets\n response = s3.list_buckets()\n # Get a list of all bucket names from the response\n buckets = [bucket['Name'] for bucket in response['Buckets']]\n # Print out the bucket list\n print(\"Bucket List: %s\" % buckets)\n\nif True:\n\n client = boto3.client('s3') #low-level functional API\n resource = boto3.resource('s3') #high-level object-oriented API\n my_bucket = resource.Bucket('ml-rolon') #subsitute this for your s3 bucket name.\n obj = client.get_object(Bucket='ml-rolon', Key='datasets/cyber/alerts_sample_data.csv')\n df_01 = pd.read_csv(obj['Body'])\n\nprint(df_01.columns)\nprint(df_01.describe())\n\n\nif False:\n # Create an S3 client\n s3 = boto3.client('s3')\n\n filename = 'test.txt'\n bucket_name = 'ml-rolon'\n\n # Uploads the given file using a managed uploader, which will split up large\n # files automatically and upload parts in parallel.\n s3.upload_file(filename, bucket_name, filename)\n\n\n","repo_name":"juanerolon/elastic-playground","sub_path":"test_aws_access.py","file_name":"test_aws_access.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"14181128578","text":"for _ in range(int(input())):\n nums=list(map(int,input().split()))\n res = nums[0]+nums[1]\n ans=str(res)\n total=0\n for char in ans:\n if char == '0' or char == '6' or char == '9':\n total+=6\n elif char == '1':\n total+=2\n elif char == '2' or char == '3' or char == '5':\n total+=5\n elif char == '4':\n total+=4\n elif char == '8':\n total+=7\n elif char == '7':\n total+=3\n print(total)","repo_name":"jayhawk24/codechef","sub_path":"matches.py","file_name":"matches.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"39824514014","text":"\"\"\"\n# My first app\nHere's our first attempt at using data to create a table:\n\"\"\"\n\nimport numpy as np\nimport streamlit as st\nimport pandas as pd\nfrom functions import ask_chatgpt, filter_dataframe, pd_read_csv\n\n\nst.image(\"resources/amtsblatt_logo.png\", width=200)\nst.header(\"Eigentumsübertragungen 2020-2023\")\n\np = 
r\"eigentumsuebertragungen_2020-23.csv\"\ndf =pd_read_csv(p)\ndf_fil = filter_dataframe(df)\nst.dataframe(df_fil)\n\nimport plotly.express as px\n\n# fig = px.scatter_mapbox(df_fil, lat=\"KoordinatenLat\", lon=\"KoordinatenLon\", zoom=3)\nfig = px.scatter_mapbox(df_fil, hover_data=df_fil, lat=df_fil[\"KoordinatenLat\"], lon=df_fil.KoordinatenLon, zoom=3)\n\nfig.update_layout(mapbox_style=\"open-street-map\")\nfig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\nst.plotly_chart(fig)\n\nst.header(\"Amtsblatt\")\np2 = r\"kapiteltext_2020-23.csv\"\n# p2 = r\"kapiteltext-extended_2023.csv\"\ndf2 =pd_read_csv(p2)\n\ndf2_fil = filter_dataframe(df2)\nst.dataframe(df2_fil)\n\nquestion = st.text_input('ChatGPT Frage')\n\nif st.button('Frag ChatGPT!'):\n if len(question) == 0:\n st.write(\"Bitte eine Frage eingeben!\")\n else:\n text = df2_fil.to_string()\n if len(text)/4 > 4000:\n st.write(\"Auswahl zu gross!\")\n else:\n # st.write(text)\n ans = ask_chatgpt(question, text)\n st.write(ans)\n\n# df = pd.DataFrame(\n# # np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],\n# np.expand_dims(np.array([46.87839889526367, 8.647464752197266]),0),\n# # [8.647464752197266],46.87839889526367\n# columns=['lat', 'lon'])\n\n# st.map(df)\n\n\n\n\n# st.header(\"Investitionen\")\n# text = load_txt(\"E:\\Projects\\hackdays-streamlit\\input_text1.txt\")\n# st.write(text)\n\n# question = st.text_input('ChatGPT Frage')\n\n# if st.button('Frag ChatGPT!'):\n# if len(question) == 0:\n# st.write(\"Bitte eine Frage eingeben!\")\n# else:\n# ans = ask_chatgpt(question, text)\n# st.write(ans)\n","repo_name":"datahackdaysuri2023-amtsblatt/amtsblatt-streamlit-gpt","sub_path":"my_app.py","file_name":"my_app.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"42365071151","text":"import threading\nfrom contextlib import closing\nfrom socket import *\nimport time \nimport re\nimport select\nimport sys\nimport traceback\nimport numpy as np\nfrom lmtlp_reduce import lmtlp_reduce\n \nclass LmtlpReduceSrv :\n def __init__ (self) :\n self.listenPort = 16213\n self.bufsize = 8192\n self.thisHost = '0.0.0.0'\n self.debug = 0x0\n if(self.debug & 0x1): print(\"debug \", 0x1)\n\n #open listen port \n print(\"listen at port %d\" % self.listenPort)\n self.tcpServerSock = socket(AF_INET, SOCK_STREAM)\n self.tcpServerSock.setsockopt(SOL_SOCKET, SO_REUSEADDR,1)\n self.tcpServerSock.bind((self.thisHost, self.listenPort))\n self.tcpServerSock.listen(10)\n \n def __del__ (self) :\n self.tcpServerSock.close()\n \n def loop(self) :\n while True :\n conn, addres = self.tcpServerSock.accept()\n if(self.debug & 0x1):\n print(\"accepted client\")\n while True:\n msg = conn.recv(self.bufsize)\n if len(msg) == 0 :\n if(self.debug & 0x1):\n print('connection is closed by client')\n print('closing socket...')\n break\n\n print (msg)\n status,x,y,pk,plotfile = self.manageCommand(msg)\n res = np.zeros(4)\n\n print (status, x, y, pk)\n if status == 0:\n #result = str.encode('0,{x:1.3f},{y:1.3f},{pk:1.6f}'.format(x=x, y=y, pk=pk))\n res[0] = status\n res[1] = x\n res[2] = y\n res[3] = pk\n else :\n #result = b'-1,0,0,0'\n res[0] = -1\n res[1] = 0\n res[2] = 0\n res[3] = 0\n\n print ('res', res)\n conn.send(res.tobytes())\n if plotfile is not None:\n try:\n with open(plotfile, 'rb') as f:\n conn.sendfile(f, 0)\n except Exception as e:\n print (e)\n break\n\n\n conn.close()\n\n def manageCommand(self, msg) :\n print (msg)\n try:\n return 
lmtlp_reduce(msg)\n        except Exception as e:\n            print (e)\n            traceback.print_exc()\n            return -1,0,0,0,None\n\ndef lmtlp_reduce_srv() :\n    srv = LmtlpReduceSrv()\n    srv.loop()\n\nif __name__ == '__main__':\n    lmtlp_reduce_srv()\n","repo_name":"teuben/LinePointing","sub_path":"lmtlp_reduce_srv.py","file_name":"lmtlp_reduce_srv.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"32810201720","text":"# Boxplot (box diagram)\n# -> a data visualization technique that represents the variation of the data by means of quartiles.\n\nimport matplotlib.pyplot as plt\nimport random\n\nvetor = []\n\nfor i in range(100):\n    num = random.randint(0, 50)\n    vetor.append(num)\n\nplt.boxplot(vetor)\nplt.title('Boxplot')\nplt.show()\n\n","repo_name":"leonarita/Python","sub_path":"Ciência de Dados/2-Matplot/6-Boxplot.py","file_name":"6-Boxplot.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"21418183726","text":"import random\nimport time\n\ndef createAnswer():\n    randomNumber = random.randint(1, 6)\n\n    if (randomNumber == 1):\n        return(\"Rock\")\n\n    elif (randomNumber == 2):\n        return(\"Paper\")\n\n    elif (randomNumber == 3):\n        return(\"Scissors\")\n\n    elif (randomNumber == 4):\n        return(\"Lizard\")\n\n    elif (randomNumber == 5):\n        return(\"Spock\")\n\n    elif (randomNumber == 6):\n        return(\"Kevin\")\n\ndef computerAnswer(userAnswer):\n    randomNumber = random.randint(0, 1)\n    \n    if userAnswer == \"Rock\":\n        if(randomNumber == 1):\n            print(\"Paper wraps Rock\")\n        else:\n            print(\"Spock vaporizes Rock\")\n    \n    elif userAnswer == \"Paper\":\n        if(randomNumber == 1):\n            print(\"Scissors cuts Paper\")\n        else:\n            print(\"Lizard eats Paper\")\n    \n    elif userAnswer == \"Scissors\":\n        if(randomNumber == 1):\n            print(\"Rock crushes Scissors\")\n        else:\n            print(\"Spock smashes Scissors\")\n    \n    elif userAnswer == \"Lizard\":\n        if(randomNumber == 1):\n            print(\"Rock Smashes Lizard\")\n        else:\n            print(\"Scissors decapitates Lizard\")\n    \n    elif userAnswer == \"Spock\":\n        if(randomNumber == 1):\n            print(\"Lizard poisons Spock\")\n        else:\n            print(\"Paper disproves Spock\")\n\n    elif userAnswer == \"Kevin\":\n        randomNumber = random.randint(1, 5)\n        if (randomNumber == 5):\n            print(\"Kevin runs off with Scissors\")\n\n        elif (randomNumber == 4):\n            print(\"Kevin eats Rock\")\n\n        elif (randomNumber == 3):\n            print(\"Kevin attacks Spock\")\n\n        elif (randomNumber == 2):\n            print(\"Kevin tears Paper\")\n        \n        elif (randomNumber == 1):\n            print(\"Kevin eats Lizard\")\n\n    else:\n        print(\"who knows\")\n\nwhile True:\n    answer = createAnswer()\n    print(answer)\n    computerAnswer(answer)\n    time.sleep(0.1) \n    \n#while True:\n    # userAnswer = input(\"What do you choose? (Rock, Paper, Scissors, Lizard, Spock, Kevin)? 
(Or type exit to quit) \")\n    # if userAnswer == \"exit\":\n    #     break\n    #computerAnswer(userAnswer)","repo_name":"DuperDerpyDragon/Python-Practice-for-School","sub_path":"rockpaperscissors.py","file_name":"rockpaperscissors.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"13188916892","text":"\r\n\r\n\r\n#Download the dataset from link below\r\n#https://he-s3.s3.amazonaws.com/media/hackathon/machine-learning-challenge-4/sample/c771afa0-c-HackerEarthML4Updated.zip\r\n\r\nimport numpy as np\r\nfrom sklearn import cross_validation\r\nimport pandas as pd\r\nfrom pandas import Series,DataFrame\r\nfrom sklearn.ensemble import AdaBoostClassifier #Importing the classifier here we are using AdaBoost\r\n\r\ndf=pd.read_csv('train_data.csv') #Reading the training data\r\ndf.drop(['connection_id'],1,inplace=True) #Removing less important column\r\n\r\n\r\nx=np.array(df.drop(['target'],1)) # Our features \r\ny=np.array(df['target']) #Our Labels\r\n\r\nxtr,xt,ytr,yt=cross_validation.train_test_split(x,y,test_size=0.2) #splitting data into training and testing set\r\nclf=AdaBoostClassifier(n_estimators=42)\r\nclf.fit(xtr,ytr) # This is where the actual learning happens\r\nconfidence=clf.score(xt,yt) \r\nprint(confidence)\r\ndf1=pd.read_csv('test_data.csv')\r\ndf2=df1\r\ndf1=df1.drop(['connection_id'],1)\r\nxts=np.array(df1)\r\npreds=np.array(clf.predict(xts))\r\ndf2['target']=pd.Series(preds)\r\ndf2=DataFrame(data=df2,columns=['connection_id','target'])\r\ndf2.to_csv('predicts.csv',index=False) \r\n\r\n\r\n\t\r\n\r\n\r\n","repo_name":"revantt/Projects","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"31070828999","text":"class A:\n    def __init__(self):\n        self.val = [i for i in range(100)]\n\n    def __getitem__(self, arg):\n        print(arg)\n        argType = type(arg)\n        if argType == tuple:\n            print(len(arg))\n            res = self.val\n            for item in arg:\n                res = res[item]\n            return res\n        elif argType == slice:\n            return self.val[arg]\n        elif argType == int:\n            return self.val[arg]\n\n    def __str__(self):\n        return f\"{self.val}\"\n\n\na = A()\n# print(a)\nprint(a[:5])\n# print(a[5:10, :])\n# multiple slices\nprint(a[5:, 5:, 5:])\n","repo_name":"Littlefean/SmartPython","sub_path":"025 切片和自定义切片/自定义对象的切片.py","file_name":"自定义对象的切片.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"13"}
{"seq_id":"16516497916","text":"import argparse\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nimport torch\nimport torchaudio\n\n\ndef encode_dataset(args):\n    in_dir, out_dir = Path(args.in_dir), Path(args.out_dir)\n    out_dir.mkdir(parents=True, exist_ok=True)\n\n    print(\"Loading checkpoints\")\n    cpc = torch.hub.load(\"bshall/cpc:main\", \"cpc\").cuda()\n    kmeans = torch.hub.load(\"bshall/cpc:main\", \"kmeans50\")\n\n    print(f\"Encoding dataset at {in_dir}\")\n    for in_path in tqdm(sorted(list(in_dir.rglob(\"*.wav\")))):\n        wav, sr = torchaudio.load(in_path)\n        assert sr == 16000\n\n        wav = wav.unsqueeze(0).cuda()\n        x = cpc.encode(wav).squeeze().cpu().numpy()\n        x = StandardScaler().fit_transform(x)\n        codes = kmeans.predict(x)\n\n        relative_path = in_path.relative_to(in_dir)\n        out_path = out_dir / relative_path.with_suffix(\"\")\n        out_path.parent.mkdir(parents=True, 
exist_ok=True)\n\n np.save(out_path.with_suffix(\".npy\"), codes)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Encode an audio dataset using CPC-big (with speaker normalization and discretization).\"\n )\n parser.add_argument(\"in_dir\", type=Path, help=\"Path to the directory to encode.\")\n parser.add_argument(\"out_dir\", type=Path, help=\"Path to the output directory.\")\n args = parser.parse_args()\n encode_dataset(args)\n","repo_name":"bshall/cpc","sub_path":"encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"13"} +{"seq_id":"32813362473","text":"from prefect import task, Flow, Task, Parameter\n\n\n@task\ndef say_hello():\n print(\"Hello, world!\")\n\n\n@task\ndef say_hi(name: str) -> None:\n print(\"Hi, {}!\".format(name))\n\n\nclass AddTask(Task):\n\n def __init__(self, default: int, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.default = default\n\n def run(self, x: int, y: int = None) -> int:\n if y is None:\n y = self.default\n return x + y\n\n\n# initialize the task instance\nadd = AddTask(default=1)\n\nwith Flow(\"My first flow!\") as flow:\n task_name = Parameter('flow_name')\n say_hello()\n say_hi(task_name)\n first_result = add(1, y=2)\n second_result = add(x=first_result, y=100)\n\nstate = flow.run(flow_name=\"pengfei\")\n\n# parameter feature can map the workflow parameter to task parameter","repo_name":"pengfei99/Prefect_tuto","sub_path":"basic/exampl1.py","file_name":"exampl1.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"10878618377","text":"\nprint(\"-----------------------\")\nprint(\"| Connect with CTACT |\")\nprint(\"-----------------------\")\n\nux = True\nwhile ux:\n user_choice = input(\n \"\\nEnter 'V' to view contacts, 'D' to delete or 'A' to add or 'E' to exit: \").upper()\n\n if user_choice == 'V':\n with open(\"data.txt\", 'r') as data:\n contact = data.read()\n print(contact)\n\n elif user_choice == 'A':\n with open('data.txt', 'a') as data:\n firstname = input(\"\\nFirst name of your contact: \").capitalize()\n lastname = input(\"Last name of your contact: \").capitalize()\n phonenumber = input(\n \"Enter the phone number digits in this format 'xxxxxxxxxx': \")\n\n data.write(\n f\"\\n{firstname} {lastname} ({phonenumber[0:3]}) {phonenumber[3:6]}-{phonenumber[6:10]}\")\n\n elif user_choice == 'D':\n with open('data.txt', 'r') as data:\n lines = data.readlines()\n name = input(\n \"Enter the first name of the contact you want to delete: \").upper()\n\n with open(\"data.txt\", \"w\") as f:\n for line in lines:\n n = (line.strip(\"\\n\")[0:4])\n if n.upper() == name[0:4]:\n deleted = [line]\n print(f\"\\nDeleted: {deleted[0]}\")\n\n else:\n f.write(line)\n\n else:\n print(\"\\n\\nThanks for stopping by!\")\n ux = False\n","repo_name":"JosephCardoza36/Week-13-Program-Assign","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"553142146","text":"import os\nfrom setuptools import setup, find_packages\n\nextras = []\nif os.path.exists('/etc/bash_completion.d'):\n extras += [('/etc/bash_completion.d', ['extra/bash-completion/ixrandr'])]\nelif os.path.exists('/usr/share/bash-completion/'):\n extras += [('/usr/share/bash-completion/', 
['extra/bash-completion/ixrandr'])]\n\nif os.path.exists('/usr/share/zsh/site-functions/'):\n extras += [('/usr/share/zsh/site-functions/', ['extra/zsh-completion/_ixrandr'])]\nprint(extras) \nsetup(\n name=\"ixrandr\",\n version=\"0.2\",\n author=\"Inemajo\",\n author_email=\"inemajo@fsfe.org\",\n description=\"interactive xrandr\",\n long_description=open('README.md').read(),\n classifiers=[\n 'Programming Language :: Python',\n 'Environment :: Console',\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n ],\n scripts = ['ixrandr'],\n data_files=extras\n)\n","repo_name":"inemajo/ixrandr","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"34594589199","text":"import queue\nimport threading\nimport time\nfrom kivy.uix.popup import Popup\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.label import Label\n\n\nclass DataHandler(queue.Queue):\n def __init__(self, name: str = None):\n super(DataHandler, self).__init__()\n self._name = name\n\n def send_data(self, message):\n if self.full():\n raise Exception(\"stream is full\")\n else:\n self.put(message)\n\n# Used to normalize our robot location in meters to dimensions we can draw based on pixels\ndef normalize_dimensions(location_meters, dimensions_pixels, dimensions_meters):\n return ((location_meters[0] / dimensions_pixels[0]) * dimensions_meters[0],\n (location_meters[1]/ dimensions_pixels[1]) * dimensions_meters[1])\n\n\nclass InformationPopup(Popup):\n \"\"\"\n ->Displays message, warning or error to the user\\n\n ->Use it only to display information to the user\\n\n ->Popup will exit on clicking outside the popup area\\n\n Required Arguments:\n --> _type: string, should be from (\"e\", \"w\", \"i\")\n \"e\": error\n \"w\": warning\n \"i\": info\n --> _message: string, Message to be displayed\\n\n (optional)\\n\n --> _callback_on_dismiss: function called when popup closes (caller is passed as argument)\n \"\"\"\n def __init__(self, _type: str, _message: str, callback_on_dismiss=None, **kwargs):\n super(InformationPopup, self).__init__()\n # Popup Settings\n self.size_hint = (0.35, 0.25)\n self.pos_hint = {\"center_x\": 0.5, \"center_y\": 0.5}\n self.title_color = (0.9, 0.9, 0.9, 1)\n self.title_align = 'center'\n self.title_size = '20sp'\n self.callback_on_dismiss = callback_on_dismiss\n # Message Settings\n self.message_size = '18sp'\n if _type not in (\"e\", \"w\", \"i\"):\n raise Exception(\"Unknown error type\\n _type should be 'e' or 'w' or 'i'\")\n else:\n self._type = _type\n self._message = _message\n if self.callback_on_dismiss is not None:\n self.bind(on_dismiss=callback_on_dismiss)\n\n # Main BoxLayout\n if _type == \"e\":\n self._message = f\"[size={self.message_size}][color=ff3333]{self._message}[/color][/size]\"\n self.title = \"Error\"\n\n elif _type == \"w\":\n self._message = f\"[size={self.message_size}][color=33ff33]{self._message}[/color][/size]\"\n self.title = \"Warning\"\n else:\n self._message = f\"[size={self.message_size}][color=3333ff]{self._message}[/color][/size]\"\n self.title = \"Info\"\n self._main_layout = BoxLayout(size_hint=(1, 1),\n pos_hint={\"center_x\": 0.5, \"center_y\": 0.5},\n pos=self.pos)\n self._message_label = Label(pos_hint={\"center_x\": 0.5, \"center_y\": 0.5},\n size_hint=(1, 
1),\n                                    text=self._message,\n                                    markup=True)\n\n        self._main_layout.add_widget(self._message_label)\n        self.add_widget(self._main_layout)\n\n\nif __name__ == \"__main__\":\n    pass\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Jaydevard/swarm_bunny_robot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"3877551835","text":"import logging\nimport argparse\n\nfrom momitcool import MomitCool\n\n\ndef main():\n    logging.basicConfig(level=logging.INFO)\n\n    parser = argparse.ArgumentParser(\n        description='Control your Momit Cool.')\n\n    parser.add_argument(\n        '--host', type=str, required=True,\n        help='ip address of your momit cool.')\n\n    parser.add_argument(\n        '--cool', action='store_true',\n        help='turns on the ac in cooling mode')\n\n    parser.add_argument(\n        '--off', action='store_true',\n        help='turns off the ac')\n\n    parser.add_argument(\n        '--mode', action='store_true',\n        help='get current mode'\n    )\n\n    parser.add_argument(\n        '--temperature', action='store_true',\n        help='get current temperature'\n    )\n\n    args = parser.parse_args()\n\n    cool = MomitCool(args.host)\n\n    if args.mode:\n        print(cool.mode())\n\n    if args.temperature:\n        print(cool.temperature())\n\n    if args.cool:\n        cool.cool()\n\n    if args.off:\n        cool.off()\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"Photonios/py-momit-cool-remote","sub_path":"momitcool/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"44106297202","text":"\nfile_name = \"guest_book.txt\"\n\nprint(\"type 'q' to stop the program\")\nwhile True:\n    name = input('Please enter your name:\\n')\n    if name == 'q':\n        break\n    else:\n        with open(file_name, 'a') as file_object:\n            file_object.write(name + '\\n')\n            print('Hello ' + name + ', we have logged you in the guest book.')\n\n\n    ","repo_name":"Wes1042/python-crash-course","sub_path":"chapter10/file_writting/Guest_Book.py","file_name":"Guest_Book.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"5870223051","text":"def wordbreak(s, dict):\n    if not dict:\n        return False\n    n = len(s)\n    res = [False] * n\n    for i in range(n):\n        if s[:i+1] in dict:\n            res[i] = True\n    if True not in res:\n        return False\n    i = 0\n    while i < n-1:\n        for k in range(i+1):\n            if res[i+1]:\n                break\n            if res[i-k] and s[i-k+1:i+2] in dict:\n                res[i+1] = True\n                break\n        i += 1\n    if res[-1]:\n        return True\n    else:\n        return False","repo_name":"eric6356/LeetCode","sub_path":"WordBreak.py","file_name":"WordBreak.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"16184317973","text":"\"\"\"\n    Climbing Stairs (https://www.acmicpc.net/problem/2579)\n    - Input : the number of stairs, then the score written on each stair,\n            starting from the lowest stair\n            (at most 300 stairs; each score is a natural number up to 10,000)\n    - Output : the maximum total score that can be obtained\n\n    * Reference : https://daimhada.tistory.com/181\n\"\"\"\nfrom sys import stdin\n\nn = int(stdin.readline().strip())\nstairs = [int(stdin.readline().strip()) for _ in range(n)]\ndp = []\n\nif n == 1:\n    print(stairs[0])\nelif n == 2:\n    print(stairs[0] + stairs[1])\nelse:\n    dp.append(stairs[0])\n    dp.append(max(stairs[0] + stairs[1], stairs[1]))\n    dp.append(max(stairs[0] + stairs[2], stairs[1] + stairs[2]))\n\n    for i in range(3, n):\n        dp.append(max(dp[i - 
2] + stairs[i], dp[i - 3] + stairs[i] + stairs[i - 1]))\n\n print(dp.pop())\n\n","repo_name":"akana0321/Algorithm","sub_path":"BaekJoon/Dynamic_Programming/going_up_stairs_2579.py","file_name":"going_up_stairs_2579.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"72952705298","text":"import mysql.connector\nimport pickle\nfrom collections import OrderedDict\nimport numpy as np\nimport visdom\nimport pandas as pd\nfrom datetime import datetime\nimport os.path as path\nfrom itertools import islice, count\nimport helper\nimport networkx as nx\n\nBASE_DIR = path.join(\"..\", \"..\", \"data\", \"customers\")\n\nvis = visdom.Visdom()\n\nconfig = {\n 'user': 'root',\n 'password': 'vela1990',\n 'host': '127.0.0.1',\n 'database': 'ml_crif',\n}\n\nOBJ_COLUMNS = ['segmento', 'date_ref', 'b_partner', 'cod_uo', 'zipcode', 'region', 'country_code', 'customer_kind', 'kind_desc', 'customer_type', 'type_desc', 'uncollectable_status', 'ateco', 'sae']\nRISK_COLUMNS = [\"segmento\", \"date_ref\", \"pre_notching\", \"val_scoring_risk\", \"val_scoring_pre\", \"val_scoring_ai\", \"val_scoring_cr\", \"val_scoring_bi\", \"val_scoring_sd\", \"class_scoring_risk\", \"class_scoring_pre\", \"class_scoring_ai\", \"class_scoring_cr\", \"class_scoring_bi\", \"class_scoring_sd\"]\nTIMESTAMP = [\"2016-06-30\", \"2016-07-31\", \"2016-08-31\", \"2016-09-30\", \"2016-10-31\", \"2016-11-30\", \"2016-12-31\",\n \"2017-01-31\", \"2017-02-28\", \"2017-03-31\", \"2017-04-30\", \"2017-05-31\", \"2017-06-30\"]\n\n\nONE_MAN_COMPANY_COSTUMERS = [13379, 49098, 66357, 66410, 228463, 392729, 394761, 418783, 424879, 430555, 434466, 1356061, 4316517, 4320054, 5280859, 5383259, 5384885, 5390867]\nREF_DATE = \"20170101\"\nDATE_FORMAT = \"%Y%m%d\"\n\nGET_ALL_CUSTOMER = \"SELECT customerid FROM customers\"\nGET_ALL_OWNER = \"SELECT customerid FROM onemancompany_owners\"\nCUSTOMERS_OWNER_UNION = \"SELECT c.customerid FROM customers AS c UNION SELECT o.customerid FROM onemancompany_owners AS o\"\nGET_REVENUE_USER = \"SELECT customerid FROM revenue\"\nGET_RISK_USER = \"SELECT customerid, segmento, date_ref, val_scoring_risk, class_scoring_risk, val_scoring_pre, class_scoring_pre, val_scoring_ai, class_scoring_ai, val_scoring_cr, class_scoring_cr, val_scoring_bi, class_scoring_bi, val_scoring_sd, class_scoring_sd, pre_notching FROM risk ORDER BY customerid asc, date_ref asc\"\nGET_RISK_USER_BY_ID = \"SELECT customerid, date_ref, val_scoring_risk, class_scoring_risk, val_scoring_pre, class_scoring_pre, val_scoring_ai, class_scoring_ai, val_scoring_cr, class_scoring_cr, val_scoring_bi, class_scoring_bi, val_scoring_sd, class_scoring_sd, pre_notching FROM risk ORDER BY date_ref asc WHERE customerid={}\"\nGET_ALL_CUSTOMER_LINKS_ID = \"SELECT DISTINCT * FROM (SELECT c_one.customerid FROM customer_links AS c_one UNION SELECT c2.customerid_link FROM customer_links AS c2) AS u\"\nGET_ALL_CUSTOMER_LINKS_BY_ID = \"SELECT DISTINCT customerid_link FROM customer_links WHERE customerid={}\"\nGET_ALL_CUSTOMER_LINKS_FOR_RISK_CUSTOMERS = \"SELECT DISTINCT cl.customerid, customerid_link FROM customer_links as cl, risk as r WHERE r.customerid = cl.customerid\"\nGET_PAGE_CUSTOMER_LINKS = \"SELECT DISTINCT cl.customerid, cl.customerid_link, cl.cod_link_type FROM customer_links as cl LIMIT {} OFFSET {}\"\nGET_ALL_RISK_LINKS_BY_CUSTOMERID = \"SELECT DISTINCT cl.customerid, cl.customerid_link, cl.cod_link_type, cl.des_link_type FROM risk AS r, customer_links AS cl 
WHERE r.customerid = cl.customerid AND r.customerid={}\"\nGET_DEFAULT_RISK_CUSTOMER = \"SELECT r.customerid, r.date_ref, r.val_scoring_risk, r.class_scoring_risk, r.val_scoring_pre, r.class_scoring_pre, r.val_scoring_ai, r.class_scoring_ai, r.val_scoring_cr, r.class_scoring_cr, r.val_scoring_bi, r.class_scoring_bi, r.val_scoring_sd, r.class_scoring_sd, r.pre_notching FROM risk AS r WHERE r.customerid IN (SELECT DISTINCT r1.customerid FROM ml_crif.risk AS r1 WHERE r1.val_scoring_risk=100) ORDER BY r.customerid asc, r.date_ref asc\"\nGET_CUSTOMER_BY_ID = \"SELECT birthdate, b_partner, cod_uo, zipcode, region, country_code, c.customer_kind, ck.description as kind_desc, c.customer_type, ct.description as type_desc, uncollectible_status, ateco, sae FROM customers as c, customer_kinds as ck, customer_types as ct WHERE c.customer_kind=ck.customer_kind AND c.customer_type = ct.customer_type AND c.customerid={} LIMIT 0, 1\"\nGET_ACCORDATO_TOT_BY_ID = \"SELECT date_ref, value1, value2 FROM ml_crif.features where customerid={} and cod_feature='GN0018' and cod_source='OP';\"\n\nf_check_none = lambda x: np.nan if x is None else x\nf_parse_date = lambda x: \"{}-{}-{}\".format(x[6:], x[4:6], x[:4])\nf_format_str_date = lambda x: datetime.strptime(x, \"%d-%m-%Y\")\nf_check_b_date = lambda x: REF_DATE if x == \"\" else x\n\n\ndef chunks(data, SIZE=30):\n    it = iter(data)\n    for i in range(0, len(data), SIZE):\n        yield {k:data[k] for k in islice(it, SIZE)}\n\ndef take(data_dict, N_first=30):\n    it = iter(data_dict)\n    return {k:data_dict[k] for k in islice(it, N_first)}\n\ndef calculate_age(born):\n    today = datetime.now()\n    return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\n\ndef remove_customers_with_no_neighbors(customers_data, customers_neighbors):\n    n_customers_id = set(customers_neighbors.keys())\n    df_customers_id = customers_data.columns.get_level_values(\"id\").unique()\n    print(\"df_customer len: {}\".format(len(df_customers_id)))\n    print(\"n_customer len: {}\".format(len(n_customers_id)))\n    for row, customer_id in enumerate(df_customers_id):\n        if customer_id not in n_customers_id:\n            customers_data = customers_data.drop(customer_id, axis=1, level='id')\n        if (row % 100) == 0:\n            print(row, len(customers_data.columns.get_level_values(\"id\").unique()))\n    print(\"final df_customers: {}\".format(len(customers_data.columns.get_level_values(\"id\").unique())))\n    return customers_data\n\ndef get_customers_risk(cursor):\n    \"\"\"\n    get the customers with risk values\n    :param cursor:\n    :return:\n    \"\"\"\n    customers_data = OrderedDict()\n    prev_customer_id = 0\n    for row, (customer_id, segmento, date_ref, val_scoring_risk, class_scoring_risk, val_scoring_pre, class_scoring_pre,\n              val_scoring_ai, class_scoring_ai, val_scoring_cr, class_scoring_cr, val_scoring_bi, class_scoring_bi,\n              val_scoring_sd, class_scoring_sd, pre_notching) in enumerate(cursor):\n\n        if customer_id not in customers_data and customer_id != prev_customer_id:\n            if row > 0:\n                customers_data[prev_customer_id] = pd.DataFrame.from_dict(customers_data[prev_customer_id], orient=\"index\")\n            customers_data[customer_id] = OrderedDict()\n            prev_customer_id = customer_id\n\n        date_ref = f_parse_date(date_ref)\n        customers_data[customer_id][f_format_str_date(date_ref)] = __risk_mapping__(segmento, date_ref, pre_notching,\n            val_scoring_risk, val_scoring_pre, val_scoring_ai, val_scoring_cr, val_scoring_bi, val_scoring_sd,\n            class_scoring_risk, class_scoring_pre, class_scoring_ai, class_scoring_cr, class_scoring_bi, 
class_scoring_sd)\n\n        if row % 100 == 0:\n            print(row)\n    customers_data[prev_customer_id] = pd.DataFrame.from_dict(customers_data[prev_customer_id], orient=\"index\")\n    print(len(customers_data))\n    pickle.dump(customers_data, open(path.join(BASE_DIR, \"temp\", \"customers_risk_dict.bin\"), \"wb\"))\n    return customers_data\n\ndef get_neighbors(customers_data, cursor):\n    \"\"\"\n    get, for each node with risk values, the neighbors that also have risk values\n    :param customers_data:\n    :param cursor:\n    :return:\n    \"\"\"\n    customers_neighbors = OrderedDict()\n    for row, customer_id in enumerate(customers_data.keys()):\n        cursor.execute(GET_ALL_CUSTOMER_LINKS_BY_ID.format(customer_id))\n        for customer_id_link, in cursor:\n            if customer_id_link in customers_data:\n                if customer_id in customers_neighbors:\n                    customers_neighbors[customer_id].append(customer_id_link)\n                else:\n                    customers_neighbors[customer_id] = [customer_id_link]\n\n        if row % 100 == 0:\n            print(\"row:{}\\tcustomers:{}\".format(row, len(customers_neighbors)))\n\n    print(\"row:{}\\tcustomers:{}\".format(row, len(customers_neighbors)))\n    pickle.dump(customers_neighbors, open(path.join(BASE_DIR, \"temp\", \"customers_neighbors_dict.bin\"), \"wb\"))\n    return customers_neighbors\n\ndef __get_customer_info__(customer_id, cursor):\n    cursor.execute(GET_CUSTOMER_BY_ID.format(customer_id))\n    birth_date, b_partner, cod_uo, zipcode, region, country_code, customer_kind, kind_desc, customer_type, type_desc, uncollectable_status, ateco, sae = cursor.fetchone()\n    attribute = dict(\n        age=calculate_age(f_format_str_date(f_parse_date(f_check_b_date(birth_date)))),\n        b_partner=b_partner,\n        cod_uo=cod_uo,\n        zipcode=zipcode,\n        region=region,\n        country_code=country_code,\n        customer_kind=customer_kind,\n        kind_desc=kind_desc,\n        customer_type=customer_type,\n        type_desc=type_desc,\n        uncollectable_status=uncollectable_status,\n        ateco=ateco,\n        sae=sae)\n    return attribute\n\ndef get_customers_info(customers_data, cursor):\n    \"\"\"\n    get the customers' general attributes\n    :param customers_data:\n    :param cursor:\n    :return:\n    \"\"\"\n    num_customer = len(customers_data)\n    for row, (customer_id, df) in enumerate(sorted(customers_data.items())):\n        node_attribute = {}\n        attribute = __get_customer_info__(customer_id, cursor)\n        for time_step in df.index:\n            node_attribute[time_step] = attribute\n        node_attribute = pd.DataFrame.from_dict(node_attribute, orient=\"index\")\n        assert not node_attribute.isnull().values.any(), \"{}\\n{}\".format(customer_id, node_attribute)\n        customers_data[customer_id] = pd.concat([df, node_attribute], axis=1)\n        print(row, customer_id)\n    assert len(customers_data) == num_customer, \"old length:{}\\t new length:{}\".format(num_customer, len(customers_data))\n\n    pickle.dump(customers_data, open(path.join(BASE_DIR, \"temp\", \"customers_risk_attribute_dict.bin\"), \"wb\"))\n    return customers_data\n\ndef __risk_mapping__(segmento, date_ref, pre_notching,\n                     val_scoring_risk, val_scoring_pre, val_scoring_ai, val_scoring_cr, val_scoring_bi, val_scoring_sd,\n                     class_scoring_risk, class_scoring_pre, class_scoring_ai, class_scoring_cr, class_scoring_bi, class_scoring_sd):\n    return {\n        \"segmento\": segmento,\n        \"date_ref\": date_ref,\n        \"pre_notching\": pre_notching,\n        \"val_scoring_risk\": f_check_none(val_scoring_risk),\n        \"val_scoring_pre\": f_check_none(val_scoring_pre),\n        \"val_scoring_ai\": f_check_none(val_scoring_ai),\n        \"val_scoring_cr\": f_check_none(val_scoring_cr),\n        \"val_scoring_bi\": f_check_none(val_scoring_bi),\n        \"val_scoring_sd\": 
f_check_none(val_scoring_sd),\n        \"class_scoring_risk\": class_scoring_risk,\n        \"class_scoring_pre\": class_scoring_pre,\n        \"class_scoring_ai\": class_scoring_ai,\n        \"class_scoring_cr\": class_scoring_cr,\n        \"class_scoring_bi\": class_scoring_bi,\n        \"class_scoring_sd\": class_scoring_sd\n    }\n\n\ndef cut_time_series(customers_data, max_num_nan=4, nan_replacement=-1):\n    customers_data = customers_data.iloc[2:]  # remove 2015 data\n\n    full_nan_row = customers_data.isnull().all(axis=1, level=\"id\")  # get full nan row for each id\n    delete_ids = full_nan_row.sum().loc[lambda x: x > max_num_nan].keys()  # get customers to delete\n    customers_data = customers_data.drop(delete_ids, axis=1)  # delete obtained columns\n\n    # replace nan row with previous values\n    full_nan_row = customers_data.isnull().all(axis=1, level=\"id\")\n    customers_to_fill = full_nan_row.any().loc[lambda x: x == True].index\n    customers_data.update(customers_data[customers_to_fill].fillna(method='bfill').fillna(method='ffill'))\n\n    # replace nan in RISK_COLUMN by -1\n    customers_data.update(customers_data.loc[:, pd.IndexSlice[:, RISK_COLUMNS]].fillna(nan_replacement))\n    nan_customers = customers_data.isnull().any(axis=1, level=\"id\").any()\n    return customers_data, nan_customers\n\n\ndef check_dataframe_dim(customers_data, expected_size=(18,28)):\n    \"\"\"\n    check why some customers do not have the same dimension.\n    One-man companies do not have customer attributes.\n    Fix it by inserting the co-owner information extracted from the customer links table\n    :param customers_data: dataframe to check\n    :param expected_size: expected dim\n    :return:\n    \"\"\"\n    print(\"shape: {}\".format(customers_data.shape))\n    print(\"customers: {}\".format(customers_data.columns.get_level_values('id').unique().shape[0]))\n    print(\"attribute number: {}\".format(customers_data.columns.get_level_values('attribute').unique().shape[0]))\n\n    ret = []\n    for customer_id in customers_data.columns.get_level_values('id').unique():\n        if customers_data[customer_id].shape != expected_size:\n            print(\"{}\\terror\".format(customer_id))\n            print(customers_data[customer_id])\n            ret.append(customer_id)\n    print(\"len:{}\\n{}\".format(len(ret), ret))\n    return ret\n\ndef __delete_customer__(id_to_delete, customers_data, customers_neighbors_dict):\n    \"\"\"\n    delete a customer from the dataframe and from the neighbors dict\n    :param id_to_delete:\n    :param customers_data:\n    :param customers_neighbors_dict:\n    :return:\n    \"\"\"\n    deleted_customers = []\n    customers_data = customers_data.drop(id_to_delete, axis=1, level=0)\n    neighbors = customers_neighbors_dict.pop(id_to_delete)\n    deleted_customers.append(id_to_delete)\n\n    # remove it also from the neighbors\n    for neighbor in neighbors:\n        if neighbor in customers_neighbors_dict:\n            customers_neighbors_dict[neighbor].remove(id_to_delete)\n            if len(customers_neighbors_dict[neighbor]) == 0:\n                # remove this neighbor as well\n                rec_deleted_customers, customers_data, customers_neighbors_dict = __delete_customer__(neighbor, customers_data, customers_neighbors_dict)\n                deleted_customers.extend(rec_deleted_customers)\n    return deleted_customers, customers_data, customers_neighbors_dict\n\n\ndef delete_customers(ids_to_delete, customers_data, customers_neighbors_dict):\n    deleted_customers = []\n    for id_to_delete in ids_to_delete:\n        if id_to_delete in deleted_customers:\n            print(\"already deleted\")\n            continue\n        rec_deleted_customers, customers_data, customers_neighbors_dict = __delete_customer__(id_to_delete, customers_data,\n                                                                                              customers_neighbors_dict)\n        
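# extend the running list with every id removed by the recursive cascade above,\n        # so customers already deleted are skipped on later iterations\n        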
deleted_customers.extend(rec_deleted_customers)\n    return customers_data, customers_neighbors_dict\n\n\ndef fix_neighbors(customers_data, customers_neighbors):\n    \"\"\"\n    remove from the neighbors set the customers not present in the data\n    :param customers_data:\n    :param customers_neighbors:\n    :return:\n    \"\"\"\n    def __check_customer__(to_check, customers_set):\n        # iterate over a copy: removing items from a list while iterating it skips elements\n        for customer_id in list(to_check):\n            if customer_id not in customers_set:\n                to_check.remove(customer_id)\n        return to_check\n\n    removed = []\n    customers = customers_data.columns.get_level_values('id').unique().tolist()\n\n    for customer_id in list(customers_neighbors.keys()):\n        if customer_id in customers:\n            neighbors = __check_customer__(customers_neighbors[customer_id], customers)\n            if len(neighbors) == 0:\n                print(\"{} without neighbors\".format(customer_id))\n                customers.remove(customer_id)\n                customers_neighbors.pop(customer_id)\n                removed.append(customer_id)\n            else:\n                customers_neighbors[customer_id] = neighbors\n        else:\n            customers_neighbors.pop(customer_id)\n\n    assert len(customers) == len(customers_neighbors), \"{}\\t{}\".format(len(customers), len(customers_neighbors))\n    assert len(customers) + len(removed) == customers_data.columns.get_level_values('id').unique().shape[0], \"{}\\t{}\\t{}\".format(len(customers), len(removed), customers_data.columns.get_level_values('id').unique().shape[0])\n\n    for customer_id in removed:\n        customers_data = customers_data.drop(customer_id, axis=1, level=0)\n    return customers_data, customers_neighbors\n\n\ndef extract_accordato_massimo(customers_data, cursor):\n    customers_id = customers_data.columns.get_level_values(\"id\").unique().tolist()\n    print(len(customers_id))\n    accordato_max = {}\n    # resume from a previous partial run\n    for row, customer_id in enumerate(customers_id[7046:]):\n        accordato_max[customer_id] = OrderedDict()\n        cursor.execute(GET_ACCORDATO_TOT_BY_ID.format(customer_id))\n        for date_ref, value1, value2 in cursor.fetchall():\n            date_ref = f_parse_date(date_ref)\n\n            accordato_max[customer_id][f_format_str_date(date_ref)] = dict(date_ref=date_ref,\n                                                                           value1=value1,\n                                                                           value2=value2)\n        accordato_max[customer_id] = pd.DataFrame.from_dict(accordato_max[customer_id], orient=\"index\")\n        if row % 100 == 0:\n            print(row, customer_id)\n    return accordato_max\n\n\ndef check_consistency():\n    customers_data = pd.read_msgpack(path.join(BASE_DIR, \"temp\", \"customers_risk_time_frame_null_df_final.msg\"))\n    customers_ids = customers_data.columns.get_level_values(\"id\").unique().tolist()\n    G = nx.readwrite.gpickle.read_gpickle(path.join(BASE_DIR, \"temp\", \"prune_graph.bin\"))\n\n    print(G.number_of_nodes())\n    print(G.number_of_edges())\n    print(len(customers_ids))\n\n    bad_nodes = list(filter(lambda x: x[1] == 0, list(G.out_degree(customers_ids))))\n    print(bad_nodes)\n    print(len(bad_nodes))\n\n\n\ndef create_full_graph(cursor, offset=10000):\n    G = nx.DiGraph()\n    count_total = 0\n    for page in count():\n        count_prev = count_total\n        print(GET_PAGE_CUSTOMER_LINKS.format(offset, page*offset))\n        cursor.execute(GET_PAGE_CUSTOMER_LINKS.format(offset, page*offset))\n\n        for customer_id, customer_link, edge_type in cursor.fetchall():\n            G.add_edge(customer_id, customer_link, rel_type=edge_type)\n            count_total += 1\n\n            if count_total % 500 == 0:\n                print(count_total)\n\n        if count_prev == count_total:\n            break\n\n    nx.readwrite.gpickle.write_gpickle(G, path.join(BASE_DIR, \"temp\", \"full_graph.bin\"))\n\n\n\ndef prune_graph():\n    G = nx.readwrite.gpickle.read_gpickle(path.join(BASE_DIR, \"temp\", \"full_graph.bin\"))\n    customers_data = 
pd.read_msgpack(path.join(BASE_DIR, \"customers_risk_df.msg\"))\n    customers_ids = customers_data.columns.get_level_values(\"id\").unique().tolist()\n\n\n    nodes = list(G.nodes())\n    for row, node in enumerate(nodes):\n        if node not in customers_ids:\n            G.remove_node(node)\n\n        if row % 500 == 0:\n            print(row)\n\n    nx.readwrite.gpickle.write_gpickle(G, path.join(BASE_DIR, \"temp\", \"prune_graph.bin\"))\n\n    print(G.number_of_nodes(), len(customers_ids))\n    assert G.number_of_nodes() == len(customers_ids)\n\n\n\n\ndef extract_data(cursor):\n\n    # customers_data = get_customers_risk(cursor)\n    # customers_neighbors = get_neighbors(customers_data, cursor)\n    # customers_data = get_customers_info(customers_data, cursor)\n    # customers_data = pd.concat(customers_data, axis=1)\n    # customers_data.columns = customers_data.columns.rename(['id', 'attribute'])\n    # customers_data = remove_customers_with_no_neighbors(customers_data, customers_neighbors)\n    # customers_data.to_msgpack(path.join(BASE_DIR, \"temp\", \"customers_risk_df.msg\"))\n\n    # customers_data, nan_customers = cut_time_series(customers_data)\n    # customers_data.to_msgpack(path.join(BASE_DIR, \"temp\", \"customers_risk_time_frame_null_df.msg\"))\n\n    # check_dataframe_dim(customers_data)\n    # customers_data = delete_one_man_company(customers_data, customers_neighbors_dict)\n    # customers_data, customers_neighbors_dict = fix_neighbors(customers_data, customers_neighbors_dict)\n\n    check_consistency()\n\ndef extract_neighborhood_risk():\n    customer_data = pickle.load(open(\"customer_risk_time.bin\", \"rb\"))\n    customer_origin_data = OrderedDict()\n    customer_diff_data = OrderedDict()\n    customer_rel_diff_data = OrderedDict()\n\n    n_done = 0\n    tot = len(customer_data.keys())\n    for idx, customer_id in enumerate(sorted(customer_data.keys())):\n        customer_risk = customer_data[customer_id]\n        c_risk = np.array(customer_risk)\n        c_r_risk = np.diff(c_risk, axis=0)\n\n        cursor.execute(GET_ALL_CUSTOMER_LINKS_BY_ID.format(customer_id))\n        for customer_link_id, in cursor:\n            if customer_link_id in customer_data:\n                neighborhood_risk = customer_data[customer_link_id]\n                n_risk = np.array(neighborhood_risk)\n                n_r_risk = np.diff(n_risk, axis=0)\n\n                # save original data\n                if customer_id in customer_origin_data:\n                    customer_origin_data[customer_id].append(n_risk)\n                else:\n                    customer_origin_data[customer_id] = [n_risk]\n\n                # compute absolute difference\n                diff = np.fabs(np.array(c_risk) - np.array(n_risk))\n                if customer_id in customer_diff_data:\n                    customer_diff_data[customer_id].append(diff)\n                else:\n                    customer_diff_data[customer_id] = [diff]\n\n                # compute relative difference\n                diff = np.fabs(np.array(c_r_risk) - np.array(n_r_risk))\n                if customer_id in customer_rel_diff_data:\n                    customer_rel_diff_data[customer_id].append(diff)\n                else:\n                    customer_rel_diff_data[customer_id] = [diff]\n        n_done += 1\n        if idx % 100 == 0:\n            done = (n_done / tot) * 100\n            print(done)\n    return customer_origin_data, customer_diff_data, customer_rel_diff_data\n\n\nif __name__ == \"__main__\":\n\n    cnx = mysql.connector.connect(**config)\n    cursor = cnx.cursor()\n    try:\n        extract_data(cursor)\n\n    finally:\n        cursor.close()\n        cnx.close()","repo_name":"andompesta/bank_credit_scoring","sub_path":"datasets/bank/data_extraction.py","file_name":"data_extraction.py","file_ext":"py","file_size_in_byte":22154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"9069633817","text":"days = int(input())\nnumber_confectioners = int(input())\nnumber_cakes = int(input())\nnumber_waffles = 
int(input())\nnumber_pancakes = int(input())\n\ncakes_price = 45\nwaffles_price = 5.8\npancakes_price = 3.2\n\nsum_cakes = number_cakes * cakes_price\nsum_waffles = number_waffles * waffles_price\nsum_pancakes = number_pancakes * pancakes_price\n\nsumPerDay = (sum_cakes + sum_waffles + sum_pancakes) * number_confectioners\ntotSum = sumPerDay * days\nfinalSum = 7/8 * totSum\nprint(finalSum)\n","repo_name":"vbukovska/SoftUni","sub_path":"Programming_basics/Exercise_1/CharityCampaign.py","file_name":"CharityCampaign.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"25127561721","text":"from pathlib import Path\nfrom config_common import get_py_youwol_env, on_before_startup\nfrom youwol_flux_backend import Constants, Configuration\nfrom youwol_utils import LocalStorageClient, LocalDocDbClient\nfrom youwol_utils.clients.assets_gateway.assets_gateway import AssetsGatewayClient\nfrom youwol_utils.context import ConsoleContextLogger\nfrom youwol_utils.http_clients.flux_backend import PROJECTS_TABLE, COMPONENTS_TABLE\nfrom youwol_utils.middlewares.authentication_local import AuthLocalMiddleware\nfrom youwol_utils.servers.fast_api import FastApiMiddleware, AppConfiguration, ServerOptions\n\n\nasync def get_configuration():\n    env = await get_py_youwol_env()\n    databases_path = Path(env['pathsBook']['databases'])\n\n    async def _on_before_startup():\n        await on_before_startup(service_config)\n\n    service_config = Configuration(\n        storage=LocalStorageClient(root_path=databases_path / 'storage',\n                                   bucket_name=Constants.namespace),\n        doc_db=LocalDocDbClient(root_path=databases_path / 'docdb',\n                                keyspace_name=Constants.namespace,\n                                table_body=PROJECTS_TABLE\n                                ),\n        doc_db_component=LocalDocDbClient(\n            root_path=databases_path.parent / 'docdb',\n            keyspace_name=Constants.namespace,\n            table_body=COMPONENTS_TABLE\n        ),\n        assets_gtw_client=AssetsGatewayClient(url_base=f\"http://localhost:{env['httpPort']}/api/assets-gateway\")\n    )\n    server_options = ServerOptions(\n        root_path=\"\",\n        http_port=env['portsBook']['flux-backend'],\n        base_path=\"\",\n        middlewares=[FastApiMiddleware(AuthLocalMiddleware, {})],\n        on_before_startup=_on_before_startup,\n        ctx_logger=ConsoleContextLogger()\n    )\n    return AppConfiguration(\n        server=server_options,\n        service=service_config\n    )\n","repo_name":"youwol/flux-backend","sub_path":"src/config_local.py","file_name":"config_local.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"45770968926","text":"# coding=utf-8\n# Defines the login page object and its login helpers\nfrom time import sleep\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nfrom common import config\n\nfrom demo.pageObject.basePage import BasePage, ex1, ex2\n\n\nclass LoginPage(BasePage):\n\n    # username input\n    unnameLocater = (By.CSS_SELECTOR, ex1.readExcel(1,4))\n    # password input\n    upwdLocater = (By.CSS_SELECTOR, ex1.readExcel(2,4))\n    # login button\n    butLocater = (By.CSS_SELECTOR, ex1.readExcel(3,4))\n\n    # error messages - login-failure prompts shown in the popup window\n\n    erroruname = (By.XPATH, ex1.readExcel(4,4))\n    errorpwd = (By.XPATH, ex1.readExcel(5, 4))\n\n\n    errortext=(By.CSS_SELECTOR, ex1.readExcel(6,4))\n    # confirm button of the popup window\n\n    acceptBut=(By.CSS_SELECTOR, ex1.readExcel(7,4))\n\n    # login test data\n    loginDatalist=[[ex2.readExcel(1, 3), ex2.readExcel(1, 4)],# valid username, valid password\n                   [ex2.readExcel(2, 3), ex2.readExcel(2, 4)],# empty username, empty password\n                   [ex2.readExcel(3, 3), ex2.readExcel(3, 4)],# empty username, non-empty password\n                   [ex2.readExcel(4, 3), 
ex2.readExcel(4, 4)],# non-empty username, empty password\n                   [ex2.readExcel(5, 3), ex2.readExcel(5, 4)],# wrong username, correct password\n                   [ex2.readExcel(6, 3), ex2.readExcel(6, 4)],# correct username, wrong password\n                   [ex2.readExcel(7, 3), ex2.readExcel(7, 4)],# wrong username, wrong password\n                   [ex2.readExcel(8, 3), ex2.readExcel(8, 4)],# username too short, correct password\n                   [ex2.readExcel(9, 3), ex2.readExcel(9, 4)],# username longer than 12 characters, correct password\n                   [ex2.readExcel(10, 3), ex2.readExcel(10, 4)],# valid username length, password too short\n                   [ex2.readExcel(11, 3), ex2.readExcel(11, 4)],# valid username length, password longer than 20 characters\n                   [ex2.readExcel(12, 3), ex2.readExcel(12, 4)],# username longer than 12 characters, password longer than 21 characters\n                   [ex2.readExcel(13, 3), ex2.readExcel(13, 4)]# username too short, password too short\n                   ]\n\n    # login method\n    def LoginFun(self, vname=loginDatalist[0][0],\n                 vpwd=loginDatalist[0][1]):\n        # enter the username\n        self.inputValue(self.unnameLocater, vname)\n        # enter the password\n        self.inputValue(self.upwdLocater, vpwd)\n        # click the login button\n        self.doClick(self.butLocater)\n        sleep(5)\n\n\nif __name__ == '__main__':\n    driver = webdriver.Chrome()\n    # url = config.base_url\n    login = LoginPage(driver)\n    login.open()\n    login.driver.maximize_window()\n    login.driver.implicitly_wait(10)\n    # login.LoginFun()\n    # login.logOutFun(driver)\n    # # successful login\n    login.LoginFun()\n    # # failed account login\n    # # fetch the error message text and print it\n    # # errorname=log.getElementValue(log.erroruname)\n    # # print(errorname)\n    # # errorpwd=log.getElementValue(log.errorpwd)\n    # # print(errorpwd)\n    # errText=login.getElementValue(login.errortext)\n    # print(errText)\n    # login.doClick(login.acceptBut)\n    login.logOutFun(driver)\n    login.driver.quit()\n    # print(login.loginDatalist)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"zmx19980630/Apptest","sub_path":"webAutoProject2/demo/pageObject/loginPage.py","file_name":"loginPage.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"38802464549","text":"from django import forms\nfrom .models import Product, ProductDetail\n\nclass ProductDetailAddForm(forms.ModelForm):\n    class Meta:\n        model = ProductDetail\n        fields = \"__all__\"\n\nclass EditProductCodeForm(forms.Form):\n    product_code = forms.CharField(required=True)\n\nclass AddProductInBranchForm(forms.ModelForm):\n    class Meta:\n        model = Product\n        exclude = ['branch',]\n\n## This form edits a product in a branch: a seller can't edit anything,\n## a branch manager can edit only the quantity and the branch price (not the code),\n## and a site admin can edit everything.\n## The caller must pass user_site to this form.\nclass EditProductInBranchForm(forms.ModelForm):\n    class Meta:\n        model = Product\n        exclude = ['branch',]\n\n\n    def __init__(self, *args, **kwargs):\n        user_site = kwargs[\"user_site\"]\n        del kwargs[\"user_site\"]\n        super().__init__(*args, **kwargs)\n\n        disabled_fields = {}\n\n        if user_site.is_branch_manager():\n            for field_name, field in self.fields.items():\n                if field_name == \"quantity\" or field_name == \"price_for_branch\":\n                    pass\n                else:\n                    field.widget.attrs['disabled'] = True\n                    field.required = False\n            disabled_fields['disabled_product_detail'] = forms.CharField(\n                widget=forms.HiddenInput,\n                initial=self.instance.product_detail,\n            )\n        elif not user_site.is_site_admin():\n            for field_name, field in self.fields.items():\n                field.widget.attrs['disabled'] = True\n\n        if disabled_fields:\n            self.fields.update(disabled_fields)\n\n    def clean(self):\n        cleaned_data = super(EditProductInBranchForm, self).clean()\n        disabled_product_detail = cleaned_data.get('disabled_product_detail')\n        if 
disabled_product_detail:\n            product_detail = ProductDetail.objects.get(product_code=disabled_product_detail)\n            cleaned_data['product_detail'] = product_detail\n            del cleaned_data[\"disabled_product_detail\"]\n            return cleaned_data\n        else:\n            return cleaned_data\n","repo_name":"mariomalak1/clothingStore","sub_path":"Product/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"4845225342","text":"from .db import db\n\nclass Note_Tag(db.Model):\n    __tablename__ = 'note_tags'\n\n    id = db.Column(db.Integer, primary_key = True)\n    tag_id = db.Column(db.Integer, db.ForeignKey('tags.id'), nullable=False)\n    note_id = db.Column(db.Integer, db.ForeignKey('notes.id'), nullable=False)\n\n    __table_args__ = (db.Index(\"only_one_unique_tag_per_note\", \"tag_id\", \"note_id\", unique=True),)\n\n    def to_dict(self):\n        return {\n            \"id\": self.id,\n            \"note_id\": self.note_id,\n            \"tag_id\": self.tag_id\n        }\n","repo_name":"mjshuff23/evernote-clone","sub_path":"app/models/note_tag.py","file_name":"note_tag.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"13"} +{"seq_id":"47328290534","text":"import pandas as pd\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\n\r\nclass preprocessor:\r\n\r\n    def __init__(self):\r\n        self.df = None\r\n        self.df_countries = None\r\n\r\n    # load the data from a given path\r\n    # if the file doesn't exist or is empty, an error message is returned\r\n    def load_data(self, filepath):\r\n        try:\r\n            self.df = pd.read_excel(filepath, index_col=0)\r\n\r\n            if len(self.df) == 0:\r\n                self.df = None\r\n                return f'file is empty'\r\n\r\n        except FileNotFoundError:\r\n            self.df = None\r\n            return f'file not found at path: {filepath}'\r\n\r\n        return ''\r\n\r\n    # impute the missing data with the feature mean\r\n    def impute_data(self):\r\n        mean = self.df.mean(numeric_only=True)\r\n        self.df.fillna(mean, inplace=True)\r\n\r\n    # normalize the features by standard deviation\r\n    def data_normalization(self):\r\n        # print(self.df[self.df.columns])\r\n        self.df[self.df.columns] = StandardScaler().fit_transform(self.df[self.df.columns])\r\n\r\n    # group by country and aggregate by mean value\r\n    def data_grouping(self):\r\n        self.df_countries = self.df.groupby(['country']).mean()\r\n        self.df_countries.drop(['year'], axis=1, inplace=True)\r\n\r\n    # the full preprocessing pipeline\r\n    def preprocess(self, filename):\r\n        err_msg = self.load_data(filename)\r\n\r\n        if len(err_msg) > 0:\r\n            self.df_countries = None\r\n            return None, None, err_msg\r\n\r\n        self.impute_data()\r\n        self.data_normalization()\r\n        self.data_grouping()\r\n\r\n        return self.df, self.df_countries, 'Preprocessing completed successfully!'\r\n","repo_name":"talkad/business_intelligence","sub_path":"Clustering/clustering/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"42696933570","text":"#coding=utf-8\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\nfrom sogouWechart.items import WechartAccount\nimport json\nclass SogouWechartSpider(BaseSpider):\n    name = 'sogou_wechart'\n    allowed_domains = [\"weixin.sogou.com\", \"mp.weixin.qq.com\"]\n    
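# seed search keyword; __init__ below extends this list with entries from the local it_key.json file\n    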
start_key = ['python']\n    start_urls = []\n\n    def __init__(self):\n        # read the keyword file with a context manager so the handle is closed,\n        # and avoid shadowing the built-in 'str'\n        with open('F:/scrapy/it_key.json') as fileObj:\n            content = fileObj.read()\n        keys = json.loads(content)\n        for key in keys:\n            self.start_key.append(key['name'])\n            for c in key['children']:\n                self.start_key.append(c['name'])\n        for key in self.start_key:\n            for i in range(1, 10):\n                url = 'http://weixin.sogou.com/weixin?type=2&query=%s&ie=utf8&_sug_=n&_sug_type_=page=%s' % (key, i)\n                self.start_urls.append(url)\n\n    def parse(self, response):\n        sel = Selector(response)\n        links = sel.xpath('//*[@id=\"main\"]/div/div[2]/div/div')\n        for l in links:\n            link = ''.join(l.xpath('div[2]/h4/a/@href').extract())\n            yield Request(url=link, callback=self.parse_article)\n\n    def parse_article(self, response):\n        item = WechartAccount()\n        sel = Selector(response)\n        user_name = sel.re('([^<]*)')\n        nickname = sel.re('var nickname = \"([^\"]*)\";')\n        image_url = sel.re('hd_head_img : \"([^\"]*)\"')\n        item['nickname'] = ''.join(nickname)\n        item['user_name'] = user_name[0]\n        item['image_url'] = ''.join(image_url)\n        return item\n\n\n\n\n","repo_name":"duanbj/sogouWechart","sub_path":"sogouWechart/spiders/SogouSearch.py","file_name":"SogouSearch.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"13"} +{"seq_id":"3891776358","text":"import torch\nimport time\n\nfrom torch import optim\nfrom torch import nn\nfrom .others import denoiser\n\n# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndevice = torch.device(\"cpu\")\n\n\nclass Model:\n    def __init__(self) -> None:\n        # instantiate model + optimizer + loss function + any other stuff you need\n        self.model = denoiser.Denoiser()\n        self.model.to(device)\n        self.criterion = nn.MSELoss()\n        self.lr = 0.001\n        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr, betas=(0.9, 0.999), eps=1e-08)\n        self.mini_batch_size = 50\n\n    def load_pretrained_model(self) -> None:\n        # This loads the parameters saved in bestmodel.pth into the model\n        denoiser_state_dict = torch.load('./Miniproject_1/bestmodel.pth')\n        self.model.load_state_dict(denoiser_state_dict)\n\n    def train(self, train_input, train_target, num_epochs) -> None:\n        #: train_input : tensor of size (N, C, H, W) containing a noisy version of the images.\n        #: train_target : tensor of size (N, C, H, W) containing another noisy version of the same images,\n        # which only differs from the input by their noise.\n\n        # Normalisation of data\n        train_input = train_input.to(device)\n        train_input = train_input.float() / 255.0\n        train_target = train_target.to(device)\n        train_target = train_target.float() / 255.0\n\n        print(f\"Starts Training with : mini_batch_size = {self.mini_batch_size} and num epochs = {num_epochs}\")\n        total_time = 0\n        nb_step = 0\n\n        for e in range(num_epochs):\n            for b in range(0, train_input.size(0), self.mini_batch_size):\n                start = time.time()\n\n                output = self.model(train_input.narrow(0, b, self.mini_batch_size))\n                loss = self.criterion(output, train_target.narrow(0, b, self.mini_batch_size))\n                self.model.zero_grad()\n                loss.backward()\n                self.optimizer.step()\n                nb_step += 1\n\n                end = time.time()\n                total_time += end - start\n\n                if nb_step % 100 == 0:\n                    print(\n                        f\"Epoch number : {e + 1}, Step number : {nb_step},\"\n                        f\" mini_batch_size = {self.mini_batch_size}, Total running time : {total_time:.1f} s\")\n\n        print(f\"End of training with total running time : {total_time:.1f} s\")\n\n    def predict(self, test_input) -> torch.Tensor:\n        #: test_input : 
tensor of size (N1 , C, H, W) that has to be denoised by the trained\n        # or the loaded network.\n        #: returns a tensor of the size (N1 , C, H, W)\n\n        # Normalisation of data\n        test_input = test_input.to(device)\n        test_input = test_input.float() / 255.0\n\n        output = self.model(test_input)\n        output = output * 255.0\n        return output\n","repo_name":"hmiranda-queiros/DL_Project","sub_path":"Miniproject_1/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"39772766484","text":"from operator import length_hint #operator.length_hint gives the length of an iterable such as a list or tuple (len() would also work here).\nNumber = int(input(\"Enter Number:\")) #Required number: how many prime numbers to show.\ndef prime(N): #define prime as a function where N is the number to test.\n    if N <= 1: #numbers less than or equal to 1 are not prime.\n        return False #return the result to the caller.\n    for i in range(2, N): #try every candidate divisor i from 2 up to N-1 (range excludes its end value N).\n        if (N % i == 0): #if i divides N evenly, N is not prime.\n            return False #Not a prime number.\n    return True #Prime number.\ni = 2 #First prime number is 2.\nlst = [] #Creating empty list.\nwhile True: #loop until enough primes have been collected.\n    if (prime(i)): #keep i only if it is prime.\n        lst.append(i) #Adding i to the end of list.\n    if (length_hint(lst) == Number): #length_hint() returns the number of elements in the list.\n        break #Stop once the requested count is reached.\n    i += 1 #move on to the next candidate.\nprint(\"First \" + str(Number) + \" prime numbers are:\") #Print the heading line.\nprint(lst) #Printing the list of prime numbers.","repo_name":"mirmoheuddin22/Python_Winter_2022_3","sub_path":"N prime number.py","file_name":"N prime number.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"13"} +{"seq_id":"13506592355","text":"import argparse\nimport json\nimport logging\nimport numpy as np\nimport os\n\nimport sklearn.discriminant_analysis\nimport sklearn.ensemble\nimport sklearn.feature_selection\nimport sklearn.impute\nimport sklearn.linear_model\nimport sklearn.naive_bayes\nimport sklearn.neighbors\nimport sklearn.neural_network\nimport sklearn.preprocessing\nimport sklearn.svm\nimport sklearn.tree\n\nimport lccv\n\nimport openml\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--output_directory', type=str, help='directory to store output',\n                        default=os.path.expanduser('~') + '/experiments/lccv/')\n    parser.add_argument('--job_idx', type=int, default=None)\n    # a store_true flag: 'type=bool' would have treated any non-empty string as True\n    parser.add_argument('--verbose', action='store_true')\n    parser.add_argument('--study_id', type=str, default=271)\n\n    return parser.parse_args()\n\n\nlearners = [\n    sklearn.svm.LinearSVC(),\n    sklearn.tree.DecisionTreeClassifier(),\n    sklearn.tree.ExtraTreeClassifier(),\n    sklearn.linear_model.LogisticRegression(),\n    sklearn.linear_model.PassiveAggressiveClassifier(),\n    sklearn.linear_model.Perceptron(),\n    sklearn.linear_model.RidgeClassifier(),\n    sklearn.linear_model.SGDClassifier(),\n    sklearn.neural_network.MLPClassifier(),\n    sklearn.discriminant_analysis.LinearDiscriminantAnalysis(),\n    sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis(),\n    sklearn.naive_bayes.BernoulliNB(),\n    sklearn.naive_bayes.MultinomialNB(),\n    
sklearn.neighbors.KNeighborsClassifier(),\n sklearn.ensemble.ExtraTreesClassifier(),\n sklearn.ensemble.RandomForestClassifier(),\n sklearn.ensemble.GradientBoostingClassifier(),\n]\n\n\ndef highest_2power_below(n) -> int:\n p = int(np.log2(n * .9))\n return int(pow(2, p) / 9 * 10)\n\n\ndef clf_as_pipeline(clf, numeric_indices, nominal_indices):\n numeric_transformer = sklearn.pipeline.make_pipeline(\n sklearn.impute.SimpleImputer(),\n sklearn.preprocessing.StandardScaler())\n\n # note that the dataset is encoded numerically, hence we can only impute\n # numeric values, even for the categorical columns.\n categorical_transformer = sklearn.pipeline.make_pipeline(\n sklearn.impute.SimpleImputer(strategy='constant', fill_value=-1),\n sklearn.preprocessing.OneHotEncoder(handle_unknown='ignore'))\n\n transformer = sklearn.compose.ColumnTransformer(\n transformers=[\n ('numeric', numeric_transformer, numeric_indices),\n ('nominal', categorical_transformer, nominal_indices)],\n remainder='passthrough')\n\n pipeline = sklearn.pipeline.make_pipeline(transformer,\n sklearn.feature_selection.VarianceThreshold(),\n clf)\n return pipeline\n\n\ndef run_classifier_on_task(\n learner_idx: int,\n task: openml.tasks.OpenMLSupervisedTask,\n output_directory: str, verbose: bool):\n nominal_indices = task.get_dataset().get_features_by_type('nominal', [task.target_name])\n numeric_indices = task.get_dataset().get_features_by_type('numeric', [task.target_name])\n clf = clf_as_pipeline(learners[learner_idx], numeric_indices, nominal_indices)\n x, y = task.get_X_and_y(dataset_format='array')\n unique, counts = np.unique(y, return_counts=True)\n logging.info('class dist (all): %s' % dict(zip(unique, counts)))\n size_big = highest_2power_below(len(x))\n\n indices_big = np.random.permutation(np.arange(len(x)))[:size_big]\n indices_small = indices_big[:int(size_big/2)]\n x_big, y_big = x[indices_big], y[indices_big]\n unique_big, counts_big = np.unique(y_big, return_counts=True)\n logging.info('class dist (big): %s' % dict(zip(unique_big, counts_big)))\n x_small, y_small = x[indices_small], y[indices_small]\n unique_small, counts_small = np.unique(y_small, return_counts=True)\n logging.info('class dist (small): %s' % dict(zip(unique_small, counts_small)))\n\n output_dir = os.path.join(output_directory, str(task.task_id))\n os.makedirs(output_dir, exist_ok=True)\n filename = 'result_%s.json' % str(learners[learner_idx]) # do not use full pipeline name\n if os.path.isfile(os.path.join(output_dir, filename)):\n logging.info('clf %s on dataset %s already exists' % (str(learners[learner_idx]), task.get_dataset().name))\n return\n\n logging.info('dataset: %s, shape: %s > %s > %s' % (task.get_dataset().name,\n x.shape, x_big.shape,\n x_small.shape))\n results_lccv = lccv.lccv(clf, x_small, y_small,\n enforce_all_anchor_evaluations=True, verbose=verbose)\n prediction = results_lccv[3].get_ipl_estimate_at_target(size_big)\n\n cv_big = sklearn.model_selection.cross_val_score(\n clf, x_big, y_big, cv=10, scoring='accuracy')\n cv_small = sklearn.model_selection.cross_val_score(\n clf, x_small, y_small, cv=10, scoring='accuracy')\n\n all_results = {\n 'sizes': [int(size_big/2), size_big],\n 'lccv': results_lccv[2],\n 'cv': {\n len(x_small): {\n 'n': 10,\n 'mean': np.mean(1 - cv_small),\n 'std': np.std(1 - cv_small)\n },\n len(x_big): {\n 'n': 10,\n 'mean': np.mean(1 - cv_big),\n 'std': np.std(1 - cv_big)\n }\n },\n 'prediction': {\n int(size_big): prediction\n }\n }\n with open(os.path.join(output_dir, filename), 'w') as fp:\n 
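# persist the per-task summary (anchor sizes, LCCV learning-curve records, 10-fold CV stats,\n        # and the IPL-extrapolated error estimate at the large anchor) as one JSON document\n        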
json.dump(all_results, fp)\n logging.info('Results written to file: %s' % os.path.join(output_dir, filename))\n\n\ndef run(args):\n suite = openml.study.get_suite(args.study_id)\n if args.job_idx is not None:\n task_idx = int(args.job_idx / len(learners))\n learner_idx = int(args.job_idx % len(learners))\n task = openml.tasks.get_task(suite.tasks[task_idx])\n if not isinstance(task, openml.tasks.OpenMLSupervisedTask):\n raise ValueError('Can run on Supervised Classification tasks')\n run_classifier_on_task(learner_idx, task, args.output_directory, args.verbose)\n else:\n for task_idx, task_id in enumerate(suite.tasks):\n for learner_idx in range(len(learners)):\n try:\n task = openml.tasks.get_task(task_id)\n data_name = task.get_dataset().name\n logging.info('(%d/%d) starting task %d: %s' % (\n task_idx+1, len(suite.tasks), task.task_id, data_name))\n if not isinstance(task, openml.tasks.OpenMLSupervisedTask):\n raise ValueError('Can run on Supervised Classif. tasks')\n run_classifier_on_task(learner_idx, task, args.output_directory, args.verbose)\n except Exception as e:\n logging.warning('An exception: %s' % str(e))\n print(e)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='[%(asctime)s] [%(levelname)s] %(message)s')\n args = parse_args()\n run(args)\n","repo_name":"fmohr/lccv","sub_path":"publications/2022TPAMI/exp_data_atts.py","file_name":"exp_data_atts.py","file_ext":"py","file_size_in_byte":7224,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"13"} +{"seq_id":"13519722336","text":"import logging\nfrom collections import deque\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Deque, Iterator, List\n\nfrom ..interfaces import Market\nfrom . import Configuration\nfrom .broker import Broker\n\n\nclass MarketSource(Enum):\n \"\"\"\n Available market sources: local file list, watch list, market navigation\n through API, etc.\n \"\"\"\n\n LIST = \"list\"\n WATCHLIST = \"watchlist\"\n API = \"api\"\n\n\nclass MarketProvider:\n \"\"\"\n Provide markets from different sources based on configuration. 
Supports\n market lists, dynamic market exploration or watchlists\n \"\"\"\n\n config: Configuration\n broker: Broker\n epic_list: List[str] = []\n epic_list_iter: Iterator[str]\n market_list_iter: Iterator[Market]\n node_stack: Deque[str]\n\n def __init__(self, config: Configuration, broker: Broker) -> None:\n self.config = config\n self.broker = broker\n self._initialise()\n\n def next(self) -> Market:\n \"\"\"\n Return the next market from the configured source\n \"\"\"\n source = self.config.get_active_market_source()\n if source == MarketSource.LIST.value:\n return self._next_from_epic_list()\n elif source == MarketSource.WATCHLIST.value:\n return self._next_from_market_list()\n elif source == MarketSource.API.value:\n return self._next_from_api()\n else:\n raise RuntimeError(\"ERROR: invalid market_source configuration\")\n\n def reset(self) -> None:\n \"\"\"\n Reset internal market pointer to the beginning\n \"\"\"\n logging.info(\"Resetting MarketProvider\")\n self._initialise()\n\n def get_market_from_epic(self, epic: str) -> Market:\n \"\"\"\n Given a market epic id returns the related market snapshot\n \"\"\"\n return self._create_market(epic)\n\n def search_market(self, search: str) -> Market:\n \"\"\"\n Tries to find the market which id matches the given search string.\n If successful return the market snapshot.\n Raise an exception when multiple markets match the search string\n \"\"\"\n markets = self.broker.search_market(search)\n if markets is None or len(markets) < 1:\n raise RuntimeError(\n \"ERROR: Unable to find market matching: {}\".format(search)\n )\n else:\n # Iterate through the list and use a set to verify that the results are all the same market\n epic_set = set()\n for m in markets:\n # Epic are in format: KC.D.PRSMLN.DAILY.IP. 
Extract third element\n market_id = m.epic.split(\".\")[2]\n # Store the DFB epic\n if \"DFB\" in m.expiry and \"DAILY\" in m.epic:\n epic_set.add(market_id)\n if not len(epic_set) == 1:\n raise RuntimeError(\n \"ERROR: Multiple markets match the search string: {}\".format(search)\n )\n # Good, it means the result are all the same market\n return markets[0]\n\n def _initialise(self) -> None:\n # Initialise epic list\n self.epic_list = []\n self.epic_list_iter = iter([])\n self.market_list_iter = iter([])\n # Initialise API members\n self.node_stack = deque()\n source = self.config.get_active_market_source()\n if source == MarketSource.LIST.value:\n self.epic_list = self._load_epic_ids_from_local_file(\n Path(self.config.get_epic_ids_filepath())\n )\n elif source == MarketSource.WATCHLIST.value:\n market_list = self._load_markets_from_watchlist(\n self.config.get_watchlist_name()\n )\n self.market_list_iter = iter(market_list)\n elif source == MarketSource.API.value:\n self.epic_list = self._load_epic_ids_from_api_node(\"180500\")\n else:\n raise RuntimeError(\"ERROR: invalid market_source configuration\")\n self.epic_list_iter = iter(self.epic_list)\n\n def _load_epic_ids_from_local_file(self, filepath: Path) -> List[str]:\n \"\"\"\n Read a file from filesystem containing a list of epic ids.\n The filepath is defined in the configuration file\n Returns a 'list' of strings where each string is a market epic\n \"\"\"\n # define empty list\n epic_ids = []\n try:\n # open file and read the content in a list\n with filepath.open(mode=\"r\") as f:\n filecontents = f.readlines()\n for line in filecontents:\n # remove linebreak which is the last character of the string\n current_epic_id = line[:-1]\n epic_ids.append(current_epic_id)\n except IOError:\n # Create the file empty\n logging.error(\"{} does not exist!\".format(filepath))\n if len(epic_ids) < 1:\n logging.error(\"Epic list is empty!\")\n return epic_ids\n\n def _next_from_epic_list(self) -> Market:\n try:\n epic = next(self.epic_list_iter)\n return self._create_market(epic)\n except Exception:\n raise StopIteration\n\n def _next_from_market_list(self) -> Market:\n try:\n return next(self.market_list_iter)\n except Exception:\n raise StopIteration\n\n def _load_markets_from_watchlist(self, watchlist_name: str) -> List[Market]:\n markets = self.broker.get_markets_from_watchlist(\n self.config.get_watchlist_name()\n )\n if markets is None:\n message = \"Watchlist {} not found!\".format(watchlist_name)\n logging.error(message)\n raise RuntimeError(message)\n return markets\n\n def _load_epic_ids_from_api_node(self, node_id: str) -> List[str]:\n node = self.broker.navigate_market_node(node_id)\n if \"nodes\" in node and isinstance(node[\"nodes\"], list):\n for node in node[\"nodes\"]:\n self.node_stack.append(node[\"id\"])\n return self._load_epic_ids_from_api_node(self.node_stack.pop())\n if \"markets\" in node and isinstance(node[\"markets\"], list):\n return [\n market[\"epic\"]\n for market in node[\"markets\"]\n if any(\n [\n \"DFB\" in str(market[\"epic\"]),\n \"TODAY\" in str(market[\"epic\"]),\n \"DAILY\" in str(market[\"epic\"]),\n ]\n )\n ]\n return []\n\n def _next_from_api(self) -> Market:\n # Return the next item in the epic_list, but if the list is finished\n # navigate the next node in the stack and return a new list\n try:\n return self._next_from_epic_list()\n except Exception:\n self.epic_list = self._load_epic_ids_from_api_node(self.node_stack.pop())\n self.epic_list_iter = iter(self.epic_list)\n return 
self._next_from_epic_list()\n\n def _create_market(self, epic_id: str) -> Market:\n market = self.broker.get_market_info(epic_id)\n if market is None:\n raise RuntimeError(\"Unable to fetch data for {}\".format(epic_id))\n return market\n","repo_name":"ilcardella/TradingBot","sub_path":"tradingbot/components/market_provider.py","file_name":"market_provider.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","stars":230,"dataset":"github-code","pt":"13"} +{"seq_id":"31219470328","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib as mpl\nfrom os import path\nfrom itertools import product\n#from lib.categorical import violinplot\nfrom seaborn import violinplot\n\n# Style\npalette = ['#80e050','#755575']\n\nvolume_path = path.abspath('data/volumes.csv')\ndf = pd.read_csv(volume_path)\n\ndf.loc[df['Processing']=='Unprocessed', 'Template'] = ''\nax = violinplot(\n\tx=\"Processing\",\n\ty='Volume Change Factor',\n\tdata=df.loc[df['Processing']!='Unprocessed'],\n\thue=\"Template\",\n\tsaturation=1,\n\tsplit=True,\n\tinner='quartile',\n\tpalette=palette,\n\tdensity_norm='area',\n\t#dodge=False,\n\tlinewidth=mpl.rcParams['grid.linewidth'],\n\tlinecolor='w',\n\t)\n","repo_name":"TheChymera/RepSeP","sub_path":"scripts/vc_violin.py","file_name":"vc_violin.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"13"} +{"seq_id":"34786041688","text":"from rct229.rule_engine.rule_base import RuleDefinitionBase\nfrom rct229.rule_engine.rule_list_indexed_base import RuleDefinitionListIndexedBase\nfrom rct229.rule_engine.user_baseline_proposed_vals import UserBaselineProposedVals\nfrom rct229.rulesets.ashrae9012019.data.schema_enums import schema_enums\nfrom rct229.rulesets.ashrae9012019.ruleset_functions.get_hvac_systems_serving_zone_health_safety_vent_reqs import (\n get_hvac_systems_serving_zone_health_safety_vent_reqs,\n)\nfrom rct229.utils.pint_utils import ZERO\n\nFAN_SYSTEM_OPERATION = schema_enums[\"FanSystemOperationOptions\"]\n\n\nclass Section19Rule27(RuleDefinitionListIndexedBase):\n \"\"\"Rule 27 of ASHRAE 90.1-2019 Appendix G Section 19 (HVAC - General)\"\"\"\n\n def __init__(self):\n super(Section19Rule27, self).__init__(\n rmrs_used=UserBaselineProposedVals(False, True, False),\n each_rule=Section19Rule27.HVACRule(),\n index_rmr=\"baseline\",\n id=\"19-27\",\n description=\"HVAC fans shall remain on during unoccupied hours in spaces that have health and safety mandated minimum ventilation requirements during unoccupied hours in the baseline design.\",\n ruleset_section_title=\"HVAC - General\",\n standard_section=\"Section G3.1-4 Schedules exception #2 for the proposed building and Section G3.1.2.4\",\n is_primary_rule=True,\n rmr_context=\"ruleset_model_descriptions/0\",\n list_path=\"$.buildings[*].building_segments[*].heating_ventilating_air_conditioning_systems[*]\",\n )\n\n def create_data(self, context, data):\n rmi_b = context.baseline\n applicable_hvac_systems_list_b = (\n get_hvac_systems_serving_zone_health_safety_vent_reqs(rmi_b)\n )\n\n return {\"applicable_hvac_systems_list_b\": applicable_hvac_systems_list_b}\n\n class HVACRule(RuleDefinitionBase):\n def __init__(self):\n super(Section19Rule27.HVACRule, self).__init__(\n rmrs_used=UserBaselineProposedVals(False, True, False),\n required_fields={\n \"$\": [\"fan_system\"],\n \"fan_system\": [\n \"operation_during_unoccupied\",\n \"minimum_outdoor_airflow\",\n ],\n },\n )\n\n 
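# applicability: only HVAC systems flagged by the health/safety ventilation helper above are evaluated\n        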
def is_applicable(self, context, data=None):\n            hvac_b = context.baseline\n            hvac_id_b = hvac_b[\"id\"]\n            applicable_hvac_systems_list_b = data[\"applicable_hvac_systems_list_b\"]\n\n            return hvac_id_b in applicable_hvac_systems_list_b\n\n        def get_calc_vals(self, context, data=None):\n            hvac_b = context.baseline\n\n            operation_during_unoccupied_b = hvac_b[\"fan_system\"][\n                \"operation_during_unoccupied\"\n            ]\n            minimum_outdoor_airflow_b = hvac_b[\"fan_system\"][\"minimum_outdoor_airflow\"]\n\n            return {\n                \"operation_during_unoccupied_b\": operation_during_unoccupied_b,\n                \"minimum_outdoor_airflow_b\": minimum_outdoor_airflow_b,\n            }\n\n        def rule_check(self, context, calc_vals=None, data=None):\n            operation_during_unoccupied_b = calc_vals[\"operation_during_unoccupied_b\"]\n            minimum_outdoor_airflow_b = calc_vals[\"minimum_outdoor_airflow_b\"]\n\n            return (\n                operation_during_unoccupied_b == FAN_SYSTEM_OPERATION.CONTINUOUS\n                and minimum_outdoor_airflow_b > ZERO.FLOW\n            )\n\n        def get_fail_msg(self, context, calc_vals=None, data=None):\n            hvac_b = context.baseline\n            hvac_id_b = hvac_b[\"id\"]\n\n            return f\"{hvac_id_b} SERVES ZONE(S) THAT APPEAR LIKELY TO HAVE HEALTH AND SAFETY MANDATED MINIMUM VENTILATION REQUIREMENTS DURING UNOCCUPIED HOURS AND THEREFORE (IF THE HVAC SYSTEM SUPPLIES OA CFM) MAY WARRANT CONTINUOUS OPERATION DURING UNOCCUPIED HOURS PER SECTION G3.1-4 SCHEDULES EXCEPTION #2 FOR THE BASELINE BUILDING AND PER SECTION G3.1.2.4.\"\n","repo_name":"pnnl/ruleset-checking-tool","sub_path":"rct229/rulesets/ashrae9012019/section19/section19rule27.py","file_name":"section19rule27.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"13"} +{"seq_id":"15614718460","text":"from html import unescape\n\nclass Question():\n\n    def __init__(self, qcategory, qtype, qdifficulty, qquestion, qcorans, qincans) -> None:\n        self.category = qcategory\n        self.type = qtype\n        self.difficulty = qdifficulty\n        self.question = unescape(qquestion)\n        self.correct_answer = unescape(qcorans)\n        self.incorrect_answers = qincans\n        for n in range(len(self.incorrect_answers)):\n            self.incorrect_answers[n] = unescape(self.incorrect_answers[n])\n\n    def is_boolean(self):\n        if self.type == 'boolean':\n            return True\n        else:\n            return False\n    \n    def is_answer(self, ans_string):\n        \"\"\"String to boolean. 
Compares argument with object's correct\n answer attribute.\"\"\"\n if ans_string.lower() == self.correct_answer.lower():\n return True\n else:\n return False\n \n def present_answers(self, shuffled_list):\n answer_dictionary = {}\n answer_dictionary[\"A\"] = (shuffled_list[0])\n answer_dictionary[\"B\"] = (shuffled_list[1])\n answer_dictionary[\"C\"] = (shuffled_list[2])\n answer_dictionary[\"D\"] = (shuffled_list[3])\n return answer_dictionary\n\n\n \n\n ","repo_name":"powerchao/Quiz-Machine","sub_path":"question_model.py","file_name":"question_model.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"38414757542","text":"import boto3\nimport glob\nimport os\n\ndef upload_to_s3():\n s3_bucket = 'textfiles2'\n s3_bucket_region = 'us-east-1'\n folder = 'call_transcripts'\n\n key_name = folder + '/'\n s3_connect = boto3.client('s3', s3_bucket_region)\n\n # upload File to S3\n for filename in os.listdir(folder):\n\n file_key_name = folder + '/' + filename\n local_path = os.getcwd()\n local_name = local_path + '/' + key_name + filename\n upload = s3_connect.upload_file(local_name, s3_bucket, file_key_name)\n\n print(\"Files uploaded in S3 bucket!\")\n\n#upload_to_s3()","repo_name":"Srushti104/Big-data-systems","sub_path":"Assignment 2/annotation/s3_upload.py","file_name":"s3_upload.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"13"} +{"seq_id":"1070609272","text":"#!/usr/bin/env python\n\nimport pandas as pd\nfrom unittest.mock import patch, Mock\nfrom jira_link import connect_to_jira, get_tickets_from_jira\n\n\ndef test_connect_to_jira():\n # replace JIRA class inside the jira_connector module temporarily\n # with a mock object.\n with patch(\"jira_link.JIRA\") as mock_jira:\n mock_instance = Mock()\n mock_jira.return_value = mock_instance\n\n cfg = {\n \"email\": \"email@example.com\",\n \"jira_url\": \"https://jira.example.com\",\n \"api_token\": \"api_key_123\",\n }\n\n result = connect_to_jira(cfg)\n\n mock_jira.assert_called_once_with(\n {\"server\": \"https://jira.example.com\"},\n basic_auth=(\"email@example.com\", \"api_key_123\"),\n )\n assert result == mock_instance\n\n\n@patch(\"jira_link.JIRA\")\ndef test_get_tickets_from_jira(mocked_jira_class):\n # Given: a mock jira client\n mock_item = Mock()\n mock_item.field = \"status\"\n mock_item.fromString = \"Open\"\n mock_item.toString = \"Closed\"\n\n mock_history = Mock()\n mock_history.items = [mock_item]\n mock_history.created = \"2023-09-26T15:59:30.846+0200\"\n\n mock_issue = Mock()\n mock_issue.key = \"TEST-123\"\n mock_issue.changelog.histories = [mock_history]\n\n mock_filter = Mock()\n mock_filter.name = \"some_filter\"\n mock_filter.jql = \"MOCKED JQL\"\n\n mocked_jira_client = mocked_jira_class.return_value\n mocked_jira_client.favourite_filters.return_value = [mock_filter]\n mocked_jira_client.search_issues.return_value = [mock_issue]\n\n cfg_mock = {\"jira_filter\": \"some_filter\"}\n\n # When: Execute the behavior under test (note: no need for a real\n # filter, since the mocked_jira_client returns what it does\n # regardless of the filter)\n df = get_tickets_from_jira(mocked_jira_client, cfg_mock)\n\n # Then: the dataframe we get above should match the expected result\n expected_df = pd.DataFrame(\n {\n \"ticket_id\": [\"TEST-123\"],\n \"from_status\": [\"Open\"],\n \"to_status\": [\"Closed\"],\n \"changed_at\": 
[\"2023-09-26T15:59:30.846+0200\"],\n }\n )\n\n pd.testing.assert_frame_equal(df, expected_df)\n","repo_name":"fimblo/leanStats","sub_path":"tests/test_jira_link.py","file_name":"test_jira_link.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"23512076942","text":"import time\n\nimport wandb\nimport yaml\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom agent import Agent\nfrom dqn import build_dqn\nfrom gamewrapper import GameWrapper\nfrom replaybuffer import ReplayBuffer\n\n\ndef main(args):\n game_wrapper = GameWrapper(args[\"ENV_NAME\"], args[\"MAX_NOOP_STEPS\"])\n if args[\"WRITE_TERMINAL\"]:\n print(\n \"The environment has the following {} actions: {}\".format(\n game_wrapper.env.action_space.n,\n game_wrapper.env.unwrapped.get_action_meanings(),\n )\n )\n\n if args[\"WRITE_WANDB\"]:\n wandb.init(config=args, project=\"explainable-breakout\")\n\n # Build main and target networks\n main_dqn = build_dqn(\n game_wrapper.env.action_space.n,\n args[\"LEARNING_RATE\"],\n input_shape=args[\"INPUT_SHAPE\"],\n )\n target_dqn = build_dqn(\n game_wrapper.env.action_space.n, input_shape=args[\"INPUT_SHAPE\"]\n )\n\n replay_buffer = ReplayBuffer(\n size=args[\"REPLAY_BUFFER_SIZE\"], input_shape=args[\"INPUT_SHAPE\"]\n )\n agent = Agent(\n main_dqn,\n target_dqn,\n replay_buffer,\n game_wrapper.env.action_space.n,\n input_shape=args[\"INPUT_SHAPE\"],\n batch_size=args[\"BATCH_SIZE\"],\n )\n\n # Training and evaluation\n if args[\"LOAD_FROM\"] is None:\n frame_number = 0\n rewards = []\n loss_list = []\n else:\n if args[\"WRITE_TERMINAL\"]:\n print(\"Loading from\", args[\"LOAD_FROM\"])\n meta = agent.load(args[\"LOAD_FROM\"], args[\"LOAD_REPLAY_BUFFER\"])\n\n # Apply information loaded from meta\n frame_number = meta[\"frame_number\"]\n rewards = meta[\"rewards\"]\n loss_list = meta[\"loss_list\"]\n\n # Main loop\n try:\n while frame_number < args[\"TOTAL_FRAMES\"]:\n # Training\n epoch_frame = 0\n while epoch_frame < args[\"FRAMES_BETWEEN_EVAL\"]:\n start_time = time.time()\n game_wrapper.reset()\n # life_lost = True\n episode_reward_sum = 0\n for _ in range(args[\"MAX_EPISODE_LENGTH\"]):\n # Get action\n action = agent.get_action(frame_number, game_wrapper.state)\n\n # Take step\n (\n processed_frame,\n reward,\n terminal,\n life_lost,\n ) = game_wrapper.step(action)\n frame_number += 1\n epoch_frame += 1\n episode_reward_sum += reward\n\n # Add experience to replay memory\n agent.add_experience(\n action=action,\n frame=processed_frame[:, :, 0],\n reward=reward,\n clip_reward=args[\"CLIP_REWARD\"],\n terminal=life_lost,\n )\n\n # Update agent\n if (\n frame_number % args[\"UPDATE_FREQ\"] == 0\n and agent.replay_buffer.count > args[\"MIN_REPLAY_BUFFER_SIZE\"]\n ):\n loss, _ = agent.learn(\n args[\"BATCH_SIZE\"], gamma=args[\"DISCOUNT_FACTOR\"]\n )\n loss_list.append(loss)\n\n # Update target network\n if (\n frame_number % args[\"UPDATE_FREQ_TARGET_NETWORK\"] == 0\n and frame_number > args[\"MIN_REPLAY_BUFFER_SIZE\"]\n ):\n agent.update_target_network()\n\n # Break the loop when the game is over\n if terminal:\n break\n\n rewards.append(episode_reward_sum)\n\n # Output the progress every 10 games\n if len(rewards) % 10 == 0:\n # Write to TensorBoard\n if args[\"WRITE_WANDB\"]:\n wandb.log(\n {\n \"reward\": np.mean(rewards[-10:]),\n \"loss\": np.mean(loss_list[-10:]),\n \"smooth_loss\": np.mean(loss_list[-100:]),\n \"time\": time.time() - start_time,\n }\n )\n\n if 
args[\"WRITE_TERMINAL\"]:\n                        print(\n                            f\"Game number: {str(len(rewards)).zfill(6)}\\t\"\n                            f\"Frame number: {str(frame_number).zfill(8)}\\t\"\n                            f\"Average reward: {np.mean(rewards[-10:]):0.1f}\\t\"\n                            f\"Time taken: {(time.time() - start_time):.1f}s\"\n                        )\n\n            # Evaluation every `FRAMES_BETWEEN_EVAL` frames\n            terminal = True\n            eval_rewards = []\n            evaluate_frame_number = 0\n\n            for _ in range(args[\"EVAL_LENGTH\"]):\n                if terminal:\n                    game_wrapper.reset(evaluation=True)\n                    life_lost = True\n                    episode_reward_sum = 0\n                    terminal = False\n\n                # Breakout requires a \"fire\" action (action #1) to start the\n                # game each time a life is lost.\n                # Otherwise, the agent would sit around doing nothing.\n                action = (\n                    1\n                    if life_lost\n                    else agent.get_action(\n                        frame_number, game_wrapper.state, evaluation=True\n                    )\n                )\n\n                # Step action\n                _, reward, terminal, life_lost = game_wrapper.step(action)\n                evaluate_frame_number += 1\n                episode_reward_sum += reward\n\n                # On game-over\n                if terminal:\n                    eval_rewards.append(episode_reward_sum)\n\n            if len(eval_rewards) > 0:\n                final_score = np.mean(eval_rewards)\n            else:\n                # In case the game is longer than the number of frames allowed\n                final_score = episode_reward_sum\n            # Print score and write to tensorboard\n\n            if args[\"WRITE_WANDB\"]:\n                wandb.log(\n                    {\"frame_number\": frame_number, \"evaluation_score\": final_score}\n                )\n\n            if args[\"WRITE_TERMINAL\"]:\n                print(\"Evaluation score:\", final_score)\n\n            # Save model\n            if len(rewards) > 300 and args[\"SAVE_TO\"] is not None:\n                agent.save(\n                    f\"{args['SAVE_TO']}/save-{str(frame_number).zfill(8)}\",\n                    frame_number=frame_number,\n                    rewards=rewards,\n                    loss_list=loss_list,\n                    save_buffer=args[\"SAVE_REPLAY_BUFFER\"],\n                )\n                wandb.save(f\"{args['SAVE_TO']}/save-{str(frame_number).zfill(8)}/*\")\n\n    except KeyboardInterrupt:\n        print(\"\\nTraining exited early.\")\n\n    if args[\"SAVE_TO\"] is None:\n        try:\n            args[\"SAVE_TO\"] = input(\n                \"Would you like to save the trained model?\"\n                \"If so, type in a save path, otherwise, interrupt with ctrl+c. 
"\n            )\n        except KeyboardInterrupt:\n            print(\"\\nExiting...\")\n\n    if args[\"SAVE_TO\"] is not None:\n        print(\"Saving...\")\n        agent.save(\n            f\"{args['SAVE_TO']}/save-{str(frame_number).zfill(8)}\",\n            frame_number=frame_number,\n            rewards=rewards,\n            loss_list=loss_list,\n            save_buffer=args[\"SAVE_REPLAY_BUFFER\"],\n        )\n        wandb.save(f\"{args['SAVE_TO']}/save-{str(frame_number).zfill(8)}/*\")\n        print(\"Saved.\")\n\n\nif __name__ == \"__main__\":\n    gpu_devices = tf.config.experimental.list_physical_devices(\"GPU\")\n    for device in gpu_devices:\n        tf.config.experimental.set_memory_growth(device, True)\n    print(\"Num GPUs Available: \", len(tf.config.list_physical_devices(\"GPU\")))\n\n    with open(\"config.yaml\") as f:\n        config = yaml.load(f, Loader=yaml.FullLoader)\n    main(config)\n","repo_name":"alexcosta13/explainable-breakout","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"37470508358","text":"import googletrans\r\n\r\ndef anlamli_sozcuk_arama(arananmetin):\r\n    cevirmen = googletrans.Translator()\r\n    dil=cevirmen.detect(arananmetin)\r\n    diltespiti=dil.lang\r\n    print(diltespiti)\r\n\r\n    return diltespiti\r\n\r\ntext=\"ug|ctškhtgngog{øpvgok\" #a text for testing purposes.\r\nciktilar=['']\r\n\r\ngirilen_metin=input(\"enter the text to be decrypted with the Caesar cipher:\")\r\nmetin_uzunlugu=len(girilen_metin)\r\ncikti=(\"\")\r\n\r\nprint(girilen_metin)\r\nprint(metin_uzunlugu)\r\nfor u in range(0,25):\r\n    anahtar=u\r\n    for i in range(0,metin_uzunlugu):\r\n        #print(ord(girilen_metin[i]))    #these lines were used to observe the change at the character level.\r\n        j=girilen_metin[i]  #the character at index i is assigned to the variable j\r\n        #print(j)\r\n        #print(ord(j))   #on these lines you can see the ASCII value of the characters stored in j\r\n        #print(chr(ord(j)))\r\n        k=chr(ord(j) - anahtar)  #by adding to or subtracting from the character's ASCII value, the character is changed - shifted\r\n        #print(\" k data type {} \".format(type(k)))\r\n        cikti=cikti + k   # the characters are concatenated back into a word - text.\r\n        #print(\"*****************\")\r\n        #print(cikti)\r\n        if len(cikti)>(int(metin_uzunlugu)-1):\r\n            print(cikti)\r\n            ciktilar.append(cikti)\r\n            cikti = (\"\")   #the code runs for every value of i, so once all characters have been shifted the result is written to the ciktilar list and the cikti variable is cleared\r\n\r\n    print(\"------------------------\\n\")\r\n\r\n#print(cikti)\r\nprint(\"------------------------\\n\")\r\nprint(ciktilar)\r\nciktisayisi= len(ciktilar)\r\nprint(\" number of outputs ={}\".format(ciktisayisi))\r\nprint(\"***************************************************\")\r\nfor i in range(1,len(ciktilar)):\r\n    print(\"code is here\")\r\n    tespitedilen_dil=anlamli_sozcuk_arama(ciktilar[i])\r\n    if tespitedilen_dil=='tr':\r\n        cozulmus_metin=ciktilar[i]\r\n        print(\"the decrypted form of {} is {}\".format(girilen_metin,cozulmus_metin))\r\n        exit()\r\n    else:\r\n        print(\"decryption is unsuccessful\")","repo_name":"MAkifSinan/cryptography","sub_path":"sezarşifreçözücü.py","file_name":"sezarşifreçözücü.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"22801021177","text":"#!/bin/python\r\n\r\n\r\n# import\r\nimport re\r\nimport numpy as np\r\n\r\n# Function to find the tree block of the NEXUS file\r\ndef FindStartLine(infile):\r\n\tline_num = 
0\r\n\tbegin_line = []\r\n\tsearch_string = 'begin tree'\r\n\twith open(infile, 'r') as file:\r\n\t\tfor line in file:\r\n\t\t\tline_num += 1\r\n\t\t\tif search_string in line:\r\n\t\t\t\tbegin_line.append((line_num, line.rstrip()))\r\n\treturn begin_line\r\n\r\n# Get the string of the tree\r\ndef GetTreeString(infile):\r\n\tbegin_line_num = FindStartLine(infile)\r\n\ttree_line_num = begin_line_num[0][0] + 1\r\n\twith open(infile, 'r') as file:\r\n\t\t# readlines() is zero-indexed, so shift the one-indexed tree line number by one\r\n\t\ttree = file.readlines()[(tree_line_num - 1):tree_line_num]\r\n\treturn tree\r\n\r\n# Edit the statistics / info in the comments\r\ndef EditStatComments(infile):\r\n\ttree_text = \"\".join(GetTreeString(infile))\r\n\r\n\tcomment_pattern = re.compile('(?<=)\\\[.+?\\\](?=)')\r\n\tcomment_list = comment_pattern.findall(tree_text)\r\n\r\n\r\n\tleading_pattern = r'\\&(.*?)nt\\)\\=\\\"'\r\n\tleading_replacement = '&label='\r\n\r\n\tfollowing_pattern = r'\\\"(.*?)\\}\\]'\r\n\tfollowing_replacement = ''\r\n\r\n\tedited_comment_list = []\r\n\tfor element in comment_list:\r\n\t\tpreceding_sub = re.sub(leading_pattern, leading_replacement, element)\r\n\t\tfollowing_sub = re.sub(following_pattern, following_replacement, preceding_sub)\r\n\t\tedited_comment_list.append(following_sub)\r\n\t\r\n\treturn edited_comment_list\r\n\r\n\r\ndef GetPositionRanges(infile):\r\n\t# get string of the tree\r\n\ttree_text = \"\".join(GetTreeString(infile))\r\n\r\n\t# get edited stat blocks\r\n\tnew_stats = EditStatComments(infile)\r\n\t\r\n\t# start counter\r\n\tcount = 0\r\n\r\n\t# declare a list for the \"start\" of insert position\r\n\tpos_start_list = []\r\n\r\n\t# declare a list for the \"end\" of insert position\r\n\tpos_end_list = []\r\n\r\n\t# for each character in the tree string\r\n\tfor char in range(len(tree_text)):\r\n\t\t# if the character is a specified character\r\n\t\tif tree_text[char] == \"[\":\r\n\t\t\t# add +1 to the counter\r\n\t\t\tcount += 1\r\n\r\n\t# Make list of start positions\r\n\t# for characters at position in tree string\r\n\tfor pos_start, char in enumerate(tree_text):\r\n\t\t# if the character is a specified character\r\n\t\tif(char == \"[\"):\r\n\t\t\t# add the position of that character to list\r\n\t\t\tpos_start_list.append(pos_start)\r\n\r\n\t# Make list of end positions\r\n\t# for characters at position in tree string\r\n\tfor pos_start, char in enumerate(tree_text):\r\n\t\t# if the character is a specified character\r\n\t\tif(char == \"]\"):\r\n\t\t\t# add the position of that character to list\r\n\t\t\tpos_end_list.append(pos_start)\r\n\r\n\t# Make a list of (pos_start, pos_end) tuples\r\n\t# declare list of tuples for position ranges\r\n\tpos_range_list = []\r\n\r\n\t# for index position in the range of index positions in start list\r\n\tfor index in range(len(pos_start_list)):\r\n\t\t# make a tuple of pos_start and pos_end at this index in each list\r\n\t\tpos_range = (pos_start_list[index],pos_end_list[index])\r\n\t\t# append tuple to pos_range_list\r\n\t\tpos_range_list.append(pos_range)\r\n\r\n\t#print((pos_range_list))\r\n\treturn pos_range_list\r\n\r\n\r\ndef InsertAtRange(infile):\r\n\t\r\n\tmystring = \"\".join(GetTreeString(infile))\r\n\treplacements = EditStatComments(infile)\r\n\tpos_range_list = GetPositionRanges(infile)\r\n\tnew_string = []\r\n\r\n\tfor coord in range(len(replacements)-1):\r\n\t\t\r\n\t\tlength = int(len(replacements))\r\n\r\n\t\tif coord == 0:\r\n\t\t\tbegin = mystring[0:(pos_range_list[0][0])]\r\n\t\t\tfirst = replacements[coord]\r\n\t\t\tfollowing = 
mystring[(pos_range_list[0][1]):pos_range_list[1][0]]\r\n\t\t\t## INITIAL PART OF STRING\r\n\t\t\tnew_string.append(begin+first+following)\r\n\t\t\t\r\n\t\telif coord == 1:\r\n\t\t\tsecond = replacements[coord-1]\r\n\t\t\tnew_string.append(second)\r\n\t\t\t\r\n\t\telif 1 < coord < length:\r\n\t\t\tadd = replacements[coord]\r\n\t\t\tbegin = mystring[pos_range_list[coord][1]:pos_range_list[(coord+1)][0]]\r\n\t\t\tnew_string.append(add+begin)\r\n\r\n\tadd = replacements[coord+1]\r\n\tbegin = mystring[pos_range_list[coord+1][1]:pos_range_list[-1][0]]\r\n\tnew_string.append(add+begin)\r\n\tend = mystring[pos_range_list[-1][1]:int(len(mystring))]\r\n\r\n\tnew_string.append(end)\r\n\r\n\tFINAL = \"\".join(new_string)\r\n\treturn FINAL\r\n\r\n\r\n############# MAIN ##################\r\n\r\nif __name__ == '__main__':\r\n\r\n ## the infile name goes here, as a string\r\n\tfile_name = '20180920_ITSTrachyFungi_aligned_trim_oneoutgroup_JLAMJS_mb.nexus.con.tre'\r\n\r\n\tRESULT = InsertAtRange(file_name)\r\n\r\n\tprint(RESULT)\r\n\r\n\twith open('EDITEDSTATS.txt', 'w') as f:\r\n\t\tf.write(RESULT)","repo_name":"kbeigel/complicated_string_edits","sub_path":"edit_newick_comments.py","file_name":"edit_newick_comments.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"21373863193","text":"def read_matrix(cells_delimiter):\n (rows_count, columns_count) = map(int, input().split(cells_delimiter))\n return [list(map(int, input().split(cells_delimiter))) for _ in range(rows_count)]\n\ndef sum_matrix(matrix):\n the_sum = 0\n rows_count = len(matrix)\n columns_count = len(matrix[0])\n\n for row_index in range(rows_count):\n the_sum += sum(matrix[row_index])\n return the_sum\n\nmatrix = read_matrix(' ')\nprint(sum_matrix(matrix))\nprint(matrix)","repo_name":"StanShishmanov/Python-Courses","sub_path":"Python Advanced - SoftUni/Multidimensional Lists/1. Sum Matrix Elements .py","file_name":"1. 
Sum Matrix Elements .py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"23512076942","text":"\"\"\"\nBuild a parse tree for a fully parenthesized expression using a binary tree\nEvaluate the expression with the parse tree\n\"\"\"\nfrom pythonds.trees.binaryTree import BinaryTree\nfrom pythonds.basic.stack import Stack\nimport operator\n\n\n# Build the expression parse tree\ndef buildParseTree(fpexp):\n    f_split = fpexp  # convert the expression into a list\n    tree_stack = Stack()  # stack built for retrieving parent nodes\n    exp_tree = BinaryTree('')  # create the tree\n    tree_stack.push(exp_tree)\n    current_tree = exp_tree\n\n    for i in f_split:\n        if i == '(':  # start of an expression\n            current_tree.insertLeft('')  # create a left child node\n            tree_stack.push(current_tree)  # push the current node\n            current_tree = current_tree.getLeftChild()  # descend\n        elif i not in ['+', '-', '*', '/', ')']:  # operand\n            current_tree.setRootVal(int(i))  # set the current value\n            parent = tree_stack.pop()  # ascend: pop the parent node\n            current_tree = parent\n        elif i in ['+', '-', '*', '/']:  # operator\n            current_tree.setRootVal(i)  # set the current value\n            current_tree.insertRight('')  # create a right child node\n            tree_stack.push(current_tree)  # push\n            current_tree = current_tree.getRightChild()  # descend\n        elif i in [')']:  # end of an expression\n            current_tree = tree_stack.pop()\n        else:\n            raise ValueError\n\n    return exp_tree\n\n\n# Recursive approach\n# Base case: the node has no left or right children\n# Reduce the problem: split the expression into left and right subtrees\n# Call itself\nopers = {'+': operator.add, '-': operator.sub,\n         '*': operator.mul, '/': operator.truediv}\n\n\ndef evaluate(parseTree):\n    left_tree = parseTree.getLeftChild()\n    right_tree = parseTree.getRightChild()\n    if left_tree and right_tree:  # if both subtrees are non-empty\n        fn = opers[parseTree.getRootVal()]\n        return fn(evaluate(left_tree), evaluate(right_tree))\n    else:\n        return parseTree.getRootVal()\n\n\n# Approach from 9_7_1: evaluate the tree with a postorder traversal\ndef post_order_eval(tree):\n    res1 = None\n    res2 = None\n    if tree:\n        res1 = post_order_eval(tree.getLeftChild())\n        res2 = post_order_eval(tree.getRightChild())  # get the values of the left and right subtrees\n        if res1 and res2:  # if both have values, apply the operator at the root node\n            return opers[tree.getRootVal()](res1, res2)\n        else:\n            return tree.getRootVal()\n\n\n# Approach from 9_7_1: recover the fully parenthesized expression from the tree\ndef print_exp(tree):\n    sval = ''\n    if tree:\n        left_num = tree.getLeftChild()\n        operator = tree.getRootVal()\n        right_num = tree.getRightChild()\n        if left_num and right_num:\n            sval = '(' + print_exp(tree.getLeftChild())\n            sval = sval + str(tree.getRootVal())\n            sval = sval + print_exp(tree.getRightChild()) + ')'\n        else:\n            sval = str(tree.getRootVal())\n    return sval\n\n\na = \"((6-(5+(3*4)))/2)\"\nh = buildParseTree(a)\nprint(evaluate(h))\nprint(post_order_eval(h))\nprint(print_exp(h))\n\n","repo_name":"siyi-wind/cs-course-project","sub_path":"class/9_6_1.py","file_name":"9_6_1.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"13"} +{"seq_id":"4890920375","text":"\"\"\"\nTESTS is a dict with all of your tests.\nKeys for this will be the categories' names.\nEach test is a dict with\n    \"input\" -- input data for a user function\n    \"answer\" -- your right answer\n    \"explanation\" -- not necessarily a key, it's used for an additional info in animation.\n\"\"\"\nfrom random import randint\nimport calendar\nimport datetime\n\n\ndef make_random_tests(num):\n    random_tests = []\n    f = datetime.date(1, 1, 1)\n    for _ in range(num):\n        tgt_date = f + datetime.timedelta(days=randint(0, 3652058))\n        y, m, d, w = tgt_date.year, tgt_date.month, tgt_date.day, randint(0, 6)\n        random_tests.append({\n            \"input\": [y, m, d, w],\n            \"answer\": [d.day for d in [week for week in calendar.Calendar(firstweekday=w).monthdatescalendar(y, m) if tgt_date in week].pop()]\n        })\n    return 
random_tests\n\n\nTESTS = {\n \"Randoms\": make_random_tests(5),\n \"Basics\": [\n {\n \"input\": [2020, 1, 1, 0],\n \"answer\": [30, 31, 1, 2, 3, 4, 5],\n },\n {\n \"input\": [2020, 9, 20, 6],\n \"answer\": [20, 21, 22, 23, 24, 25, 26],\n },\n {\n \"input\": [2020, 9, 30, 0],\n \"answer\": [28, 29, 30, 1, 2, 3, 4],\n },\n {\n \"input\": [2020, 2, 29, 2],\n \"answer\": [26, 27, 28, 29, 1, 2, 3],\n },\n ],\n}\n","repo_name":"kurosawa4434/checkio-mission-weekly-calendar","sub_path":"verification/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"70723181138","text":"from django import forms\nfrom django_select2.forms import Select2Widget, Select2MultipleWidget\n\nfrom home.models import Suburb, PricingMethod, PropertyType\n\n\nclass SearchForm(forms.Form):\n \"\"\"Form for search houses.\"\"\"\n suburbs = forms.MultipleChoiceField(label='Area selection',\n required=False)\n PRICE_FROM_CHOICES = (\n (None, 'Any'),\n (0, '$0'),\n (25000, '$25,000'),\n (50000, '$50,000'),\n (100000, '$100,000'),\n (150000, '$150,000'),\n (200000, '$200,000'),\n (250000, '$250,000'),\n (300000, '$300,000'),\n (350000, '$350,000'),\n (400000, '$400,000'),\n (450000, '$450,000'),\n (500000, '$500,000'),\n (600000, '$600,000'),\n (700000, '$700,000'),\n (800000, '$800,000'),\n (900000, '$900,000'),\n (1000000, '$1m'),\n (1200000, '$1,2m'),\n (1400000, '$1,4m'),\n (1600000, '$1,6m'),\n )\n PRICE_TO_CHOICES = (\n (None, 'Any'),\n (0, '$0'),\n (25000, '$25,000'),\n (50000, '$50,000'),\n (100000, '$100,000'),\n (150000, '$150,000'),\n (200000, '$200,000'),\n (250000, '$250,000'),\n (300000, '$300,000'),\n (350000, '$350,000'),\n (400000, '$400,000'),\n (450000, '$450,000'),\n (500000, '$500,000'),\n (600000, '$600,000'),\n (700000, '$700,000'),\n (800000, '$800,000'),\n (900000, '$900,000'),\n (1000000, '$1m'),\n (1200000, '$1,2m'),\n (1400000, '$1,4m'),\n (2000000, '$2m'),\n (2500000, '$2,5m'),\n (3500000, '$3,5m'),\n (5000000, '$5m'),\n (7500000, '$7,5m'),\n (10000000, '$10m'),\n (999999999, '$10m+'),\n )\n price_from = forms.ChoiceField(label='Price from',\n required=False,\n choices=PRICE_FROM_CHOICES)\n price_to = forms.ChoiceField(label='Price to',\n required=False,\n choices=PRICE_TO_CHOICES,\n initial=999999999)\n\n PRICING_METHODS_CHOICES = ((pricing_method.id, pricing_method.name)\n for pricing_method in PricingMethod.objects.order_by('name'))\n pricing_methods = forms.MultipleChoiceField(label='Pricing methods',\n required=False,\n choices=PRICING_METHODS_CHOICES,\n widget=Select2MultipleWidget(\n attrs={'data-placeholder': 'All pricing methods'}\n ),\n help_text='Select all')\n\n BEDROOMS_FROM_CHOICES = (\n (None, 'Any'),\n (1, '1'),\n (2, '2'),\n (3, '3'),\n (4, '4'),\n (5, '5'),\n )\n bedrooms_from = forms.ChoiceField(label='Bedrooms from',\n required=False,\n choices=BEDROOMS_FROM_CHOICES)\n\n BEDROOMS_TO_CHOICES = (\n (None, 'Any'),\n (1, '1'),\n (2, '2'),\n (3, '3'),\n (4, '4'),\n (5, '5'),\n (999, '5+'),\n )\n bedrooms_to = forms.ChoiceField(label='Bedrooms to',\n required=False,\n choices=BEDROOMS_TO_CHOICES,\n initial=999)\n\n BATHROOMS_FROM_CHOICES = (\n (None, 'Any'),\n (1, '1'),\n (2, '2'),\n (3, '3'),\n )\n bathrooms_from = forms.ChoiceField(label='Bathrooms from',\n required=False,\n choices=BATHROOMS_FROM_CHOICES)\n BATHROOMS_TO_CHOICES = (\n (1, '1'),\n (2, '2'),\n (3, '3'),\n (999, '3+'),\n )\n bathrooms_to = forms.ChoiceField(label='Bathrooms to',\n 
required=False,\n choices=BATHROOMS_TO_CHOICES,\n initial=999)\n\n LANDAREA_FROM_CHOICES = (\n (None, 'Any'),\n (0, '0 m²'),\n (100, '100 m²'),\n (200, '200 m²'),\n (300, '300 m²'),\n (500, '500 m²'),\n (750, '750 m²'),\n (1000, '1000 m²'),\n (2000, '2000 m²'),\n (5000, '5000 m²'),\n (10000, '1 HA'),\n (20000, '2 HA'),\n (50000, '5 HA'),\n (100000, '10 HA'),\n (150000, '15 HA'),\n (250000, '25 HA'),\n (999999999, '25 HA+'),\n )\n landarea_from = forms.ChoiceField(label='Landarea from',\n required=False,\n choices=LANDAREA_FROM_CHOICES)\n LANDAREA_TO_CHOICES = (\n (None, 'Any'),\n (0, '0 m²'),\n (100, '100 m²'),\n (200, '200 m²'),\n (300, '300 m²'),\n (500, '500 m²'),\n (750, '750 m²'),\n (1000, '1000 m²'),\n (2000, '2000 m²'),\n (5000, '5000 m²'),\n (10000, '1 HA'),\n (20000, '2 HA'),\n (50000, '5 HA'),\n (100000, '10 HA'),\n (150000, '15 HA'),\n (250000, '25 HA'),\n (999999999, '25 HA+'),\n )\n landarea_to = forms.ChoiceField(label='Landarea to',\n required=False,\n choices=LANDAREA_TO_CHOICES,\n initial=999999999)\n\n FLOORAREA_FROM_CHOICES = (\n (None, 'Any'),\n (0, '0 m²'),\n (20, '20 m²'),\n (40, '40 m²'),\n (60, '60 m²'),\n (80, '80 m²'),\n (100, '100 m²'),\n (120, '120 m²'),\n (150, '150 m²'),\n (180, '180 m²'),\n (999999999, '200 m²+'),\n )\n floorarea_from = forms.ChoiceField(label='Floorarea from',\n required=False,\n choices=FLOORAREA_FROM_CHOICES)\n\n FLOORAREA_TO_CHOICES = (\n (None, 'Any'),\n (0, '0 m²'),\n (20, '20 m²'),\n (40, '40 m²'),\n (60, '60 m²'),\n (80, '80 m²'),\n (100, '100 m²'),\n (120, '120 m²'),\n (150, '150 m²'),\n (180, '180 m²'),\n (999999999, '200 m²+'),\n )\n floorarea_to = forms.ChoiceField(label='Floorarea to',\n required=False,\n choices=FLOORAREA_TO_CHOICES,\n initial=999999999)\n\n PROPERTY_TYPE_CHOICES = [\n (\"Residential\", [\n (2, 'House'),\n (1, 'Apartment'),\n (11, 'Studio'),\n (4, 'Townhouse'),\n (5, 'Unit'),\n ]),\n (\"Other\", [\n (8, 'Home & Income'),\n (7, 'Lifestyle Property'),\n (6, 'Lifestyle Section'),\n (12, 'Section'),\n (14, 'Retirement Living'),\n (15, 'Carpark'),\n (16, 'Boat shed'),\n (13, 'Multiple Properties'),\n (17, 'Rentals House'),\n (10, 'Rural Lifestyle Property'),\n (9, 'Rural Lifestyle Section'),\n (12, 'Section')\n ])\n ]\n property_type = forms.MultipleChoiceField(label='Property type',\n choices=PROPERTY_TYPE_CHOICES,\n required=False,\n widget=Select2MultipleWidget(\n attrs={'data-placeholder': 'All property types'}\n ),\n help_text=''\n 'Select Residential types
'\n ''\n 'Select Other types
'\n                                                          'Select all')\n    show_only_open_homes = forms.BooleanField(label='Show only open homes', required=False)\n    show_only_properties_with_address = forms.BooleanField(label='Show only properties with an address', required=False)\n    keywords = forms.CharField(label='Keywords', required=False)\n    listings_age_days = forms.IntegerField(\n        label='Listings age (days), not more',\n        required=True,\n        initial=14,\n        min_value=0\n    )\n\n    def __init__(self, *args, **kwargs):\n        suburbs = [(suburb['id'], suburb['name'])\n                   for suburb\n                   in Suburb.objects.values('id', 'name').order_by('city__region__name', 'city__city_name', 'name')]\n        self.base_fields['suburbs'].choices = suburbs\n\n        super(SearchForm, self).__init__(*args, **kwargs)\n","repo_name":"alexmon1989/realestate","sub_path":"search/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":9086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"27844144304","text":"import pytest\nfrom dynaconf import settings\nfrom fastapi.testclient import TestClient\n\nfrom finance_api.gateways.paypal import paypal_handshake\n\n\n@pytest.fixture\ndef ipn_message():\n    return {\n        \"receiver_email\": settings.PAYPAL_ACCOUNT,\n    }\n\n\ndef test_empty_post_to_paypal_gateway(client):\n    response = client.post(\"/gateway/paypal\")\n    assert response.status_code == 400\n\n\ndef test_post_with_correct_paypal_user_agent_accepted(mocker, client, ipn_message):\n    paypal_handshake = mocker.patch(\"finance_api.gateways.paypal.paypal_handshake\")\n\n    response = client.post(\"/gateway/paypal\", data=ipn_message,)\n    assert response.status_code == 200\n\n\ndef test_valid_paypal_request_trigger_handshake_process(mocker, client, ipn_message):\n    paypal_handshake = mocker.patch(\"finance_api.gateways.paypal.paypal_handshake\")\n\n    response = client.post(\"/gateway/paypal\", data=ipn_message,)\n\n    paypal_handshake.assert_called_with(ipn_message)\n\n\ndef test_raise_error_if_request_to_not_intended_account(mocker, client, ipn_message):\n    ipn_message[\"receiver_email\"] = \"not_expected@account.com\"\n    response = client.post(\"/gateway/paypal\", data=ipn_message,)\n    assert response.status_code == 400\n\n\ndef test_do_not_trigger_handshake_process_if_not_intended_account(\n    mocker, client, ipn_message\n):\n    paypal_handshake = mocker.patch(\"finance_api.gateways.paypal.paypal_handshake\")\n\n    ipn_message[\"receiver_email\"] = \"not_expected@account.com\"\n    response = client.post(\"/gateway/paypal\", data=ipn_message,)\n    assert not paypal_handshake.called\n\n\ndef test_paypal_handshake_send_request_to_verify_url(mocker, client, ipn_message):\n    mock_post_request = mocker.patch(\"finance_api.gateways.paypal.requests.post\")\n    paypal_handshake(ipn_message)\n\n    mock_post_request.assert_called_with(\n        settings.PAYPAL_VERIFY_IPN_URL, data=ipn_message\n    )\n\n\ndef test_paypal_after_verified_store_notification_content(mocker, client, ipn_message):\n    mock_response = mocker.patch(\"finance_api.gateways.paypal.requests.post\")\n    mock_response.return_value.text = \"VERIFIED\"\n\n    mock_store_notification = mocker.patch(\n        \"finance_api.gateways.paypal.store_notification\"\n    )\n\n    paypal_handshake(ipn_message)\n\n    mock_store_notification.assert_called_with(ipn_message)\n\n\ndef test_log_error_when_paypal_replied_with_invalid_message_received(\n    mocker, client, ipn_message\n):\n    mock_response = mocker.patch(\"finance_api.gateways.paypal.requests.post\")\n    mock_response.return_value.text = \"INVALID\"\n    mock_store_notification = mocker.patch(\n        
"finance_api.gateways.paypal.store_notification"\n    )\n    mock_logger_error = mocker.patch(\"finance_api.gateways.paypal.logger.error\")\n\n    paypal_handshake(ipn_message)\n\n    mock_logger_error.assert_called()\n","repo_name":"rennerocha/finance-api","sub_path":"tests/gateways/test_paypal.py","file_name":"test_paypal.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"36704240445","text":"\r\n\r\nfrom selenium import webdriver\r\nfrom time import sleep\r\nfrom trivia import trivia\r\nfrom tqdm import tqdm\r\nfrom utils import meta_dict_combo\r\nbar_size = 100\r\n\r\nclass searchable_page:\r\n    searched_links = []\r\n    unsearched_links = []\r\n    bar = tqdm(total = bar_size, desc=\"scraping\")\r\n    def __init__(self,url,meta_dict={}):\r\n        self.url = url\r\n        self.meta_dict = meta_dict\r\n    def __str__(self):\r\n        return f\"{self.__class__.__name__} url:{self.url} \"\r\n    \r\n    def get_unsearched_page():\r\n        try:\r\n            return searchable_page.unsearched_links.pop()\r\n        except IndexError:\r\n            return None\r\n\r\n\r\n    def add_searchable_pages(new_pages):\r\n        #check duplicates within the list\r\n        index = 0\r\n        while(index < len(new_pages)):\r\n            second_index = index+1\r\n            while(second_index < len(new_pages)):\r\n                #NOTE: the body of this loop was lost in the source; reconstructed under the assumption that later entries with a duplicate url are dropped.\r\n                if(new_pages[second_index].url == new_pages[index].url):\r\n                    new_pages.pop(second_index)\r\n                else:\r\n                    second_index += 1\r\n            index += 1\r\n        #NOTE: reconstructed; assumed to skip pages already searched or queued before enqueueing the rest.\r\n        known_urls = [page.url for page in searchable_page.searched_links + searchable_page.unsearched_links]\r\n        for page in new_pages:\r\n            if(page.url not in known_urls):\r\n                searchable_page.unsearched_links.append(page)\r\n                known_urls.append(page.url)\r\n        searchable_page.update_bar()\r\n\r\n    def update_bar():\r\n        #NOTE: this method header was lost in the source; the name update_bar is a reconstruction, only the body below survived.\r\n        if((len(searchable_page.unsearched_links)+len(searchable_page.searched_links))>0):\r\n            value = len(searchable_page.searched_links)/(len(searchable_page.unsearched_links)+len(searchable_page.searched_links))\r\n            value = value * bar_size\r\n\r\n            searchable_page.bar.n = int(value)\r\n            searchable_page.bar.refresh()\r\n    \r\n    def get_questions_from_web_site(self,driver):\r\n        driver.get(self.url)\r\n        searchable_page.bar.set_description(f\"searching: {self.url}\")\r\n        #add sub pages\r\n        searchable_page.add_searchable_pages(self.sub_links(driver))\r\n\r\n        new_trivia = [\r\n            trivia(question,choices,answer,self.meta_dict.copy()) \r\n            for question,answer,choices in zip(self.questions(driver),self.correct_answers(driver),self.answers(driver))\r\n        ]\r\n        searchable_page.searched_links.append(self)\r\n        return new_trivia\r\n\r\n    #to make\r\n    #ignore\r\n    def sub_links(self,driver):\r\n        raise NotImplementedError()\r\n    def questions(_,driver):\r\n        raise NotImplementedError()\r\n    def answers(_,driver):\r\n        raise NotImplementedError()\r\n    def correct_answers(_,driver):\r\n        raise NotImplementedError()\r\n\r\n\r\nclass usefulltrivia_page(searchable_page):\r\n    pass\r\n    def sub_links(self,driver):#return a list of searchable pages and links\r\n        subcats = driver.find_elements_by_css_selector(\"a.subcat\")\r\n        new_pages1 = list(map(lambda item: \r\n            usefulltrivia_page(item.get_attribute(\"href\"),meta_dict_combo(self.meta_dict,{\"catagory\":item.text})),\r\n            subcats\r\n            ))\r\n\r\n        other_pages = driver.find_elements_by_css_selector(\"ul.pagination li a\")\r\n        new_pages2 = list(map(lambda item: \r\n            usefulltrivia_page(item.get_attribute(\"href\"),meta_dict_combo(self.meta_dict,{})),\r\n            other_pages\r\n            ))\r\n\r\n        return new_pages1+new_pages2\r\n\r\n    def questions(_,driver):#return text of questions on the page\r\n        items = driver.find_elements_by_css_selector(\"div.mbr-article h2\")\r\n\r\n        return list(map(lambda x: x.text, items))\r\n    def answers(_,driver):\r\n        items = driver.find_elements_by_css_selector(\"div.mbr-buttons\")\r\n\r\n        answer_list = []\r\n        for item in items:\r\n            sub_buttons = item.find_elements_by_css_selector(\"a\")\r\n            answer_list.append((list(map(lambda x:x.text,sub_buttons))))\r\n        return answer_list\r\n\r\n    def correct_answers(_,driver):\r\n        items = driver.find_elements_by_css_selector(\"div.mbr-buttons\")\r\n\r\n        answer_list = []\r\n        
for item in items:\r\n sub_buttons = item.find_elements_by_css_selector(\"a\")\r\n for answer in sub_buttons:\r\n if(answer.get_attribute(\"onmousedown\") == \"ding.play()\"):\r\n answer_list.append(answer.text)\r\n break\r\n return answer_list\r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n driver = webdriver.Chrome('/home/alexander/hackathons/askedAsleep/trivia_scraper/chromedriver')\r\n a = usefulltrivia_page('https://www.usefultrivia.com/sports_trivia/football_trivia_index.html',{\"source\":\"usefultrivia\",\"catagory\":\"football_trivia\"})\r\n\r\n searchable_page.add_searchable_pages([a])\r\n\r\n questions = [] \r\n full_search_path = []\r\n\r\n while( True ):\r\n next_page = searchable_page.get_unsearched_page()\r\n if(next_page == None):\r\n break\r\n #print(f\"searching {next_page.url}\")\r\n full_search_path.append(next_page.url)\r\n questions.extend(next_page.get_questions_from_web_site(driver))\r\n\r\n driver.quit()\r\n\r\n for i in questions:\r\n print(i)\r\n\r\n if(len(list(set(full_search_path)))!= len(full_search_path)):\r\n print(\"double searching\")\r\n\r\n\r\n","repo_name":"AlexanderLuasan/askedAsleep","sub_path":"trivia_scraper/web_scraper.py","file_name":"web_scraper.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"32164314084","text":"class tracer(object):\n def __init__(self, func):\n self.calls = 0\n self.func = func\n def __call__(self, *args):\n self.calls += 1\n print ('call %s to %s ' % (self.calls, self.func.__name__))\n \n arg_sum = ''\n for arg in args:\n arg_sum = str(arg_sum) + str(arg)\n print ('sum of arguments as string %s ' % (arg_sum))\n self.func(*args)\n\n@tracer\ndef spam(a, b, c):\n print( a, b, c )\n\nif __name__ == '__main__':\n spam(1, 2, 3)\n spam('a', 'b', 'c')\n","repo_name":"alexvsuser/PythonExamples","sub_path":"class_deco1.py","file_name":"class_deco1.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"24635531239","text":"import datetime\nimport json\nimport os\nimport threading\nimport time\n\nimport croniter\nfrom flask import Flask, render_template\n\nimport database\nimport ecoflow_api\nimport pytz\nimport utils\n\n\ndef process_iter(db_conn, config, secrets):\n # Get and save telemetry once\n ts = int(time.time())\n telemetry = ecoflow_api.fetch_data(secrets['device_sn'], app_key=secrets['app_key'],\n secret_key=secrets['secret_key'])\n if telemetry is not None:\n database.save_telemetry(db_conn, ts, telemetry)\n\n # update status if it gets changed\n last_status_from_telemetry = database.is_there_input_watts(db_conn)\n last_saved_status = database.get_latest_saved_status(db_conn)\n\n # Update status if it got changed since last record\n if last_saved_status != last_status_from_telemetry or last_saved_status is None:\n database.save_status(db_conn, ts, last_status_from_telemetry)\n\n\ndef main():\n\n # Load configuration\n script_folder = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(script_folder, 'config.json')) as config_file:\n config = json.load(config_file)\n db = config['db_file']\n main_schedule = config['schedule']\n\n # Load secrets\n with open(os.path.join(script_folder, 'secrets.json')) as secret_file:\n secrets = json.load(secret_file)\n\n if not os.path.isfile(db):\n database.create_db_schema(db)\n db_conn = 
database.get_db_connection(str(db))\n\n last_run_ts = int(time.time())\n cron = croniter.croniter(config['schedule'], last_run_ts)\n\n app = Flask(__name__, template_folder=\"web/templates\", static_folder=\"web/static\")\n\n @app.route(\"/\")\n def endpoint_root():\n conn = database.get_db_connection(str(db))\n rows = database.get_status_history(conn)\n previous_ts = int(time.time())\n for row in rows:\n row['duration'] = str(datetime.timedelta(seconds=(previous_ts - row['ts'])))\n previous_ts = row['ts']\n return render_template('status.html', utils=utils, rows=rows)\n\n @app.route(\"/status\")\n def endpoint_status():\n return endpoint_root()\n\n threading.Thread(target=lambda: app.run(host='0.0.0.0', port=8080, debug=True, use_reloader=False)).start()\n\n sleep_time = 0\n while True:\n if sleep_time > 0:\n print(\"Sleeping {} seconds\".format(sleep_time))\n time.sleep(sleep_time)\n\n next_run_ts = cron.get_next(float)\n print(\"{}: Processing iteration...\".format(time.ctime()))\n process_iter(db_conn, config, secrets)\n last_run_ts = time.time()\n sleep_time = next_run_ts - last_run_ts\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alex-peresunko/svitlo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"18107148719","text":"\nimport numpy as np\nfrom TripletLoss import TripletLoss\nimport tqdm\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torchvision.models as models\nimport torch.nn.functional as F\nfrom custom_dataset import brid\nfrom thop import profile\nimport torch.nn as nn\nimport torch\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint(\"Current device: \",device)\n\n\n\n\n\nclass Net(nn.Module):\n\n total_FLOPs_count =0\n total_MACs_count = 0\n total_parameter_count = 0\n dropout = 0\n\n # define the layers\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 16, 3,padding=1)\n self.pool = nn.MaxPool2d(4, 4)\n self.conv2 = nn.Conv2d(16, 26, 3,padding=1)\n self.fc1 = nn.Linear(26 * 14 * 14, 512)\n self.fc2 = nn.Linear(512, 64)\n self.fc3 = nn.Linear(64, 10)\n self.relu = nn.ReLU()\n\n\n def defineDropout(self,dropout):\n self.dropout = dropout\n\n # concatenate these layers\n def forward(self, x):\n x = self.pool(self.relu(self.conv1(x)))\n x = self.pool(self.relu(self.conv2(x)))\n x = x.view(-1, 26 * 14 * 14)\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n x = self.fc3(x)\n x = nn.functional.dropout(x, p=self.dropout, training=self.training)\n return x\n\n def reg_forward_hook(self):\n modules = self.named_children()\n for name, module in modules:\n module.register_forward_hook(self.my_hook_function)\n\n def my_hook_function(self,layer, input, output):\n print(\"------------------------------------------------------------------------------------------------\")\n layer_name=str(layer.__class__.__name__)\n #print(\"%10d \".format(layer_name))\n\n input_shape = str([item for item in input[0].shape])\n output_shape = str([item for item in output.shape])\n\n layer_params=self.countLayerParam( layer, input, output)\n layer_MACs = self.countLayerMACs(layer,input,output)\n layer_FLOPs = self.countFLOPs(layer,input,output)\n\n #for index in range(output)\n #print(\"fdifdis\",output.shape.__len__())\n\n table={'op_type':layer_name,\"input_shape\":input_shape,\"output_shape\":output_shape \\\n 
,\"params\":layer_params,\"MACs\":layer_MACs,\"FLOPs\":layer_FLOPs}\n\n #print('{op_type:10}'.format(**table))\n\n print('{0:10} {1:20} {2:20} \\\n {3:>10} {4:>10} {5:>10}'.format('op_type','input_shape','output_shape', \\\n 'params','MACs','FLOPs'))\n\n print('{op_type:10} {input_shape:20} {output_shape:20} \\\n {params:10d} {MACs:10d} {FLOPs:10d}'.format(**table))\n '''\n print('{op_type:10} {input_shape:10} '\n .format(**table))\n for param in layer.parameters():\n print(\"params shape: {}\".format(list(param.size())))\n '''\n #print(\"-----------------------------------------------------------------------------\")\n\n\n def countLayerMACs(self, layer, input, output):\n ##################count MACs\n layer_name=layer.__class__.__name__\n layer_MACs_count = 0\n if layer_name == \"Conv2d\":\n layer_MACs_count = 1\n for item in layer.weight.shape:\n layer_MACs_count *= item\n\n layer_MACs_count *= output.shape[-1]\n layer_MACs_count *= output.shape[-2]\n\n self.total_MACs_count+= layer_MACs_count\n if layer_name == \"Linear\":\n layer_MACs_count = 1\n for item in layer.weight.shape:\n layer_MACs_count *= item\n\n self.total_MACs_count += layer_MACs_count\n return layer_MACs_count\n\n\n def countFLOPs(self, layer, input, output):\n ##################count MACs\n layer_name=layer.__class__.__name__\n layer_FLOPs_count = 0\n if layer_name == \"Conv2d\":\n layer_FLOPs_count = 1\n for item in layer.weight.shape:\n layer_FLOPs_count *= item\n\n layer_FLOPs_count *= output.shape[-1]\n layer_FLOPs_count *= output.shape[-2]\n layer_FLOPs_count *= 2\n\n self.total_FLOPs_count += layer_FLOPs_count\n if layer_name == \"Linear\":\n layer_FLOPs_count = 1\n for item in layer.weight.shape:\n layer_FLOPs_count *= item\n\n layer_FLOPs_count *= 2\n layer_FLOPs_count += layer.bias.shape[0]\n self.total_FLOPs_count += layer_FLOPs_count\n\n if layer_name == \"MaxPool2d\":\n\n layer_FLOPs_count = pow(layer.kernel_size,2) - 1\n for item in output.shape:\n layer_FLOPs_count *= item\n\n self.total_FLOPs_count += layer_FLOPs_count\n return layer_FLOPs_count\n\n def countLayerParam(self, layer, input, output):\n ###############count total parameter\n layer_name=layer.__class__.__name__\n layer_parameter_count = 0\n if layer_name == \"Conv2d\" or layer_name == \"Linear\":\n layer_parameter_count = 1\n for item in layer.weight.shape:\n layer_parameter_count *= item\n layer_parameter_count+= layer.bias.shape[0]\n self.total_parameter_count +=layer_parameter_count\n #print(\"layer parameter: %.5fM\" % (layer_parameter_count* 1e-06))\n\n return layer_parameter_count\n\n\n def thopCaclate(self):\n #### thop\n input_data = torch.randn(1, 3, 224, 224)\n print(\"thop output:\")\n macs, params = profile(self, inputs=(input_data,))\n #print(\"Total parameter: %.2fM\" % (params * 1e-06))\n #print(\"Total MACs: %.2fM\" % (macs * 1e-06))\n print(\"thop Total parameter:\",params)\n print(\"thop Total MACs:\",macs)\n\n def getTotalParameterCount(self):\n print(\"total_parameter_count\",self.total_parameter_count)\n\n\n def getTotalMACsCount(self):\n print(\"total_MACs_count\",self.total_MACs_count)\n def getTotalFLOPsCount(self):\n print(\"total_FLOPs_count\",self.total_FLOPs_count)\n\nclass my_model(nn.Module):\n def __init__(self,num_of_class,init_lr,epochs):\n super().__init__()\n\n self.epochs = epochs\n self.init_lr =init_lr\n model = models.resnet50(pretrained=True);\n # replace the last layer\n num_features = model.fc.in_features\n model.fc = nn.Linear(num_features, num_of_class)\n # model.fc.requires_grad_\n # for idx, 
(name, param) in enumerate(model.named_parameters()):\n        #     param.requires_grad_ = False\n\n        # model.fc.requires_grad_ = True\n        #model.defineDropout(dropout)\n        self.model = model.to(device)\n\n        #loss function\n        # weights=trainset.getClassWeight()\n        # class_weights = torch.FloatTensor(weights).to(device)\n        #optimization algorithm\n        # self.optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n\n        #weight_decay is l2 regularization\n\n        self.optimizer= optim.Adam(model.parameters(), lr=init_lr)\n        self.criterion = nn.CrossEntropyLoss()\n        # criterion = TripletLoss()\n\n\n    def validation_run(self,validation_loader,validation_set):\n        correct = 0 \n        for i, (inputs, labels) in enumerate(validation_loader , 0):\n            print(\"batch:\",i)\n            # change the type into cuda tensor\n            inputs = inputs.to(device)\n            labels = labels.to(device)\n\n            # forward + backward + optimize\n            outputs = self.model(inputs)\n            # select the class with highest probability\n            _, pred = outputs.max(1)\n            correct += pred.eq(labels).sum().item()\n\n        # writeResult2Answer(testset,resultDict)\n        print('validation accuracy: %.4f' % (correct/len(validation_set)))\n\n\n    def train(self,trainloader,trainset,validation_loader,validation_set):\n        self.model.train()\n        for epoch in range(self.epochs):  # loop over the dataset multiple times\n            running_loss = 0.0\n            correct = 0\n\n            for i,(inputs, labels) in enumerate(trainloader,0):\n                # print(\"inputs.shape\",inputs.shape)\n                # print(\"labels\",labels)\n\n                #change the type into cuda tensor\n                inputs = inputs.to(device) \n                labels = labels.to(device) \n\n                # zero the parameter gradients\n                self.optimizer.zero_grad()\n                # forward + backward + optimize\n                outputs = self.model(inputs)\n                # select the class with highest probability\n                _, pred = outputs.max(1)\n                # if the model predicts the same results as the true\n                # label, then the correct counter will plus 1\n                correct += pred.eq(labels).sum().item()\n                #each class\n                loss = self.criterion(outputs, labels)\n                loss.backward()\n                self.optimizer.step()\n\n                # print statistics\n                running_loss += loss.item()\n                # if count % 20 == 19:    # print every 200 mini-batches\n                print('[%d, %5d] loss: %.3f' % (epoch + 1, i+ 1, running_loss / 20))\n                running_loss = 0.0\n            print('%d epoch, training accuracy: %.4f' % (epoch+1, correct/len(trainset)))\n            self.validation_run(validation_loader,validation_set)\n    def training_tripletLoss(self, train_loader):\n        # train_loader is passed in explicitly; it is not defined anywhere on the class\n        self.model.train()\n        criterion = TripletLoss()\n        # the module was imported with 'import tqdm', so the progress bar is tqdm.tqdm\n        for epoch in tqdm.tqdm(range(self.epochs), desc=\"Epochs\"):\n            running_loss = []\n            for step, (anchor_img, positive_img, negative_img, anchor_label) in enumerate(tqdm.tqdm(train_loader, desc=\"Training\", leave=False)):\n                anchor_img = anchor_img.to(device)\n                positive_img = positive_img.to(device)\n                negative_img = negative_img.to(device)\n                \n                self.optimizer.zero_grad()\n                anchor_out = self.model(anchor_img)\n                positive_out = self.model(positive_img)\n                negative_out = self.model(negative_img)\n                \n                loss = criterion(anchor_out, positive_out, negative_out)\n                loss.backward()\n                self.optimizer.step()\n                \n                running_loss.append(loss.cpu().detach().numpy())\n            print(\"Epoch: {}/{} - Loss: {:.4f}\".format(epoch+1, self.epochs, np.mean(running_loss)))\n\n\n    def saveModel(self, correct, trainset):\n        # trainset is passed in because the class keeps no self.trainset reference\n        print('Finished Training')\n        print('==> Saving model..')\n        state = {\n            'model': self.model.state_dict(),\n            'acc': correct/len(trainset),\n            'parameters':{\n                'epoch': self.epochs,\n                'dropout': getattr(self, 'dropout', None),  # my_model never sets a dropout attribute\n                'optimizer': repr(self.optimizer)  # human-readable optimizer description\n            }\n        }\n        torch.save(state, './trainedModel/skew_checkpoint.t7')\n        #save entire model\n        
torch.save(self.model, './trainedModel/skew_model.pt')\n print('Finished Saving')\n\n\n\n\n","repo_name":"hcgcarry/DLSR_LAB_learning","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}