diff --git "a/3695.jsonl" "b/3695.jsonl" new file mode 100644--- /dev/null +++ "b/3695.jsonl" @@ -0,0 +1,752 @@ +{"seq_id":"119188029","text":"from datetime import date\nfrom dataclasses import dataclass\nfrom decimal import Decimal\n\n\n@dataclass\nclass InterestDataRow:\n date: date\n label: str\n amount: Decimal\n interest_rate: Decimal\n days_left_in_year: int\n fraction_of_year: Decimal\n interest: float\n\n\nclass InterestProcessor:\n def __init__(self, contract, year):\n self.year = year\n self.start_date = date(self.year, 1, 1)\n self.end_date = date(self.year, 12, 31)\n self.contract = contract\n self.calculation_rows = self.calculate_rows()\n\n @property\n def value(self):\n return sum([row.interest for row in self.calculation_rows])\n\n def calculate_rows(self):\n interest_rows = [self._saldo_row()]\n accounting_entries = self.contract.accounting_entries_in(self.year)\n for entry in accounting_entries:\n interest_rows.append(self._accounting_row(entry))\n\n contract_changes = self.contract.versions_in(self.year)\n if not contract_changes:\n return interest_rows\n\n old_interest_rate = interest_rows[0].interest_rate\n for contract_change in contract_changes:\n if contract_change.id == self.contract.first_version.id:\n continue\n if contract_change.start == self.start_date:\n continue\n if old_interest_rate == contract_change.interest_rate:\n continue\n\n interest_rows.extend(self._contract_change_rows(contract_change, old_interest_rate))\n old_interest_rate = contract_change.interest_rate\n\n return interest_rows\n\n def _saldo_row(self):\n start_balance = self.contract.balance_on(self.start_date)\n interest_rate = self.contract.interest_rate_on(self.start_date)\n interest_for_year = round(start_balance * interest_rate, 2)\n\n return InterestDataRow(\n date=self.start_date,\n label=\"Saldo\",\n amount=start_balance,\n interest_rate=interest_rate,\n days_left_in_year=360,\n fraction_of_year=1,\n interest=interest_for_year,\n )\n\n def _accounting_row(self, accounting_entry):\n days_left, fraction_year = self._days_fraction_360(accounting_entry.date)\n interest_rate = self.contract.interest_rate_on(accounting_entry.date)\n interest = round(accounting_entry.amount * fraction_year * interest_rate, 2)\n return InterestDataRow(\n date=accounting_entry.date,\n label=\"Einzahlung\" if accounting_entry.amount > 0 else \"Auszahlung\",\n amount=accounting_entry.amount,\n interest_rate=interest_rate,\n days_left_in_year=days_left,\n fraction_of_year=fraction_year,\n interest=interest,\n )\n\n def _contract_change_rows(self, contract_version, old_interest_rate):\n change_balance = self.contract.balance_on(contract_version.start)\n days_left, fraction_year = self._days_fraction_360(contract_version.start)\n interest_before = round(-change_balance * fraction_year * old_interest_rate, 2)\n interest_after = round(change_balance * fraction_year * contract_version.interest_rate, 2)\n return [\n InterestDataRow(\n date=contract_version.start,\n label=\"Vertragsänderung\",\n amount=-change_balance,\n interest_rate=old_interest_rate,\n days_left_in_year=days_left,\n fraction_of_year=fraction_year,\n interest=interest_before,\n ),\n InterestDataRow(\n date=contract_version.start,\n label=\"Vertragsänderung\",\n amount=change_balance,\n interest_rate=contract_version.interest_rate,\n days_left_in_year=days_left,\n fraction_of_year=fraction_year,\n interest=interest_after\n )\n ]\n\n def _days_fraction_360(self, date):\n days_left = days360_eu(date, self.end_date)\n fraction = Decimal(days_left/360)\n 
return days_left, fraction\n\n\ndef days360_eu(start_date, end_date):\n start_day = start_date.day\n start_month = start_date.month\n start_year = start_date.year\n end_day = end_date.day\n end_month = end_date.month\n end_year = end_date.year\n\n if start_day == 31:\n start_day = 30\n\n if end_day == 31:\n end_day = 30\n\n return (end_year - start_year) * 360 + (end_month - start_month) * 30 + (end_day - start_day)\n","sub_path":"dkapp/operations/interest.py","file_name":"interest.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"469712323","text":"from django.conf.urls import url\n\nfrom notes.views import entry_list, entry_detail, entry_create, entry_update, entry_delete\n\nurlpatterns = [\n url(r'^$', entry_list, name='list'),\n url(r'^create/$', entry_create, name='create'),\n url(r'^(?P\\d+)/$', entry_detail, name='detail'),\n\turl(r'^(?P\\d+)/update/$', entry_update, name='edit'),\n\turl(r'^(?P\\d+)/delete/$', entry_delete, name='delete')\n]\n","sub_path":"notes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"509192165","text":"#page 78\ndef main():\n maxwidth=100\n print_start()\n count=0\n while True:\n try:\n line=input()\n if count==0:\n color=\"lightgreen\"\n elif count % 2:\n color = \"white\"\n else:\n color=\"lightyellow\"\n print_line(line,color,maxwidth)\n count+=1\n except EOFError:\n break\n print_end()\n \ndef print_start():\n print(\"\")\n \ndef print_end():\n print(\"
\")\n \ndef print_line(line,color,maxwidth):\n fields=extract_fields(line)\n for field in fields:\n if not field:\n print(\"\")\n#心情不好 不想写了\n ","sub_path":"chapter_2/examples/csv2html.py","file_name":"csv2html.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"351589474","text":"# -*-encoding:utf-8-*-\n\"\"\"\n@version: Python2.7.1\n@author: Attack\n@time: 2015/12/15 22:19\n\"\"\"\nimport math\ndef is_prime(n):\n list_num = []\n for i in range(2, n):\n for num in range(2, int(math.sqrt(n))+1):\n if i % num == 0 and i != num:\n break\n elif i % num != 0 and num == (int(math.sqrt(n))):\n list_num.append(i)\n return list_num\nprint(is_prime(201))\n\n\nl=[1,2,3,4,5,5,5,5,5,5,5,5,5,5,5]\nprint(l)\n","sub_path":"Python/test/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"328228440","text":"import os\nimport sys\n\nimport braintree\n\n\n# helpers\nPROJ_DIR = os.path.dirname(__file__)\n\nBASE_DIR = os.path.dirname(PROJ_DIR)\n\n\n# django debug configs\nDEBUG = bool(os.environ.get('DEBUG', False))\n\nTEMPLATE_DEBUG = DEBUG\n\n\n# application configs\nINSTALLED_APPS = (\n # django core\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # social auth\n 'social.apps.django_app.default',\n\n # rest\n 'rest_framework',\n\n # project apps\n 'apps.vauth',\n 'apps.common',\n 'apps.constants',\n 'apps.customers',\n 'apps.vendors',\n 'apps.products',\n 'apps.warehousing',\n 'apps.purchasing',\n 'apps.receiving',\n 'apps.sales',\n 'apps.shipping',\n\n # legacy apps\n 'apps.dodger',\n 'apps.swipedeals',\n\n # front end single page app\n 'frontend.managers',\n 'frontend.store',\n)\n\nif DEBUG:\n # add debug toolbar\n INSTALLED_APPS += ('debug_toolbar',)\n\n\n# database configs\nDATABASE_ROUTERS = [\n 'apps.dodger.router.DodgerRouter',\n 'apps.swipedeals.router.SwipedealsRouter',\n]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ.get('BALTO_NAME', 'balto_dev'),\n 'USER': os.environ.get('BALTO_USER', 'vince'),\n 'PASSWORD': os.environ.get('BALTO_PASSWORD', ''),\n 'HOST': os.environ.get('BALTO_HOST', ''),\n 'PORT': os.environ.get('BALTO_PORT', '')\n },\n 'dodger': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ.get('DODGER_NAME', 'dodger'),\n 'USER': os.environ.get('DODGER_USER', 'vince'),\n 'PASSWORD': os.environ.get('DODGER_PASSWORD', ''),\n 'HOST': os.environ.get('DODGER_HOST', ''),\n 'PORT': os.environ.get('DODGER_PORT', '')\n },\n 'swipedeals': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.environ.get('SD_NAME', 'swipedeals_development'),\n 'USER': os.environ.get('SD_USER', 'root'),\n 'PASSWORD': os.environ.get('SD_PASSWORD', ''),\n 'HOST': os.environ.get('SD_HOST', ''),\n 'PORT': os.environ.get('SD_PORT', '')\n }\n}\n\nif DEBUG:\n DATABASES['default'] = {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(PROJ_DIR, 'db.sqlite3'),\n }\n\nif 'test' in sys.argv:\n DATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3'}\n DATABASES['dodger'] = {'ENGINE': 'django.db.backends.sqlite3'}\n DATABASES['swipedeals'] = {'ENGINE': 'django.db.backends.sqlite3'}\n\n\n# middleware configs\nMIDDLEWARE_CLASSES = (\n # django\n 
'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # 'django.middleware.csrf.CsrfViewMiddleware', # turned off for front end\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware', # turned off for front end\n\n # social auth\n 'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',\n)\n\nif DEBUG:\n MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)\n\n\n# static configs\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n\n# template configs\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'social.apps.django_app.context_processors.backends',\n 'social.apps.django_app.context_processors.login_redirect',\n 'django.contrib.auth.context_processors.auth',\n)\n\n\n# project configs\nALLOWED_HOSTS = ['*']\n\nROOT_URLCONF = 'balto.urls'\n\nWSGI_APPLICATION = 'balto.wsgi.application'\n\n\n# site configs\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nAPPEND_SLASH = False\n\n\n# rest framework configs\nREST_FRAMEWORK = {\n 'DEFAULT_FILTER_BACKENDS': (\n 'rest_framework.filters.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n 'rest_framework.filters.SearchFilter',\n ),\n\n 'PAGINATE_BY': 200,\n 'PAGINATE_BY_PARAM': 'limit',\n 'MAX_PAGINATE_BY': 1000,\n\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n )\n}\n\n\n# email configs\nEMAIL_USE_TLS = True\n\nEMAIL_HOST = 'smtp.gmail.com'\n\nEMAIL_PORT = 587\n\nEMAIL_HOST_USER = os.environ.get('EMAIL_USER', '')\n\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASSWORD', '')\n\n\n# auth configs\nAUTHENTICATION_BACKENDS = (\n 'social.backends.google.GoogleOAuth2',\n 'social.backends.google.GooglePlusAuth',\n 'social.backends.facebook.FacebookOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nAUTH_USER_MODEL = 'vauth.User'\n\nLOGIN_REDIRECT_URL = '/'\n\nSOCIAL_AUTH_SESSION_EXPIRATION = False\n\n\n## secrets, keys, tokens\n# site\nSECRET_KEY = '*^=ev$cva2-7)e+e_4vxcg+h^18621#jk8454hokh%dxf*n1a2'\n\n# social auth\nSOCIAL_AUTH_USER_MODEL = 'vauth.User'\n\nSOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True\n\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get(\n 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',\n '1007528749792-kk1bbesu6crn124tp6qp7ec9rn4a4efc.apps.googleusercontent.com'\n)\n\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get(\n 'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET',\n 'jFmCGiAfxAzd1OO6E9B5N5Wp'\n)\n\n# braintree\nbraintree.Configuration.configure(\n braintree.Environment.Sandbox,\n '6v23g5zdz7zrzb7q',\n 'hz29cgswfs5dw34w',\n '482266e36d0e091bb11a5315db7d974d'\n)\n\n\n# logging\nif not DEBUG:\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'logfile': {\n 'class': 'logging.handlers.WatchedFileHandler',\n 'filename': '/var/log/balto/balto.log'\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['logfile'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'django': {\n 'handlers': ['logfile'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'myapp': {\n 'handlers': ['logfile'],\n 'level': 'WARNING',\n 'propagate': False\n },\n },\n }\n","sub_path":"balto/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"499791040","text":"#!/bin/python3\n\n########################\n# Symmetric 
Difference #\n########################\n\nif __name__ == '__main__':\n dif = set()\n n = input()\n a = {int(x) for x in input().split(' ')}\n m = input()\n b = {int(x) for x in input().split(' ')}\n dif.update(a - b)\n dif.update(b - a)\n for x in sorted(dif):\n print(x)","sub_path":"Python/Sets/symmetric-difference.py","file_name":"symmetric-difference.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"406636765","text":"#! pythoun3\n#\n# Program to take multiple lines of text on the clipboard and \n# prepend an asterisk to each line so that they are a bulleted\n# list in WIki markdown.\n\nimport pyperclip\n\ntext = pyperclip.paste()\n\nlines = text.split(\"\\n\")\nfor i in range(len(lines)):\n\tlines[i] = \"* \" + lines[i]\n\ntext = \"\\n\".join(lines)\n\npyperclip.copy(text)","sub_path":"ch_6/bulletPointAdder.py","file_name":"bulletPointAdder.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"277538297","text":"from collections import namedtuple\n\nfrom app.viewmodels.book import Book_viewmodel\n\nMygift = namedtuple('MyGIFT', ['id','book','count'])\n\n\nclass Mywishes:\n def __init__(self, gifts_of_mine, wish_count_list):\n self.gifts = []\n #私有变量\n self.__gifts_of_mine = gifts_of_mine\n self.__wish_count_list = wish_count_list\n self.gifts = self.__parse()\n#尽量不在方法中去修改实例变量,\n#nametuple和字典是类似的\n def __parse(self):\n tem_gifts = []\n for gift in self.__gifts_of_mine:\n tem_gifts.append(self.__matching(gift))\n return tem_gifts\n\n\n def __matching(self, gift):\n count = 0\n for wish_count in self.__wish_count_list:\n if gift.isbn == wish_count['isbn']:\n count = wish_count['count']\n my_gift = Mygift(gift.id, Book_viewmodel(gift.book), count)\n return my_gift\n\n\n","sub_path":"flask_learning/app/viewmodels/wish.py","file_name":"wish.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"15427596","text":"#Tran Le\r\n# Final project _ Principles of Programming\r\nimport pandas as pd\r\nimport pymysql.cursors\r\nfrom files import *\r\nfrom creds import *\r\nfrom CleaningData import *\r\nfrom ImportToTables import *\r\n#-------------------------------------------------\r\n# Two functions that will be used later:\r\n#-------------------------------------------------\r\ndef place_value(number):\r\n \"\"\"To format the inputted number\r\n Return the same number with commas as thousands separators.\r\n \"\"\"\r\n return (\"{:,}\".format(number))\r\n\r\ndef getmedianincome(cursor, myConnection):\r\n \"\"\"Use this function when user input a wrong zip-code\r\n Get the average of median income associated with user inputted zip-code\r\n Input: cursor, myConnection: to connect with sql\r\n Approach: Ask the user if wants to try again, then if yes, get the inputted zip-code,\r\n Compute the average o median income associated with the zip code\r\n Output: getincomenum: the average o median income associated with the inputted zip code \"\"\"\r\n choice = input('There is no zip-code that matchs with your input, Do you want to try again (Y?N)? 
')\r\n if ((str(choice) == \"Y\") or (str(choice) == \"y\")):\r\n inputzip = input('Input the zipcode: ')\r\n sqlzipcode = \"\"\"select avg(`median_income`) as myanswer from housing where `zip_code` = %s;\"\"\"\r\n cursor.execute(sqlzipcode, inputzip)\r\n myConnection.commit()\r\n for row in cursor:\r\n getincomenum = (row.get(\"myanswer\"))\r\n return(getincomenum)\r\n# #----------------------------------------------------------------\r\n# # Step0: Create database \"\" and table Housing on SQL\r\n# #----------------------------------------------------------------\r\n# 1. Connect to your local instance of MySQL\r\n# a. mysql -u -p\r\n# 2. At the mysql> prompt, use source to read databaseCreationScript.sql execute the SQL script\r\n# a. mysql> source databaseCreationScript.sql\r\n# b. mysql> show databases;\r\n# c.mysql> use housing_project;\r\n\r\n# #----------------------------------------------------------------\r\n# # Step1: Read all of the three data files:\r\n# #----------------------------------------------------------------\r\nHousing = pd.read_csv(housingFile)\r\nIncome = pd.read_csv(incomeFile)\r\nZip = pd.read_csv(zipFile)\r\nprint(\"Data files have been imported\")\r\n#----------------------------------------------------------------\r\n#Step 2: Clean the 3 data files\r\n#----------------------------------------------------------------\r\nHousing = Cleanguid(Housing)\r\nHousing = CleanHousing(Housing)\r\n\r\nIncome = Cleanguid(Income)\r\nIncome = CleanMedianIncom(Income)\r\n\r\nZip = Cleanguid(Zip)\r\nZip = CleanZipCodeforZip(Zip)\r\n\r\nprint(\"Data tables have been cleaned\")\r\n#----------------------------------------------------------------\r\n#Step 3: Preparing for importing the data\r\n#Approach: Drop the zipcode in housing and Income, merge the three dataframes to create a data frame, named mydata.\r\n#Then arrange the columns of mydata to be associative with the order of table housing in my sql\r\nHousing = Housing.drop(['zip_code'], axis=1)\r\nIncome = Income.drop(['zip_code'], axis=1)\r\n\r\nmydata = pd.merge(Housing, Zip, on='guid')\r\nmydata = pd.merge(mydata, Income, on='guid')\r\n\r\nmydata = mydata[['guid','zip_code','city','state','county','housing_median_age',\r\n 'total_rooms','total_bedrooms','population','households','median_income','median_house_value']]\r\n#----------------------------------------------------------------\r\n#Step 4: Connect to sql and run function InsertToHousingTable to import the data from mydata to housing (sql table)\r\n#----------------------------------------------------------------\r\ntry:\r\n myConnection = pymysql.connect(host=hostname,\r\n user=username,\r\n password=password,\r\n db=database,\r\n charset='utf8mb4',\r\n cursorclass=pymysql.cursors.DictCursor\r\n )\r\nexcept Exception as e:\r\n print(f\"An error has occurred. 
Exiting: {e}\")\r\n print()\r\n exit()\r\n#---------------------------------\r\ntry:\r\n with myConnection.cursor() as cursor:\r\n\r\n # Delete the old data in table\r\n deleteHousing = \"\"\" delete from housing;\"\"\"\r\n cursor.execute(deleteHousing)\r\n myConnection.commit()\r\n\r\n #Reset the ID\r\n setAutoIncrement = \"\"\"alter table housing auto_increment =1;\"\"\"\r\n cursor.execute(setAutoIncrement)\r\n myConnection.commit()\r\n\r\n #Import data from \"mydata\" to housing (sql table)\r\n count = InsertToHousingTable(mydata, cursor, myConnection)\r\n print(f\"{count} records imported into the database\")\r\n\r\n print(\"Beginning validation\")\r\n\r\n #Validation for total_rooms associative with total_bedrooms\r\n inputnum = input('Total rooms ?')\r\n sqltotalbedroom=\"\"\"select `total_bedrooms` from housing where `total_rooms` > %s;\"\"\"\r\n cursor.execute(sqltotalbedroom, inputnum)\r\n myConnection.commit()\r\n getnum=0\r\n for row in cursor:\r\n getnum += (row.get(\"total_bedrooms\"))\r\n print(f\" For locations with more than {inputnum} rooms, there are a total of {getnum} bedrooms\")\r\n\r\n #Validation for the median_income associative with user inputted zip-code\r\n #Ask user to input a zip-code,\r\n # if there is a match in the housing table, then compute the average of the median-income\r\n #If the user inputs a zip-code that does not match with any zip-code in the housing table,\r\n #ask the user if the user want to try again and input another zip-code\r\n mylist = []\r\n inputzip = input('Zip Code ?')\r\n sqlzipcode = \"\"\"select avg(`median_income`) as myanswer from housing where `zip_code` = %s;\"\"\"\r\n cursor.execute(sqlzipcode, inputzip)\r\n myConnection.commit()\r\n for row in cursor:\r\n getincomenum = (row.get(\"myanswer\"))\r\n\r\n while getincomenum==None:\r\n getincomenum = getmedianincome(cursor, myConnection)\r\n\r\n print(\"The median income associated with the input zip code is\", place_value(round(getincomenum)))\r\n\r\nexcept Exception as e:\r\n print(f\"An error has occurred. 
Exiting: {e}\")\r\n print()\r\nfinally:\r\n myConnection.close()\r\n print(\"Program existing.\")\r\n\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"620659530","text":"product = input()\r\ncity = input()\r\nquantity = float(input())\r\n\r\ndict_sofia = {'coffee': 0.50, 'water': 0.80, 'beer': 1.20, 'sweets': 1.45, 'peanuts': 1.60}\r\ndict_plovdiv = {'coffee': 0.40, 'water': 0.70, 'beer': 1.15, 'sweets': 1.30, 'peanuts': 1.50}\r\ndict_varna = {'coffee': 0.45, 'water': 0.70, 'beer': 1.10, 'sweets': 1.35, 'peanuts': 1.55}\r\n\r\nprice = 0\r\nif city == \"Sofia\":\r\n for key, value in dict_sofia.items():\r\n if key == product:\r\n price += quantity * value\r\nelif city == \"Plovdiv\":\r\n for key, value in dict_plovdiv.items():\r\n if key == product:\r\n price += quantity * value\r\nelse:\r\n for key, value in dict_varna.items():\r\n if key == product:\r\n price += quantity * value\r\n\r\nprint(price)\r\n\r\n\r\n","sub_path":"Shop.py","file_name":"Shop.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"312352985","text":"# -*- coding: utf-8 -*-\r\nfrom threading import Thread\r\nfrom Log import Log\r\n\r\n\"\"\"\r\nDas Spiel bildet eine Spielrunde des Nim-Spiels ab.\r\nDie Spieler werden abwechselnd nach ihrem naechsten Zug gefragt und\r\ndieser wird dann dokumentiert.\r\n\"\"\"\r\nclass Spiel(Thread):\r\n \r\n def __init__(self, Spieler, Spielfeld):\r\n \"\"\"\r\n Constructor\r\n Spieler erwartet eine Collection mit den beiden Spielern\r\n Spielfeld erwartet eine Collection von Ganzzahlen\r\n \"\"\"\r\n self._Spieler = Spieler\r\n self._Spielfeld = Spielfeld\r\n Thread.__init__(self)\r\n self.winner = -1\r\n self.Log = Log()\r\n\r\n def _checkZug(self, row, val):\r\n \"\"\"\r\n Hier wird die Gueltigkeit des vom Spieler gewaehlten Zug geprueft.\r\n Gueltig ist er nur, wenn in der gewaehlten Reihe\r\n noch genuegend Hoelzer existieren.\r\n \"\"\"\r\n # optimistische behandlung. Erst wenn eine Bedingung nicht\r\n # erfuellt ist, wird result auf False gesetzt.\r\n result = True\r\n\r\n # Es koennen nur Fehler Auftreten, wenn auf einen nicht\r\n # vorhandenen Index zugegriffen werden soll. Daher ist\r\n # diese Pruefung in einen Try-Except Block gefasst.\r\n try:\r\n t = self._Spielfeld[row]\r\n\r\n # val muss zwischen 1 und der Anzahl der Hoelzer liegen.\r\n if val > t or val < 1:\r\n result = False\r\n except:\r\n result = False\r\n return result\r\n\r\n def _checkWin(self):\r\n \"\"\"\r\n Hier wird die Siegbedingung geprueft. Sie ist gegeben, wenn\r\n alle Hoelzer aufgenommen wurden.\r\n \"\"\"\r\n # optimistische Behandlung. 
Erst wenn die Bedingung nicht\r\n # erfuellt ist, wird result auf False gesetzt.\r\n result = True\r\n for val in self._Spielfeld:\r\n if val != 0:\r\n result = False\r\n return result\r\n\r\n def run(self):\r\n \"\"\"\r\n Run beschreibt den Ablauf des Spieles.\r\n \"\"\"\r\n IndexTemp = 0\r\n Index = 0\r\n\r\n # solange nicht gewonnen wurde\r\n while not self._checkWin():\r\n\r\n # Soll der Spieler der an der Reihe ist einen Zugvorschlag machen\r\n a = self._Spieler[Index].Zug(self._Spielfeld)\r\n\r\n # Wenn dieser Zugvorschlag gueltig ist\r\n if self._checkZug(a[0], a[1]):\r\n\r\n # wird sich der Spielerindex gemerkt\r\n IndexTemp = Index\r\n\r\n # wird das Spielfeld kopiert (um den Log das Feld vor und\r\n # nach der Aenderung zu uebergeben.\r\n after = self._Spielfeld.copy()\r\n\r\n # Der Zug durchgefuehrt\r\n after[a[0]] = after[a[0]] - a[1]\r\n\r\n # und in den Log geschrieben\r\n self.Log.LogZug(self._Spieler[Index], self._Spielfeld, after)\r\n\r\n # dem Spielfeld das Geaenderte Spielfeld uebergeben\r\n self._Spielfeld = after\r\n\r\n # der Naechste Spieler ermittelt.\r\n Index = (Index+1) % len(self._Spieler)\r\n\r\n # wenn gewonnen wurde, wird sich der Spielerindex des Gewinners gemerkt\r\n self.winner = IndexTemp \r\n","sub_path":"Quellcode/Spiel.py","file_name":"Spiel.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"297719202","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .forms import PostForm\nfrom .models import Post\nfrom django.shortcuts import redirect\nfrom django.core.paginator import Paginator\n\n#問題一覧ページ\ndef index(request,num=1):\n data = Post.objects.all()\n page = Paginator(data, 5)\n params = {\n 'data': page.get_page(num)\n }\n return render(request, 'posts/index.html', params)\n\n#問題作成ページ\ndef create(request):\n if (request.method == 'POST'):\n obj = Post()\n post = PostForm(request.POST, instance=obj)\n post.save()\n return redirect(to='/posts')\n params = {\n 'form': PostForm(), \n } \n return render(request,'posts/create.html',params)\n\n#問題編集ページ\ndef edit(request, num):\n obj = Post.objects.get(id=num)\n if(request.method == 'POST'):\n post = PostForm(request.POST, instance=obj)\n post.save()\n return redirect(to='/posts')\n params = {\n 'id':num,\n 'form': PostForm(instance=obj), \n }\n return render(request, 'posts/edit.html', params)\n\n#問題削除ページ\ndef delete(request, num):\n post = Post.objects.get(id=num)\n if(request.method == 'POST'):\n post.delete()\n return redirect(to='/posts')\n params = {\n 'id':num,\n 'obj': post,\n }\n return render(request, 'posts/delete.html', params)\n\n\n\n# 問題に回答する関数を定義。正解不正解の判定をさせるための条件分技をif文を使って定義。\ndef tyousen(request, num):\n obj = Post.objects.get(id=num)# 問題番号に応じたクエリセットのみをobjに代入\n if request.method == 'POST':# postsページ(index.html)でchallengeボタンを押した時の処理\n kaitouview = request.POST['kaitou']# tyousenページで回答欄に記入した文字列をkaitouviewに代入\n if kaitouview == obj.answer:# 正解の場合の処理\n dic_seikai = {\n 'pulldown':obj.pulldown,# models.pyのpulldownフィールドのみを抽出。\n 'text':obj.text,# models.pyのtextフィールドのみを抽出。\n 'reference':obj.reference,# models.pyのreferenceフィールドのみを抽出。\n 'id':num,\n 'kaitou_hyouzi':'',\n }\n dic_seikai['kaitou_hyouzi'] = kaitouview# 文字列を\bdic_seikaiの要素の'kaitou_hyouzi'keyに対応する値として定義\n return render(request, 'posts/seikai.html', dic_seikai) \n else:# 不正解の場合の処理\n dic_fuseikai = {\n 'pulldown':obj.pulldown,# models.pyのpulldownフィールドのみを抽出。\n 'text':obj.text,# models.pyのtextフィールドのみを抽出。\n 
'id':num,\n 'answer':obj.answer,# models.pyのanswerフィールドのみを抽出。\n 'reference':obj.reference,# models.pyのreferenceフィールドのみを抽出。\n 'kaitou_hyouzi':'',\n }\n dic_fuseikai['kaitou_hyouzi'] = kaitouview# 文字列を\bdic_seikaiの要素の'kaitou_hyouzi'keyに対応する値として定義\n return render(request, 'posts/fuseikai.html', dic_fuseikai) \n params = {\n 'pulldown':obj.pulldown,\n 'text':obj.text,\n 'id':num,\n }\n return render(request, 'posts/tyousen.html', params) \n# Create your views here.\n","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"381344162","text":"groups = (\n ( \n ((\"--rice-woolfson-prior\",), {\n \"help\":\"Use empirical reference structure factor apmlitudes \" \n \"from an Mtz file to parameterize a Rician prior distribution for acentrics and. \"\n \"a folded normal distribution for centrics. \",\n \"action\":'store_true', \n \"default\":False,\n }),\n\n ((\"--laplace-prior\",), {\n \"help\":\"Use empirical reference structure factor apmlitudes \" \n \"from an Mtz file to parameterize a Laplacian prior distribution. \",\n \"action\":'store_true', \n \"default\":False,\n }),\n\n ((\"--normal-prior\",), {\n \"help\":\"Use empirical reference structure factor apmlitudes \" \n \"from an Mtz file to parameterize a Normal prior distribution. \",\n \"action\":'store_true', \n \"default\":False,\n }),\n\n ((\"--studentt-prior-dof\",), {\n \"help\":\"Use empirical reference structure factor apmlitudes \"\n \"from an Mtz file to parameterize a Student T prior distribution. \"\n \"Must specify an mtz file and the degrees of freedom. \",\n \"type\": float, \n \"default\": None,\n }),\n\n ((\"--wilson-prior-b\",), {\n \"help\":\"Experimental Feature: \"\n \"This flag enables learning reflections on a particular Wilson scale. \"\n \"By default, the Wilson prior is flat across resolution bins. \",\n \"type\": float, \n \"default\": None,\n }),\n ),\n\n (\n ((\"--studentt-likelihood-dof\",), { \n \"help\":\"Degrees of freedom for student t likelihood function.\",\n \"type\":float, \n \"metavar\":'DOF', \n \"default\":None,\n }),\n\n ((\"--laplace-likelihood\",), { \n \"help\":\"Use a Laplacian likelihood function.\", \n \"default\":False, \n \"action\":'store_true',\n }),\n ),\n\n)\n","sub_path":"careless/args/exclusive.py","file_name":"exclusive.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"595368278","text":"#! 
/usr/bin/python\n# https://adventofcode.com/2018/day/11\n\nfrom collections import defaultdict\nimport re\n\n# puzzle input\npuzzleInput = 6042\n\nclass Grid:\n def __init__(self,gsn):\n self.grid = []\n for y in range(1,301):\n self.grid.append([])\n for x in range(1,301):\n rackId = x + 10\n power = ((rackId * y) + gsn) * rackId\n # keep only 100's digit\n if power >= 100:\n power = power % 1000\n power -= (power % 100)\n power = int(power/100)\n else:\n power = 0\n power -= 5\n self.grid[y - 1].append(power)\n\n def sum3x3(self,x,y):\n # check we will be on the grid\n if x < 1 or y < 1 or x > (300 - 3) or y > (300 - 3):\n return None\n\n # normalize to deal with a list\n x -= 1\n y -= 1\n\n total = 0\n for i in range(y,y+3):\n for j in range(x,x+3):\n total += self.grid[i][j]\n\n return total\n\n def findHighestSum(self):\n total = None\n xhi, yhi = 0,0\n\n for y in range(1,301):\n for x in range(1,301):\n newSum = self.sum3x3(x,y)\n if newSum != None and (total == None or newSum > total):\n total = newSum\n xhi = x\n yhi = y\n\n return xhi,yhi\n\n def __repr__(self):\n s = \"\"\n for y in self.grid:\n for x in y:\n s += f\"{x} \"\n s += \"\\n\\r\"\n return s\n\ndef main():\n grid = Grid(puzzleInput)\n print(grid.findHighestSum())\n\nmain()\n","sub_path":"11/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"576822491","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.formtools.wizard import FormWizard\nfrom django.http import Http404\nfrom django import forms\n\nclass UploadLastFormWizard(FormWizard):\n\n def get_form(self, step, data=None, files=None):\n return self.form_list[step](data, files, prefix=self.prefix_for_step(step), initial=self.initial.get(step, None))\n\n def __call__(self, request, *args, **kwargs):\n self.instance = kwargs.pop('instance', None)\n if 'extra_context' in kwargs:\n self.extra_context.update(kwargs['extra_context'])\n current_step = self.determine_step(request, *args, **kwargs)\n self.parse_params(request, *args, **kwargs)\n\n # Sanity check.\n if current_step >= self.num_steps():\n raise Http404('Step %s does not exist' % current_step)\n\n previous_form_list = []\n for i in range(current_step):\n f = self.get_form(i, request.POST, request.FILES)\n if request.POST.get(\"hash_%d\" % i, '') != self.security_hash(request, f):\n return self.render_hash_failure(request, i)\n\n if not f.is_valid():\n return self.render_revalidation_failure(request, i, f)\n else:\n self.process_step(request, f, i)\n previous_form_list.append(f)\n\n # Process the current step. 
If it's valid, go to the next step or call\n # done(), depending on whether any steps remain.\n if request.method == 'POST':\n form = self.get_form(current_step, request.POST, request.FILES)\n else:\n form = self.get_form(current_step)\n\n if form.is_valid():\n self.process_step(request, form, current_step)\n next_step = current_step + 1\n\n\n if next_step == self.num_steps():\n return self.done(request, previous_form_list + [form])\n else:\n form = self.get_form(next_step)\n self.step = current_step = next_step\n\n return self.render(form, request, current_step)\n\nclass MailMessageForm(forms.Form):\n \"\"\"\n Форма отправки сообщения всем пользователям\n \"\"\"\n subject = forms.CharField(label=u'Тема', max_length=100,required=True,)\n body = forms.CharField(label=u'Текст сообщения', widget=forms.Textarea,required=True,max_length=255,)\n","sub_path":"apps/core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"13479641","text":"l=[]\nf = open(\"ml7-student-names\", 'r')\nfor line in f:\n\tline = line.strip()\n\t#print(line)\n\tl.append(line)\n#print(l)\nsix = []\nseven = []\nfor string in l:\n\tif '6' in string:\n\t\tsix.append(string)\n\telse:\n\t\tseven.append(string)\n#print(six)\n#print(len(six))\n#print(seven)\n#print(len(seven))\n\n#sixth period names are in the list 'six' and seventh period names are in the list 'seven'\n\nimport random","sub_path":"georgianay.py","file_name":"georgianay.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"385165558","text":"# -*- coding: utf-8 -*-\nimport concurrent.futures\nimport os\n\nimport numpy as np\nfrom rdkit.Chem import Descriptors\nfrom rdkit.Chem import MolFromSmiles as smi2mol\nfrom rdkit.Chem import MolToSmiles as mol2smi\nfrom selfies import decoder, encoder, get_semantic_robust_alphabet\nfrom tqdm import tqdm\n\nimport wandb\n\nfrom ...sa_scorer.sascorer import calculate_score\n\nwandb.init(project=\"ga_replication_study\", tags=[\"baseline\", \"experiment_1\", \"extended_alphabet\"])\ntable = wandb.Table(columns=[\"run\", \"SMILES\", \"J\"])\n\nTHIS_DIR = os.path.dirname(os.path.realpath(__file__))\nALPHABET = list(get_semantic_robust_alphabet())\n\n\ndef sanitize_smiles(smi):\n \"\"\"Return a canonical smile representation of smi\n\n Parameters:\n smi (string) : smile string to be canonicalized\n\n Returns:\n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)\n smi_canon (string) : Canonicalized smile representation of smi (None if invalid smile string smi)\n conversion_successful (bool): True/False to indicate if conversion was successful\n \"\"\"\n try:\n mol = smi2mol(smi, sanitize=True)\n smi_canon = mol2smi(mol, isomericSmiles=False, canonical=True)\n return (mol, smi_canon, True)\n except:\n return (None, None, False)\n\n\ndef get_logP(mol):\n \"\"\"Calculate logP of a molecule\n\n Parameters:\n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object, for which logP is to calculates\n\n Returns:\n float : logP of molecule (mol)\n \"\"\"\n return Descriptors.MolLogP(mol)\n\n\ndef get_SA(mol):\n return calculate_score(mol)\n\n\ndef calc_RingP(mol):\n \"\"\"Calculate Ring penalty for each molecule in unseen_smile_ls,\n results are recorded in locked dictionary props_collect\n \"\"\"\n cycle_list = mol.GetRingInfo().AtomRings()\n if len(cycle_list) == 0:\n cycle_length = 0\n else:\n 
cycle_length = max([len(j) for j in cycle_list])\n if cycle_length <= 6:\n cycle_length = 0\n else:\n cycle_length = cycle_length - 6\n return cycle_length\n\n\ndef get_random_selfie_mol():\n \"\"\"\n Create random molecules using the SELFIES alphabet. Strings are contrained to\n a length of 81.\n \"\"\"\n valid = False\n alphabet = ALPHABET\n\n while valid != True:\n selfie_char_ls = []\n for i in range(81):\n random_char = np.random.choice(alphabet, size=1)[0]\n selfie_char_ls.append(random_char)\n\n selfie_str = \"\".join(x for x in selfie_char_ls)\n decoded_smile_str = decoder(selfie_str)\n if decoded_smile_str != -1:\n valid = True\n # check if smile string is recognized by rdkit\n mol, smiles_canon, done = sanitize_smiles(decoded_smile_str)\n if mol == None or smiles_canon == \"\" or len(smiles_canon) > 81:\n valid = False\n continue\n\n return smiles_canon\n\n\ndef run_random_experiment(i):\n np.random.seed()\n A = []\n for j in range(50000):\n A.append(get_random_selfie_mol())\n\n if len(A[j]) > 81:\n raise Exception(\"Length fail!\")\n\n logP_scores = []\n SA_scores = []\n RingP_scores = []\n\n for item in A:\n mol, smiles_canon, done = sanitize_smiles(item)\n if mol == None or done == False:\n raise Exception(\"A molecule is incorrect! Test Failed\")\n\n logP_scores.append(get_logP(mol))\n SA_scores.append(get_SA(mol))\n RingP_scores.append(calc_RingP(mol))\n\n # Save all the smile codes of this data set\n with open(\n os.path.join(THIS_DIR, \"results/results_{}.txt\".format(i)), \"a+\"\n ) as handle:\n handle.write(\"smile: {} \\n\".format(smiles_canon))\n\n # Calculate J(m)\n logP_norm = [((x - 2.4729421499641497) / 1.4157879815362406) for x in logP_scores]\n SAS_norm = [((x - 3.0470797085649894) / 0.830643172314514) for x in SA_scores]\n RingP_norm = [\n ((x - 0.038131530820234766) / 0.2240274735210179) for x in RingP_scores\n ]\n J = []\n for counter in range(len(logP_norm)):\n J.append(logP_norm[counter] - SAS_norm[counter] - RingP_norm[counter])\n\n print(\"smile: \", A[J.index(max(J))], max(J))\n\n # Save result in text file\n with open(os.path.join(THIS_DIR, \"results.txt\"), \"a+\") as fh:\n fh.write(\"smile: {}, J: {} \\n\".format(A[J.index(max(J))], max(J)))\n\n return A[J.index(max(J))], max(J), A\n\n\nif __name__ == \"__main__\":\n num_runs = 100\n counter = 0\n if not os.path.exists(os.path.join(THIS_DIR, \"results\")):\n os.mkdir(os.path.join(THIS_DIR, \"results\"))\n\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for smiles, j, all_smiles in tqdm(\n executor.map(run_random_experiment, range(num_runs)), total=num_runs\n ):\n print(\"Run: \", counter)\n wandb.log({\"J\": j})\n\n table.add_data(counter, smiles, j)\n counter += 1\n\n wandb.log({\"Table of best SMILES\": table})","sub_path":"experiments/experiment_1/random_baseline/random_baseline_extended_alphabet.py","file_name":"random_baseline_extended_alphabet.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"432440101","text":"import math\nmaior_x = 0\nmaior_erro = 0\nfor x in range(0,91):\n funcao = 4 * x * (180-x)/(40500 - x * (180 - x))\n python = math.sin(math.radians(x))\n diferenca = abs(funcao - python)\n if diferenca > maior_erro:\n maior_erro = diferenca\n maior_x = x\nprint(maior_x)\n \n\n \n 
","sub_path":"backup/user_348/ch160_2020_06_17_13_10_50_082169.py","file_name":"ch160_2020_06_17_13_10_50_082169.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"310422135","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n#==============================================================================\n# QARTOD to TIME SERIES DATA\n#==============================================================================\n\n\nThis script selects all data available for SSB buoy and save as CSV and \nPICKLE, to be used as time series and to filter based on QCT already created\nfor the REAL TIME. This analysis is necessary because some QCT needs previous\ninformation (curve for TS, climatology) to evaluate data.\n\n\n@author: fncsobral\n@date : 21/nov/2016\n@mod1. : 07/dec/2016 - Huge changes, arranging SUNA date, verifying number of \n sample data for each hour.\n@mod2. : 17/jan/2017 - Arranging index DataFrame problem\n#==============================================================================\n\"\"\"\n\nimport sys\nsys.path.append('./TimeSeries/')\n\n\n\n##\nimport time\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime as dtm\nfrom filter_date import ver_continuity\n\n#==============================================================================\n# \n#==============================================================================\n# Loading data from spreadsheet from WQMX\nwqmx = pd.read_csv('./TimeSeries/simcosta_data/wqmx.csv', skiprows=12, delimiter=',')\n\n\n# Loading data from SUNA (Nitrate)\nsuna = pd.read_csv('./TimeSeries/simcosta_data/sunasdb.csv', skiprows=6 , delimiter=',')\n\n\n# Verifying and filtering date values for both WQMX and SUNA instruments.\n# WQMX_EPC and SUNA_EPC are dictionaries where the keys are the epoch date \n# related to the time sampling (HH:01:30).\nprint('Verifying Dates in WQMX'); time.sleep(3)\nwqmx_epc, new_df_wqmx = ver_continuity(wqmx)\nprint(''); print('')\n\n\nprint('Verifying Dates in SUNA'); time.sleep(3)\nsuna_epc, new_df_suna = ver_continuity(suna)\nprint(''); print('')\n\n\n\n# Selecting the epoch dates (that are the keys of wqmx and suna dictionaries) \n# to concatenate in one list, to have then all together. 
\n# Each key (as epoch date), contain all the values from time sample.\nwqmx_keys = np.sort([int(nn) for nn in wqmx_epc.keys() if nn != 'epc_start' and nn != 'epc_future'])\nsuna_keys = np.sort([int(nn) for nn in suna_epc.keys() if nn != 'epc_start' and nn != 'epc_future']) \n\n\n# Merging all the keys, eliminating repeated values and sorting ascending.\nall_keys = np.unique(np.concatenate((wqmx_keys, suna_keys), axis=0))\n\n\n# Getting the first (mn) and last (mx) time period.\nmn = all_keys[0 ]\nmx = all_keys[-1]\n\n\n# Hour guide epoch date list.\nmnmx = list(range(int(mn), int(mx), 3600))\n\n\n# Creating empty DataFrame to store all data together\ndf_final = pd.DataFrame(columns=['DateTime', 'EpochDate', 'Temperature', 'Salinity',\n 'CDOM' , 'Chlorophyl', 'Turbidity' , 'DO' , \n 'DOS' , 'Nitrate'])\n\n\n\n#==============================================================================\n# INSERTING VALUES FROM WQMX AND SUNA TOGETHER IN DATAFRAME\n#==============================================================================\n# ------------------------------------ WQMX --------------------------------- #\n# Names for the DF_FINAL\nvar_nms = ['Temperature', \n 'Salinity' , \n 'CDOM' ,\n 'Chlorophyl' ,\n 'Turbidity' ,\n 'DO' ,\n 'DOS' \n ]\n\n# Names as is on WQMX equipament.\nwqmx_varnm = ['T(W)' , \n 'SALINITY' , \n 'CDOM' , \n 'FLUOR' , \n 'NTU(700)' ,\n 'DO' ,\n 'OXSAT'\n ]\n\n\n# Loop in guide list with epoch dates. \nprint('Verifying amount sample data, calculating mean value [WQMX]')\ntime.sleep(3)\n\nfor i, item in enumerate(mnmx): \n # Inserting DateTime\n df_final.set_value(i, 'DateTime', dtm.utcfromtimestamp(item)) \n \n # Inserting EpochDate\n df_final.set_value(i, 'EpochDate', item)\n \n # Verificando os valores que existem em wqmx_epc\n try:\n # Getting all WQMX_EPC index for ITEM key.\n data_epc = wqmx_epc[str(item)]\n \n # Minimum of 10 samples of data colected for each hour. If less,\n # NaN is inserted. Otherwise, a mean value will be calculated.\n if len(data_epc) < 10:\n print('Insufficient Data Sampling Amount (', i,'). [WQMX]') \n # Inserting NaN in Variable columns in DataFrame.\n for nm in var_nms:\n df_final.set_value(i, nm, float('nan'))\n else:\n for nm_wqmx, nm_final in zip(wqmx_varnm, var_nms):\n # Getting all values from DATA_EPC index, and doing MEAN VALUE.\n mean_data = np.mean(new_df_wqmx[nm_wqmx].iloc[data_epc])\n \n # Inserting MEAN values in Dataframe\n df_final.set_value(i, nm_final, mean_data)\n \n # If epoch is not in data sampling period.\n except: \n print('Date non existant, with index ', i, ' and value of: ', item, ' [WQMX]')\n for nm in var_nms:\n df_final.set_value(i, nm, float('nan'))\n print('Data does not exist (', i,'). [WQMX]')\n \n\n \n# ----------------------------------- SUNA ---------------------------------- #\n# Same process applyed for WQMX\nprint(''); print('')\nprint('Verifying amount sample data, calculating mean value [SUNA]')\ntime.sleep(3)\n\nfor i, item in enumerate(mnmx):\n \n # The TRY condition is in case of epoch does not exist in this equipament.\n try:\n data_epc = suna_epc[str(item)]\n \n if len(data_epc) < 10:\n print('Insufficient Data Sampling Amount (', i,'). 
[SUNA]')\n\n # Inserting NaN in Variable columns in DataFrame.\n df_final.set_value(i, 'Nitrate', float('nan'))\n else:\n mean_data = np.mean(new_df_suna['NITRATE_UM'].iloc[data_epc])\n \n # Inserting MEAN values in Dataframe\n df_final.set_value(i, 'Nitrate', mean_data)\n except:\n print('Date non existant, with index ', i, ' and value of: ', item, ' [SUNA]')\n df_final.set_value(i, 'Nitrate', float('nan'))\n print('Data does not exist (', i,'). [SUNA]')\n\n\n\n# ================================ PLOTTING ================================= #\nfor nn in df_final.columns:\n if nn == 'DateTime' or nn == 'EpochDate':\n pass\n else:\n plt.figure()\n plt.plot(df_final[nn])\n plt.title(nn)\n plt.show() \n \ntime.sleep(2)\nprint('Closing all figures...')\nplt.close('all') \n \n# ============================ SAVING VARIABLES ============================= #\ndf_final.to_csv( './Data/timeSeries0.csv', sep=',')\ndf_final.to_pickle('./Data/timeSeries0.pkl') \n \n\n \n#==============================================================================\n# END \n#==============================================================================","sub_path":"TimeSeries/getting_data.py","file_name":"getting_data.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"92015803","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom scrapy import cmdline\nimport time\nimport subprocess\nimport sched, os\n\nif __name__ == \"__main__\":\n cmdline.execute('scrapy crawl jdSpider'.split())\n\n# 初始化sched模块的scheduler类\n# 第一个参数是一个可以返回时间戳的函数,第二个参数可以在定时未到达之前阻塞。\nschedule = sched.scheduler(time.time, time.sleep)\n\n\n# 被周期性调度触发的函数\ndef func():\n print(\"爬虫开始\")\n subprocess.Popen(\"scrapy crawl jdSpider\")\n\n\ndef perform1(inc):\n schedule.enter(inc, 0, perform1, (inc,))\n func() # 需要周期执行的函数\n\n\ndef mymain():\n schedule.enter(0, 0, perform1, (6*3600,))\n\n# if __name__ == \"__main__\":\n# mymain()\n# schedule.run() # 开始运行,直到计划时间队列变成空为止\n","sub_path":"spider/jdSpider/jdSpider/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"460196714","text":"import hashlib\nfrom program_manager import choices\nfrom dateutil.parser import parse as dateutil_parse\n\nclass Deduper(object):\n\n def __init__(self, dm_rules, sr_rules, sv_rules):\n self.dm_rules = dm_rules\n self.sr_rules = sr_rules\n self.sv_rules = sv_rules\n # these are the fields we'll use to match duplicates\n self.match_fields = []\n for dm in self.dm_rules:\n self.match_fields.append(dm.field.get_name())\n self.hashes = {}\n self.clusters = {}\n\n def digest_key_field(self, value):\n if not value:\n return None\n return hashlib.sha1(value.strip().lower()).digest()\n\n def feed_record(self, record):\n try:\n if record['IsConverted'] == 'true':\n # ignore converted leads\n return\n except KeyError:\n pass\n key = tuple((self.digest_key_field(record[f]) for f in self.match_fields))\n if None in key:\n return\n key = hashlib.sha1(''.join(key)).digest()\n if key in self.hashes:\n #print 'found duplicate: %s' % str(key)\n self.hashes[key].append(record['_id'])\n self.clusters[key] = self.hashes[key]\n else:\n self.hashes[key] = [record['_id']]\n\n def record_passes_sr_rule(self, cluster, record_index, rule):\n record = cluster[record_index]\n field = rule.field.get_name()\n value = record[field].lower()\n if rule.rule in ['LOWEST_VALUE', 'HIGHEST_VALUE']:\n try:\n 
value = float(value)\n except ValueError:\n return False\n if rule.rule == 'LOWEST_VALUE':\n other = min(cluster, key=lambda r: r[field])\n elif rule.rule == 'HIGHEST_VALUE':\n other = max(cluster, key=lambda r: r[field])\n return cluster.index(other) == record_index\n elif rule.rule in ['OLDEST', 'NEWEST']:\n def sort_key(record):\n return dateutil_parse(record[field])\n try:\n if rule.rule == 'OLDEST':\n other = min(cluster, key=sort_key)\n elif rule.rule == 'NEWEST':\n other = max(cluster, key=sort_key)\n if cluster.index(other) == record_index:\n return True\n else:\n return False\n except ValueError:\n return False\n elif rule.rule in ['TRUE', 'FALSE']:\n if rule.rule == 'TRUE':\n return rule.value == 'true'\n else:\n return rule.value == 'false'\n else:\n return self.compute_text_rule(rule, value)\n\n def compute_text_rule(self, rule, value):\n value = value.lower()\n rule.value = rule.value.lower()\n if rule.rule == choices.TEXT_EQUALS:\n return value == rule.value\n elif rule.rule == choices.TEXT_NOT_EQUALS:\n return value != rule.value\n elif rule.rule == choices.TEXT_CONTAINS:\n return rule.value in value\n elif rule.rule == choices.TEXT_STARTS:\n return value.startswith(rule.value)\n\n def get_canonical_field_value(self, cluster_data, field_id):\n canonical = cluster_data[0][field_id]\n for rule in reversed(self.sv_rules):\n if field_id != rule:\n continue\n if rule.rule in ['OLDEST', 'NEWEST']:\n if rule.rule == 'OLDEST':\n return min(cluster_data, key=self.get_record_last_modified)[field_id]\n elif rule.rule == 'NEWEST':\n return max(cluster_data, key=self.get_record_last_modified)[field_id]\n elif rule.rule in ['NUMADD', 'NUMAVG', 'NUMMAX', 'NUMMIN']:\n numbers = []\n for record in cluster_data:\n try:\n numbers.append(float(record[field_id]))\n except ValueError:\n continue\n if not numbers:\n return canonical\n if rule.rule == 'NUMADD':\n return sum(numbers)\n elif rule.rule == 'NUMAVG':\n return sum(numbers) / float(len(numbers))\n elif rule.rule == 'NUMMAX':\n return max(numbers)\n elif rule.rule == 'NUMMIN':\n return min(numbers)\n elif rule.rule == 'CONCAT':\n return '\\n\\n'.join([f[field_id] for f in cluster_data if f[field_id]])\n else:\n for record in cluster_data:\n value = record[field_id]\n if self.compute_text_rule(rule, value):\n return value\n return canonical\n\n def sort_cluster(self, cluster_data):\n def sort_key(record):\n return (tuple((self.record_passes_sr_rule(cluster_data,\n cluster_data.index(record), rule) for rule in self.sr_rules)) +\n (self.get_record_last_modified(record),))\n cluster_data = sorted(cluster_data, key=sort_key, reverse=True)\n return cluster_data\n\n def get_record_last_modified(self, record):\n last_modified = (record['CreatedDate'] or\n record['LastModifiedDate'])\n return dateutil_parse(last_modified)\n\n def get_canonical(self, cluster_data):\n master = cluster_data[0]\n canonical = dict(master)\n # process sv rules\n for key, field in canonical.viewitems():\n if key.startswith('_'):\n continue\n canonical[key] = self.get_canonical_field_value(cluster_data, key)\n # update null fields\n for secondary in cluster_data[1:]:\n for key, value in secondary.viewitems():\n if key.startswith('_'):\n continue\n if not canonical[key]:\n canonical[key] = value\n return canonical\n\n","sub_path":"operations/dedupe.py","file_name":"dedupe.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"298363273","text":"from django.core.management.base 
import BaseCommand, CommandError\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport re\nimport urllib\nimport datetime\nfrom django.conf import settings\nfrom selenium import webdriver\nfrom PIL import Image\n\n\nfrom finder.models import ItemCategory, Brand, FashionImage, FashionItem, Designer\n\nBASE_URL = \"http://bergdorfgoodman.com\"\nHEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'}\t\t\n\ndef get_categories(url):\n\t\"\"\"takes bergdorf url and returns dict of categories and their urls\"\"\"\n\tbase_url = 'http://bergdorfgoodman.com'\n\tresponse = requests.get(url)\n\tcategories_to_crawl = ['Dresses', 'Capes & Ponchos', 'Sweaters', 'Jackets & Vests', 'Skirts', 'Pants & Shorts', 'Jumpsuits']\n\tsoup = BeautifulSoup(response.text, 'html.parser')\n\tcategory_menu = soup.find_all(\"ul\", class_=\"category-menu\")\n\tcategory_list = {}\n\tfor each in category_menu:\n\t\tlinks = each.find_all('a', href=True)\n\t\tfor link in links:\n\t\t\turl = base_url + link['href']\n\t\t\tname = str(link.text).strip(' \\t\\n\\r')\n\t\t\tif name in categories_to_crawl:\n\t\t\t\tcategory_list[name] = url\n\t\t\telse:\n\t\t\t\tcontinue\n\treturn category_list\n\n\ndef get_designers(url):\n\t\"\"\"take bergdorf url and return dict of designer names with url to their products\"\"\"\n\tresponse = requests.get(url)\n\tsoup = BeautifulSoup(response.text, 'html.parser')\n\tdesigner_links = soup.find_all(\"div\", class_=\"designerlink\")\n\tdesigners = {}\n\tfor link in designer_links:\n\t\tdesigner_url = link.a['href']\n\t\tlink = str(link.a.text).strip(' \\t\\n\\r')\n\t\tif link[-3:] == \"NEW\":\n\t\t\tlink = link[:-3]\n\t\t\tlink = link.strip()\n\t\tdesigners[link] = BASE_URL + designer_url\n\treturn designers\n\n\ndef create_products(products, current_page, designer_url, cookies, category):\n\t\"\"\"takes in a string from beautifulsoup for bergdorf products and creates FashionItem objects\"\"\"\n\tfirst_image_name = ''\n\timage_two_name = ''\n\tcategory_name = category\n\tnext_page_url = ''\n\tfor p in products:\t\n\t\tprodu = p.find(\"div\", {\"class\": \"productname\"})\n\t\tprice = p.find(\"div\", {\"class\": \"product-price\"})\n\t\tprice = str(price.text).strip(' \\t\\n\\r')\n\t\turl = p.a.get('href')\n\t\tstart = url.index(\"cat\") + 3\n\t\tend = url.index(\"__\")\n\t\tcategory_number = url[start:end]\n\t\tnext_page = current_page + 1\n\t\tpagination_end_of_url = '#endecaDrivenSiloRefinements=navAction%%3Dindex&userConstrainedResults=true&refinements=&page=%s&pageSize=120&sort=&definitionPath=/nm/commerce/pagedef_rwd/template/EndecaDriven&onlineOnly=&allStoresInput=false&rwd=true&catalogId=cat%s' % (next_page, category_number)\n\t\tnext_page_url = designer_url + pagination_end_of_url\n\t\tproduct_url = BASE_URL + url\n\t\titem_response = requests.get(product_url)\n\t\t#getting item description from individual product url\n\t\titem_soup = BeautifulSoup(item_response.text, 'html.parser')\n\t\titem_description = item_soup.find_all(\"div\", class_=\"productCutline\")\n\t\titem_name = p.h2.text\n\t\titem_name = str(p.h2.text).strip(' \\t\\n\\r')\n\t\titem_name = item_name.replace('/', '')\n\t\tfirst_image_name = item_name.replace('/', '')\n\t\tprint(first_image_name)\n\t\timage = p.img.get(\"item-url\")\n\t\tif image:\n\t\t\tfirst_image_url = image\n\t\t\ttry:\n\t\t\t\tfirst_image_response = requests.get(first_image_url, HEADERS)\n\t\t\texcept requests.exceptions.ConnectionError as 
e:\n\t\t\t\tprint(e)\n\t\t\t\tpass\n\t\t\texcept requests.exceptions.MissingSchema:\n\t\t\t\tpass\n\t\tsecond_image = p.img.get(\"data-alt-image\")\n\t\tif second_image:\n\t\t\tsecond_image_url = second_image\n\t\t\ttry:\n\t\t\t\tsecond_image_response = requests.get(second_image_url)\n\t\t\t\timage_two_name = image_two_name.replace('\\\\', '')\n\t\t\t\timage_two_name = item_name + \"-2\"\n\t\t\texcept requests.exceptions.ConnectionError as e:\n\t\t\t\tprint(e)\n\t\t\t\tpass\n\t\t\texcept requests.exceptions.MissingSchema:\n\t\t\t\tpass\n\t\tif not image and not image_two:\n\t\t\tprint(\"no images\")\n\t\t\tpass\n\t\telse:\n\t\t\tprint(\"there are images, so creating fashion items\")\n\t\t\tfashion_item = FashionItem.objects.get_or_create(name=item_name)[0]\n\t\t\tcategories = fashion_item.determine_categories(category)\n\t\tif image:\n\t\t\timage_one = FashionImage.objects.get_or_create(image=first_image_name, date_scanned = datetime.datetime.now(), fashion_item=fashion_item)[0]\n\t\t\timage_one.create_thumbnail(first_image_name, first_image_response)\n\t\tif second_image:\n\t\t\timage_two = FashionImage.objects.get_or_create(image=image_two_name, date_scanned = datetime.datetime.now(), fashion_item=fashion_item)[0]\n\t\t\timage_two.create_thumbnail(image_two_name, second_image_response)\n\t\tdescription = ''\n\t\tfor item in item_description:\n\t\t\tif item.h2:\n\t\t\t\tdescription = item.h2.text\n\t\t\t\tfashion_item.description = description\t\t\t\t\n\t\t\telse:\n\t\t\t\tcontinue \n\t\tfashion_item.category_number = str(category_number)\n\t\tbergdorf = Brand.objects.get_or_create(brand_name = \"Bergdorf Goodman\")[0]\n\t\tfashion_item.brand.add(bergdorf)\n\t\tfashion_item.save()\n\t\tif image:\n\t\t\timage_one.save()\n\t\tif second_image:\n\t\t\timage_two.save()\t\n\treturn next_page_url\n\ndef get_items(url, category):\n\t\"\"\"takes a designer url and creates fashion items\"\"\"\n\tcurrent_page = 1\n\tdesigner_url = url\n\tresponse = requests.get(url)\n\tcookies = response.cookies\n\tpage_number = 1\n\tsoup = BeautifulSoup(response.text, 'html.parser')\n\tproducts = soup.find_all(\"li\", class_=\"category-item\")\n\tfashion_items = []\n\tpage_number = soup.find_all(\"li\", class_=\"pageOffset\")\n\tpages = []\n\tlast_page = 0\n\tcurrent_page = 1\n\tfor p in page_number:\n\t\tif int(p.text) <= last_page + 1:\n\t\t\tlast_page = int(p.text)\n\tnext_page_url = ''\n\tif last_page == 0:\n\t\tproducts = create_products(products, current_page, designer_url, cookies, category)\n\twhile current_page <= last_page:\n\t\tif current_page > 1:\n\n\t\t\tbrowser = webdriver.Firefox()\n\t\t\tbrowser.get(next_page_url)\n\t\t\tsoup = BeautifulSoup(browser.page_source, 'html.parser')\n\t\t\tproducts = soup.find_all(\"li\", class_=\"category-item\")\n\t\t\tnew_products = create_products(products, current_page, designer_url, cookies, category)\n\t\t\tprint(new_products)\n\t\t\tcurrent_page += 1\n\t\t\tbrowser.close()\n\t\telse:\n\t\t\tnew_products = create_products(products, current_page, designer_url, cookies, category)\n\n\t\t\tnext_page_url = new_products\n\t\t\tcurrent_page += 1\n\t\t\t\n\treturn fashion_items\n\t\t\nurl = 'http://www.bergdorfgoodman.com/Designers-A-Z/cat000001_cat000000/c.cat'\n\nclass Command(BaseCommand):\n\n\tdef handle(self, *args, **options):\n\t\tbergdorf = Brand.objects.get_or_create(brand_name=\"Bergdorf Goodman\")[0]\n\t\t#url = 'http://www.bergdorfgoodman.com/Designers-A-Z/cat000001_cat000000/c.cat'\n\t\tcategories_url = 
'http://www.bergdorfgoodman.com/Designer-Collections/Categories/cat000009_cat000002_cat000000/c.cat'\n\t\tcategories = get_categories(categories_url)\n\t\tfor category in categories.items():\n\t\t\titems = get_items(category[1], category[0])\n","sub_path":"finder/management/commands/bergdorf.py","file_name":"bergdorf.py","file_ext":"py","file_size_in_byte":6713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} {"seq_id":"353162554","text":"\nimport menu.usermenu as usermenu\nimport sql\nmycursor=sql.mycursor\nmydb=sql.mydb\n\n\ndef make_booking(udft):\n    l=len(udft)\n    for i in range(l):\n        mycursor.execute(\"INSERT INTO BOOKING VALUES\"+str(tuple(udft.loc[i,:])))\n    mydb.commit()\n    print(\"YOUR FLIGHT HAS BEEN BOOKED !\")\n    input(\"> \")\n    \n    username = udft.at[0,'username']\n    return usermenu.user_menu(username)\n\n\n\n\ndef payment(total_amount,udft):\n    print(\"--WELCOME TO THE PAYMENT PORTAL--\")\n    \n    cc_no=int(input(\"ENTER CREDIT CARD NUMBER : \"))\n    \n    if len(str(cc_no)) != 16:\n        print(\"enter correct credit card no. \")\n        # return here, otherwise execution falls through and keeps the bad number\n        return payment(total_amount,udft)\n    \n    udft.loc[:,[\"creditcard\"]]=str(cc_no)\n    \n    cvv=int(input(\"ENTER CREDIT CARD CVV (4 max): \"))\n    if len(str(cvv)) > 4:\n        print(\"enter correct cvv\")\n        return payment(total_amount,udft)\n    input(\"ENTER CREDIT CARD EXPIRY (MM-YYYY): \")\n    \n    print(total_amount,\"+\",(18/100)*total_amount, \"(tax) : Rs is to be deducted \")\n    random=input(\"PRESS ENTER TO AGREE >\")\n    if random=='': \n        return make_booking(udft)\n    else:\n        return payment(total_amount,udft)\n","sub_path":"user/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} {"seq_id":"299470947","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n'''\nclass Actor:\n    Actor Module - Neural Network for approximating the policy\n'''\nclass Actor(nn.Module):\n    def __init__(self, n_state, n_action):\n        super(Actor, self).__init__()\n        self.hidden1 = nn.Linear(n_state, 8)\n        self.out = nn.Linear(8, n_action)\n\n    def forward(self, x):\n        x = torch.Tensor(x)\n        x = self.hidden1(x)\n        x = torch.sigmoid(x)\n        x = self.out(x)\n        out = F.softmax(x, dim=-1)\n        # print(out)\n        return out\n\n'''\nclass Critic:\n    Critic Module - Neural Network for approximating the value\n'''\nclass Critic(nn.Module):\n    def __init__(self, n_state):\n        super(Critic, self).__init__()\n        self.hidden1 = nn.Linear(n_state, 16)\n        self.hidden2 = nn.Linear(16, 16)\n        self.out = nn.Linear(16, 1)\n\n    def forward(self, x):\n        x = torch.Tensor(x)\n        x = self.hidden1(x)\n        x = torch.tanh(x)\n        x = self.hidden2(x)\n        x = torch.tanh(x)\n        out = self.out(x)\n        return out","sub_path":"RL/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} {"seq_id":"206066872","text":"import sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QInputDialog, QLineEdit, QListWidgetItem, QMenu\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import QSize, Qt, QDir\nfrom Client.Engine import client_file\nfrom Client.GUI import ui_main_panel\nimport os\n\n\nclass Client(QtWidgets.QMainWindow, ui_main_panel.Ui_FTPClient):\n    def __init__(self):\n        QtWidgets.QMainWindow.__init__(self)\n        ui_main_panel.Ui_FTPClient.__init__(self)\n        self.setupUi(self)\n        self.setWindowTitle(\"Client FTP Server\")\n\n        self.client = client_file.Client()\n        self.path_to_file = \"\"\n        
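# staging used by the copy/paste and move context-menu actions below\n        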
self.path_to_copy = \"\"\n self.path_to_move_and_stage = [\"\", False]\n self.PWD = str(self.client.PWD())\n self.directory = \"\"\n self.files_in_dir = {}\n self.new_file_name = \"\"\n self._get_list_of_files()\n\n self.lineEdit_directory.setText(self.PWD)\n self.lineEdit_directory.textChanged.connect(self.handle_edited_text)\n self.lineEdit_directory.editingFinished.connect(self.handle_editing_finished)\n self.pushButton_back.clicked.connect(self.handle_clicked_back_button)\n self.list_of_files.doubleClicked.connect(self.double_clicked_list_widget)\n self.list_of_files.currentRowChanged.connect(self.clicked_list_widget)\n\n def clicked_list_widget(self, index):\n try:\n dir = self.list_of_files.currentItem().text()\n self.path_to_file = self.PWD + \"/\" + dir\n except AttributeError:\n self.path_to_file = \"ERROR\"\n\n def double_clicked_list_widget(self):\n dir = self.list_of_files.currentItem().text()\n self._check_if_dir_exist(self.PWD + \"/\" + dir)\n\n def handle_edited_text(self, dir):\n self.directory = dir\n\n def handle_editing_finished(self):\n if self.lineEdit_directory.isModified():\n self._check_if_dir_exist(self.directory)\n self.lineEdit_directory.setModified(False)\n\n def _check_if_dir_exist(self, directory):\n self.client.CD(directory)\n self.PWD = str(self.client.PWD())\n self._get_list_of_files()\n self.lineEdit_directory.setText(self.PWD)\n\n def handle_clicked_back_button(self):\n dir = self.PWD.rsplit('/', 1)\n self.directory = dir[0]\n self._check_if_dir_exist(self.directory)\n\n def _get_list_of_files(self):\n self.list_of_files.clear()\n files_in_dir = self.client.LS()\n self.files_in_dir = files_in_dir.split('\\t')\n\n if not self.files_in_dir[0] == '':\n for file in self.files_in_dir:\n file_after = self.kill_char(file, len(file)-1)\n itm = QListWidgetItem(file_after)\n\n if file[-1] == \"0\":\n itm.setIcon(QIcon(r\"Icons/folder.ico\"))\n else:\n file = self.kill_char(file, len(file) - 1)\n try:\n after_dot = (file.split(\".\"))[1]\n if os.path.isfile(\"Icons/\"+after_dot+\".png\"):\n itm.setIcon(QIcon(r\"Icons/\"+after_dot+\".png\"))\n else:\n itm.setIcon(QIcon(r\"Icons/unknown.png\"))\n except IndexError:\n itm.setIcon(QIcon(r\"Icons/unknown.png\"))\n self.list_of_files.addItem(itm)\n self.list_of_files.setIconSize(QSize(25, 25))\n\n def mousePressEvent(self, QMouseEvent):\n self.list_of_files.clearSelection()\n\n def closeEvent(self, event):\n self.client.QUIT(\"Y\")\n event.accept() # let the window close\n\n def contextMenuEvent(self, event):\n menu = QMenu(self)\n copy = menu.addAction(\"Copy\")\n paste = menu.addAction(\"Paste\")\n delete = menu.addAction(\"Delete\")\n rename = menu.addAction(\"Rename\")\n new_folder = menu.addAction(\"New folder\")\n download = menu.addAction(\"Download\")\n upload = menu.addAction(\"Upload here\")\n move = menu.addAction(\"Move it\")\n if self.path_to_move_and_stage[1]:\n move.setText(\"Move here\")\n\n action = menu.exec_(self.mapToGlobal(event.pos()))\n try:\n if action == copy:\n self.path_to_copy = self.path_to_file\n elif action == paste:\n self.client.CP(self.path_to_copy + \" \" + self.PWD)\n elif action == delete:\n if self.client.ISFILE(self.path_to_file) == \"FILE\": #FIX HERE\n self.client.RM(self.path_to_file)\n else:\n self.client.RMDIR(self.path_to_file)\n elif action == rename:\n self.edit_file_name()\n elif action == new_folder:\n self.add_folder_here()\n elif action == download:\n self.download_file()\n elif action == upload:\n self.upload_file()\n elif action == move:\n if 
self.path_to_move_and_stage[1]:\n self.move_dir_here()\n else:\n self.copy_dir_to_move()\n finally:\n self._get_list_of_files()\n\n def copy_dir_to_move(self):\n self.path_to_move_and_stage[0] = self.path_to_file\n self.path_to_move_and_stage[1] = True\n\n def move_dir_here(self):\n self.client.MV(self.path_to_move_and_stage[0] + \" \" + self.PWD + \"/\" + self.get_name_file(self.path_to_move_and_stage[0]))\n self.path_to_move_and_stage[1] = False\n\n def download_file(self):\n file = self.get_name_file(self.path_to_file)\n dialog = QtWidgets.QFileDialog\n path = dialog.getSaveFileName(self, \"Download \"+ file, QDir.homePath())\n if not path[0] == '':\n self.client.DOWNLOAD(self.path_to_file + \" \" + path[0])\n\n def upload_file(self):\n dialog = QtWidgets.QFileDialog\n file = self.get_name_file(self.PWD)\n path = dialog.getOpenFileName(self, \"Upload to \" + file, QDir.homePath())\n if not path[0] == '':\n self.client.UPLOAD(path[0] + \" \" + self.PWD+\"/\"+self.get_name_file(path[0]))\n\n def get_name_file(self, dir):\n return dir.split(\"/\")[-1]\n\n def name_of_focus_element(self):\n name = self.path_to_file.split('/')\n return name[-1]\n\n def edit_file_name(self):\n text, okPressed = QInputDialog.getText(self, \" \", \"Rename to:\", QLineEdit.Normal, self.name_of_focus_element())\n if okPressed and text != '':\n self.client.RENAME(self.path_to_file + \" \" + self.PWD+\"/\"+text)\n\n def add_folder_here(self):\n text, okPressed = QInputDialog.getText(self, \" \", \"Add folder\", QLineEdit.Normal, self.name_of_focus_element())\n if okPressed and text != '':\n self.client.MKDIR(text)\n\n def kill_char(self, string, n): # n = position of which character you want to remove\n begin = string[:n] # from beginning to n (n not included)\n end = string[n+1:] # n+1 through end of string\n return begin + end\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n window = Client()\n window.show()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n main()","sub_path":"Client/GUI/main_panel.py","file_name":"main_panel.py","file_ext":"py","file_size_in_byte":7174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"400661926","text":"import math\n\nmean = lambda x : sum(x)/len(x)\nstd = lambda y : (sum([(z-mean(y))**2 for z in y])/(len(y)-1))**0.5\ne_dist = lambda x, y : sum([(x[z]-y[z])**2 for z in range(0,len(x))])**0.5\n\nclass County:\n def __init__(self, name, values):\n self.name = name\n self.values = values\n def distance(self, othervals):\n dist = 0\n for i in range(len(self.values)):\n dist += abs(self.values[i]-othervals[i])\n return dist\n\nclass Cluster:\n def __init__(self):\n self.centroid = []\n self.contents = []\n def updateCentroid(self):\n if self.contents!=[]:\n self.centroid = [mean([y.values[x] for y in self.contents]) for x in range(0,len(self.centroid))]\n def names(self):\n names = \"\"\n for c in self.contents:\n names += c.name + \"; \"\n return names\n def clear(self):\n self.contents = []\n\ndef readData(filename):\n in_file = open(filename, \"r\").readlines()\n in_file = [x.strip(\"\\n\").split(\";\") for x in in_file]\n header = in_file[0]\n in_file = in_file[1:]\n county_names = [y[0] for y in in_file]\n county_info = [[float(z[a]) for a in range(2,19)] for z in in_file]\n county_info = [[rows[b] if b!=1 else rows[1]/rows[16] for b in range(0,16)] for rows in county_info]\n counties = [County(county_names[n], county_info[n]) for n in range(0,len(county_names))]\n return counties\n\ndef 
normalizeCounties(counties):\n    mean = lambda x : sum(x)/len(x)\n    std = lambda y : (sum([(z-mean(y))**2 for z in y])/(len(y)))**0.5\n    rot_df = [[y.values[x] for y in counties] for x in range (0,16)]\n    global_means = [mean(z) for z in rot_df]\n    global_stds = [std(a) for a in rot_df]\n    rot_df = [[(c-global_means[b])/global_stds[b] for c in rot_df[b]] for b in range (0,16)]\n    for d in range(0, len(counties)):\n        counties[d].values = [rot_df[e][d] for e in range(0,16)]\n    counties[:] = counties\n\ndef initClusters(counties, num):\n    clusters = []\n    for i in range(num):\n        newcluster = Cluster()\n        newcluster.centroid = counties[i].values[:]\n        clusters.append(newcluster)\n    return clusters\n\ndef placeCounties(counties, clusters):\n    cluster_centers = [y.centroid for y in clusters]\n    for x in counties:\n        e_dist = lambda y : sum([abs(x.values[z]-y[z]) for z in range(0,len(x.values))])\n        clusters[cluster_centers.index(min(cluster_centers, key=e_dist))].contents.append(x) # find closest\n    clusters[:] = clusters\n\ndef updateCentroids(clusters):\n    for c in clusters:\n        c.updateCentroid()\n\ndef clearClusters(clusters):\n    for c in clusters:\n        c.clear()\n\ndef writeOutput(clusters, filename):\n    output = []\n    for x in range(0,len(clusters)):\n        cluster = clusters[x]\n        output.append(\"Cluster \" + str(x+1) + \"\\n\")\n        output.append(\"size: \" + str(len(cluster.contents)) + \"\\n\")\n        output.append(cluster.names() + \"\\n\")\n        output.append(\"\\n\")\n    out_file = open(filename, \"w\")\n    out_file.writelines(output)\n    out_file.close()\n\ndef kmeans(infile, outfile, k, cycles):\n    counties = readData(infile)\n    normalizeCounties(counties)\n    clusters = initClusters(counties, k)\n    for i in range(cycles):\n        clearClusters(clusters)\n        placeCounties(counties, clusters)\n        updateCentroids(clusters)\n    writeOutput(clusters, outfile)\n\n# You can use the line below to test your kmeans code once you've\n# completed the coding for all five exercises. Compare the text file\n# \"output.txt\" produced by the code below against the sample output\n# file called \"output30x120.txt\" included in this folder.\n#\n# Please comment out this line when you hand in your code for each\n# exercise, otherwise our tests might time out, taking too long to\n# load your code. (Our tests use other means to verify your code.)\n#\n\n#kmeans(\"counties.txt\", \"output.txt\", 30, 120)\n","sub_path":"Homeworks/HW12/counties.py","file_name":"counties.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} {"seq_id":"393246070","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pylinkirc/plugins/networks.py\n# Compiled at: 2020-04-11 03:31:40\n# Size of source mod 2**32: 8215 bytes\n__doc__ = 'Networks plugin - allows you to manipulate connections to various configured networks.'\nimport importlib, threading, types, pylinkirc\nfrom pylinkirc import utils, world\nfrom pylinkirc.coremods import control, permissions\nfrom pylinkirc.log import log\nREMOTE_IN_USE = threading.Event()\n\n@utils.add_cmd\ndef disconnect(irc, source, args):\n    \"\"\"<network>\n\n    Disconnects the network <network>. 
When all networks are disconnected, PyLink will automatically exit.\n\n    To reconnect a network disconnected using this command, use REHASH to reload the networks list.\"\"\"\n    permissions.check_permissions(irc, source, ['networks.disconnect'])\n    try:\n        netname = args[0]\n        network = world.networkobjects[netname]\n    except IndexError:\n        irc.error('Not enough arguments (needs 1: network name (case sensitive)).')\n        return\n    except KeyError:\n        irc.error('No such network \"%s\" (case sensitive).' % netname)\n        return\n    else:\n        if network.has_cap('virtual-server'):\n            irc.error('\"%s\" is a virtual server and cannot be directly disconnected.' % netname)\n            return\n        log.info('Disconnecting network %r per %s', netname, irc.get_hostmask(source))\n        control.remove_network(network)\n        irc.reply(\"Done. If you want to reconnect this network, use the 'rehash' command.\")\n\n\n@utils.add_cmd\ndef autoconnect(irc, source, args):\n    \"\"\"<network> <seconds>\n\n    Sets the autoconnect time for <network> to <seconds>.\n    You can disable autoconnect for a network by setting <seconds> to a negative value.\"\"\"\n    permissions.check_permissions(irc, source, ['networks.autoconnect'])\n    try:\n        netname = args[0]\n        seconds = float(args[1])\n        network = world.networkobjects[netname]\n    except IndexError:\n        irc.error('Not enough arguments (needs 2: network name (case sensitive), autoconnect time (in seconds)).')\n        return\n    except KeyError:\n        irc.error('No such network \"%s\" (case sensitive).' % netname)\n        return\n    except ValueError:\n        irc.error('Invalid argument \"%s\" for <seconds>.' % seconds)\n        return\n    else:\n        network.serverdata['autoconnect'] = seconds\n        irc.reply('Done.')\n\n\nremote_parser = utils.IRCParser()\nremote_parser.add_argument('--service', type=str, default='pylink')\nremote_parser.add_argument('network')\nremote_parser.add_argument('command', nargs=(utils.IRCParser.REMAINDER))\n\n@utils.add_cmd\ndef remote(irc, source, args):\n    \"\"\"<network> [--service <service name>] <command>\n\n    Runs <command> on the remote network <network>. Plugin responses sent using irc.reply() are\n    supported and returned here, but others are dropped due to protocol limitations.\"\"\"\n    global REMOTE_IN_USE\n    args = remote_parser.parse_args(args)\n    if not args.command:\n        irc.error('No command given!')\n        return\n    netname = args.network\n    permissions.check_permissions(irc, source, [\n     'networks.remote',\n     'networks.remote.%s' % netname,\n     'networks.remote.%s.%s' % (netname, args.service),\n     'networks.remote.%s.%s.%s' % (netname, args.service, args.command[0])])\n    if REMOTE_IN_USE.is_set():\n        irc.error(\"The 'remote' command can not be nested.\")\n        return\n    REMOTE_IN_USE.set()\n    if netname == irc.name:\n        irc.error('Cannot remote-send a command to the local network; use a normal command!')\n        REMOTE_IN_USE.clear()\n        return\n    try:\n        remoteirc = world.networkobjects[netname]\n    except KeyError:\n        irc.error('No such network %r (case sensitive).' % netname)\n        REMOTE_IN_USE.clear()\n        return\n    else:\n        if args.service not in world.services:\n            irc.error('Unknown service %r.' % args.service)\n            REMOTE_IN_USE.clear()\n            return\n        elif not remoteirc.connected.is_set():\n            irc.error('Network %r is not connected.' % netname)\n            REMOTE_IN_USE.clear()\n            return\n        else:\n            if not world.services[args.service].uids.get(netname):\n                irc.error('The requested service %r is not available on %r.' 
% (args.service, netname))\n                REMOTE_IN_USE.clear()\n                return\n            try:\n                remoteirc.called_in = remoteirc.called_by = remoteirc.pseudoclient.uid\n                remoteirc.pseudoclient.account = irc.users[source].account\n            except:\n                REMOTE_IN_USE.clear()\n                raise\n\n            def _remote_reply(placeholder_self, text, **kwargs):\n                assert irc.name != placeholder_self.name, 'Refusing to route reply back to the same network, as this would cause a recursive loop'\n                log.debug('(%s) networks.remote: re-routing reply %r from network %s', irc.name, text, placeholder_self.name)\n                if 'source' in kwargs:\n                    del kwargs['source']\n                (irc.reply)(text, source=irc.pseudoclient.uid, **kwargs)\n\n            old_reply = remoteirc._reply\n            with remoteirc._reply_lock:\n                try:\n                    log.debug('(%s) networks.remote: overriding reply() of IRC object %s', irc.name, netname)\n                    remoteirc._reply = types.MethodType(_remote_reply, remoteirc)\n                    world.services[args.service].call_cmd(remoteirc, remoteirc.pseudoclient.uid, ' '.join(args.command))\n                finally:\n                    log.debug('(%s) networks.remote: restoring reply() of IRC object %s', irc.name, netname)\n                    remoteirc._reply = old_reply\n                    try:\n                        remoteirc.pseudoclient.account = ''\n                    except:\n                        log.warning('(%s) networks.remote: failed to restore pseudoclient account for %s; did the remote network disconnect while running this command?', irc.name, netname)\n\n            REMOTE_IN_USE.clear()\n\n\n@utils.add_cmd\ndef reloadproto(irc, source, args):\n    \"\"\"<protocol module name>\n\n    Reloads the given protocol module without restart. You will have to manually disconnect and reconnect any network using the module for changes to apply.\"\"\"\n    permissions.check_permissions(irc, source, ['networks.reloadproto'])\n    try:\n        name = args[0]\n    except IndexError:\n        irc.error('Not enough arguments (needs 1: protocol module name)')\n        return\n    else:\n        importlib.reload(pylinkirc.classes)\n        log.debug('networks.reloadproto: reloading %s', pylinkirc.classes)\n        for common_name in pylinkirc.protocols.common_modules:\n            module = utils._get_protocol_module(common_name)\n            log.debug('networks.reloadproto: reloading %s', module)\n            importlib.reload(module)\n\n        proto = utils._get_protocol_module(name)\n        log.debug('networks.reloadproto: reloading %s', proto)\n        importlib.reload(proto)\n        irc.reply('Done. You will have to manually disconnect and reconnect any network using the %r module for changes to apply.' 
% name)","sub_path":"pycfiles/pylinkmobile-0.3.0.tar/networks.cpython-36.py","file_name":"networks.cpython-36.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} {"seq_id":"486062422","text":"# Copyright 2020-2021 Canonical Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For further info, check https://github.com/canonical/charmcraft\n\n\"\"\"Infrastructure for the 'build' command.\"\"\"\n\nimport errno\nimport logging\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nimport zipfile\n\nimport yaml\n\nfrom charmcraft.cmdbase import BaseCommand, CommandError\nfrom charmcraft.jujuignore import JujuIgnore, default_juju_ignore\nfrom charmcraft.utils import make_executable, create_manifest\n\nlogger = logging.getLogger(__name__)\n\n# Some constants that are used through the code.\nCHARM_METADATA = \"metadata.yaml\"\nBUILD_DIRNAME = \"build\"\nVENV_DIRNAME = \"venv\"\n\n# The file name and template for the dispatch script\nDISPATCH_FILENAME = \"dispatch\"\n# If Juju doesn't support the dispatch mechanism, it will execute the\n# hook, and we'd need sys.argv[0] to be the name of the hook but it's\n# getting lost by calling this dispatch, so we fake JUJU_DISPATCH_PATH\n# to be the value it would've otherwise been.\nDISPATCH_CONTENT = \"\"\"#!/bin/sh\n\nJUJU_DISPATCH_PATH=\"${{JUJU_DISPATCH_PATH:-$0}}\" PYTHONPATH=lib:venv ./{entrypoint_relative_path}\n\"\"\"\n\n# The minimum set of hooks to be provided for compatibility with old Juju\nMANDATORY_HOOK_NAMES = {\"install\", \"start\", \"upgrade-charm\"}\nHOOKS_DIR = \"hooks\"\n\n\ndef _pip_needs_system():\n    \"\"\"Determine whether pip3 defaults to --user, needing --system to turn it off.\"\"\"\n    cmd = [\n        \"python3\",\n        \"-c\",\n        (\n            \"from pip.commands.install import InstallCommand; \"\n            'assert InstallCommand().cmd_opts.get_option(\"--system\") is not None'\n        ),\n    ]\n    proc = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n    return proc.returncode == 0\n\n\ndef polite_exec(cmd):\n    \"\"\"Execute a command, only showing output if error.\"\"\"\n    logger.debug(\"Running external command %s\", cmd)\n    try:\n        proc = subprocess.Popen(\n            cmd,\n            stdout=subprocess.PIPE,\n            stderr=subprocess.STDOUT,\n            universal_newlines=True,\n        )\n    except Exception as err:\n        logger.error(\"Executing %s crashed with %r\", cmd, err)\n        return 1\n\n    for line in proc.stdout:\n        logger.debug(\":: %s\", line.rstrip())\n    retcode = proc.wait()\n\n    if retcode:\n        logger.error(\"Executing %s failed with return code %d\", cmd, retcode)\n    return retcode\n\n\ndef relativise(src, dst):\n    \"\"\"Build a relative path from src to dst.\"\"\"\n    return pathlib.Path(os.path.relpath(str(dst), str(src.parent)))\n\n\nclass Builder:\n    \"\"\"The package builder.\"\"\"\n\n    def __init__(self, args, config):\n        self.charmdir = args[\"from\"]\n        self.entrypoint = args[\"entrypoint\"]\n        self.requirement_paths = args[\"requirement\"]\n\n        self.buildpath = self.charmdir / BUILD_DIRNAME\n        
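# rules from .jujuignore (plus the defaults) decide what gets copied into the build\n        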
self.ignore_rules = self._load_juju_ignore()\n self.config = config\n\n def run(self):\n \"\"\"Build the charm.\"\"\"\n logger.debug(\"Building charm in '%s'\", self.buildpath)\n\n if self.buildpath.exists():\n shutil.rmtree(str(self.buildpath))\n self.buildpath.mkdir()\n\n create_manifest(self.buildpath, self.config.project.started_at)\n\n linked_entrypoint = self.handle_generic_paths()\n self.handle_dispatcher(linked_entrypoint)\n self.handle_dependencies()\n zipname = self.handle_package()\n\n logger.info(\"Created '%s'.\", zipname)\n return zipname\n\n def _load_juju_ignore(self):\n ignore = JujuIgnore(default_juju_ignore)\n path = self.charmdir / \".jujuignore\"\n if path.exists():\n with path.open(\"r\", encoding=\"utf-8\") as ignores:\n ignore.extend_patterns(ignores)\n return ignore\n\n def create_symlink(self, src_path, dest_path):\n \"\"\"Create a symlink in dest_path pointing relatively like src_path.\n\n It also verifies that the linked dir or file is inside the project.\n \"\"\"\n resolved_path = src_path.resolve()\n if self.charmdir in resolved_path.parents:\n relative_link = relativise(src_path, resolved_path)\n dest_path.symlink_to(relative_link)\n else:\n rel_path = src_path.relative_to(self.charmdir)\n logger.warning(\n \"Ignoring symlink because targets outside the project: '%s'\", rel_path\n )\n\n def handle_generic_paths(self):\n \"\"\"Handle all files and dirs except what's ignored and what will be handled later.\n\n Works differently for the different file types:\n - regular files: hard links\n - directories: created\n - symlinks: respected if are internal to the project\n - other types (blocks, mount points, etc): ignored\n \"\"\"\n logger.debug(\"Linking in generic paths\")\n\n for basedir, dirnames, filenames in os.walk(\n str(self.charmdir), followlinks=False\n ):\n abs_basedir = pathlib.Path(basedir)\n rel_basedir = abs_basedir.relative_to(self.charmdir)\n\n # process the directories\n ignored = []\n for pos, name in enumerate(dirnames):\n rel_path = rel_basedir / name\n abs_path = abs_basedir / name\n\n if self.ignore_rules.match(str(rel_path), is_dir=True):\n logger.debug(\"Ignoring directory because of rules: '%s'\", rel_path)\n ignored.append(pos)\n elif abs_path.is_symlink():\n dest_path = self.buildpath / rel_path\n self.create_symlink(abs_path, dest_path)\n else:\n dest_path = self.buildpath / rel_path\n dest_path.mkdir(mode=abs_path.stat().st_mode)\n\n # in the future don't go inside ignored directories\n for pos in reversed(ignored):\n del dirnames[pos]\n\n # process the files\n for name in filenames:\n rel_path = rel_basedir / name\n abs_path = abs_basedir / name\n\n if self.ignore_rules.match(str(rel_path), is_dir=False):\n logger.debug(\"Ignoring file because of rules: '%s'\", rel_path)\n elif abs_path.is_symlink():\n dest_path = self.buildpath / rel_path\n self.create_symlink(abs_path, dest_path)\n elif abs_path.is_file():\n dest_path = self.buildpath / rel_path\n try:\n os.link(str(abs_path), str(dest_path))\n except PermissionError:\n # when not allowed to create hard links\n shutil.copy2(str(abs_path), str(dest_path))\n except OSError as e:\n if e.errno != errno.EXDEV:\n raise\n shutil.copy2(str(abs_path), str(dest_path))\n else:\n logger.debug(\"Ignoring file because of type: '%s'\", rel_path)\n\n # the linked entrypoint is calculated here because it's when it's really in the build dir\n linked_entrypoint = self.buildpath / self.entrypoint.relative_to(self.charmdir)\n return linked_entrypoint\n\n def handle_dispatcher(self, linked_entrypoint):\n 
\"\"\"Handle modern and classic dispatch mechanisms.\"\"\"\n # dispatch mechanism, create one if wasn't provided by the project\n dispatch_path = self.buildpath / DISPATCH_FILENAME\n if not dispatch_path.exists():\n logger.debug(\"Creating the dispatch mechanism\")\n dispatch_content = DISPATCH_CONTENT.format(\n entrypoint_relative_path=linked_entrypoint.relative_to(self.buildpath)\n )\n with dispatch_path.open(\"wt\", encoding=\"utf8\") as fh:\n fh.write(dispatch_content)\n make_executable(fh)\n\n # bunch of symlinks, to support old juju: verify that any of the already included hooks\n # in the directory is not linking directly to the entrypoint, and also check all the\n # mandatory ones are present\n dest_hookpath = self.buildpath / HOOKS_DIR\n if not dest_hookpath.exists():\n dest_hookpath.mkdir()\n\n # get those built hooks that we need to replace because they are pointing to the\n # entrypoint directly and we need to fix the environment in the middle\n current_hooks_to_replace = []\n for node in dest_hookpath.iterdir():\n if node.resolve() == linked_entrypoint:\n current_hooks_to_replace.append(node)\n node.unlink()\n logger.debug(\n \"Replacing existing hook %r as it's a symlink to the entrypoint\",\n node.name,\n )\n\n # include the mandatory ones and those we need to replace\n hooknames = MANDATORY_HOOK_NAMES | {x.name for x in current_hooks_to_replace}\n for hookname in hooknames:\n logger.debug(\"Creating the %r hook script pointing to dispatch\", hookname)\n dest_hook = dest_hookpath / hookname\n if not dest_hook.exists():\n relative_link = relativise(dest_hook, dispatch_path)\n dest_hook.symlink_to(relative_link)\n\n def handle_dependencies(self):\n \"\"\"Handle from-directory and virtualenv dependencies.\"\"\"\n logger.debug(\"Installing dependencies\")\n\n # virtualenv with other dependencies (if any)\n if self.requirement_paths:\n retcode = polite_exec([\"pip3\", \"list\"])\n if retcode:\n raise CommandError(\"problems using pip\")\n\n venvpath = self.buildpath / VENV_DIRNAME\n cmd = [\n \"pip3\",\n \"install\", # base command\n \"--target={}\".format(\n venvpath\n ), # put all the resulting files in that specific dir\n ]\n if _pip_needs_system():\n logger.debug(\"adding --system to work around pip3 defaulting to --user\")\n cmd.append(\"--system\")\n for reqspath in self.requirement_paths:\n cmd.append(\n \"--requirement={}\".format(reqspath)\n ) # the dependencies file(s)\n retcode = polite_exec(cmd)\n if retcode:\n raise CommandError(\"problems installing dependencies\")\n\n def handle_package(self):\n \"\"\"Handle the final package creation.\"\"\"\n logger.debug(\"Parsing the project's metadata\")\n with (self.charmdir / CHARM_METADATA).open(\"rt\", encoding=\"utf8\") as fh:\n metadata = yaml.safe_load(fh)\n\n logger.debug(\"Creating the package itself\")\n zipname = metadata[\"name\"] + \".charm\"\n zipfh = zipfile.ZipFile(zipname, \"w\", zipfile.ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(self.buildpath, followlinks=True):\n dirpath = pathlib.Path(dirpath)\n for filename in filenames:\n filepath = dirpath / filename\n zipfh.write(str(filepath), str(filepath.relative_to(self.buildpath)))\n\n zipfh.close()\n return zipname\n\n\nclass Validator:\n \"\"\"A validator of all received options.\"\"\"\n\n _options = [\n \"from\", # this needs to be processed first, as it's a base dir to find other files\n \"entrypoint\",\n \"requirement\",\n ]\n\n def __init__(self):\n self.basedir = None # this will be fulfilled when processing 'from'\n\n def process(self, 
parsed_args):\n \"\"\"Process the received options.\"\"\"\n result = {}\n for opt in self._options:\n meth = getattr(self, \"validate_\" + opt)\n result[opt] = meth(getattr(parsed_args, opt, None))\n return result\n\n def validate_from(self, dirpath):\n \"\"\"Validate that the charm dir is there and yes, a directory.\"\"\"\n if dirpath is None:\n dirpath = pathlib.Path.cwd()\n else:\n dirpath = dirpath.expanduser().absolute()\n\n if not dirpath.exists():\n raise CommandError(\n \"Charm directory was not found: {!r}\".format(str(dirpath))\n )\n if not dirpath.is_dir():\n raise CommandError(\n \"Charm directory is not really a directory: {!r}\".format(str(dirpath))\n )\n\n self.basedir = dirpath\n return dirpath\n\n def validate_entrypoint(self, filepath):\n \"\"\"Validate that the entrypoint exists and is executable.\"\"\"\n if filepath is None:\n filepath = self.basedir / \"src\" / \"charm.py\"\n else:\n filepath = filepath.expanduser().absolute()\n\n if not filepath.exists():\n raise CommandError(\n \"Charm entry point was not found: {!r}\".format(str(filepath))\n )\n if self.basedir not in filepath.parents:\n raise CommandError(\n \"Charm entry point must be inside the project: {!r}\".format(\n str(filepath)\n )\n )\n if not os.access(filepath, os.X_OK):\n raise CommandError(\n \"Charm entry point must be executable: {!r}\".format(str(filepath))\n )\n return filepath\n\n def validate_requirement(self, filepaths):\n \"\"\"Validate that the given requirement(s) (if any) exist.\n\n If not specified, default to requirements.txt if there.\n \"\"\"\n if filepaths is None:\n req = self.basedir / \"requirements.txt\"\n if req.exists() and os.access(req, os.R_OK):\n return [req]\n return []\n\n filepaths = [x.expanduser().absolute() for x in filepaths]\n for fpath in filepaths:\n if not fpath.exists():\n raise CommandError(\n \"the requirements file was not found: {!r}\".format(str(fpath))\n )\n return filepaths\n\n\n_overview = \"\"\"\nBuild a charm operator package.\n\nYou can `juju deploy` the resulting `.charm` file directly, or upload it\nto Charmhub with `charmcraft upload`.\n\nYou must be inside a charm directory with a valid `metadata.yaml`,\n`requirements.txt` including the `ops` package for the Python operator\nframework, and an operator entrypoint, usually `src/charm.py`.\n\nSee `charmcraft init` to create a template charm directory structure.\n\"\"\"\n\n\nclass BuildCommand(BaseCommand):\n \"\"\"Build the charm.\"\"\"\n\n name = \"build\"\n help_msg = \"Build the charm\"\n overview = _overview\n common = True\n\n def fill_parser(self, parser):\n \"\"\"Add own parameters to the general parser.\"\"\"\n parser.add_argument(\n \"-f\",\n \"--from\",\n type=pathlib.Path,\n help=\"Charm directory with metadata.yaml where the build \"\n \"takes place; defaults to '.'\",\n )\n parser.add_argument(\n \"-e\",\n \"--entrypoint\",\n type=pathlib.Path,\n help=\"The executable which is the operator entry point; \"\n \"defaults to 'src/charm.py'\",\n )\n parser.add_argument(\n \"-r\",\n \"--requirement\",\n action=\"append\",\n type=pathlib.Path,\n help=\"File(s) listing needed PyPI dependencies (can be used multiple \"\n \"times); defaults to 'requirements.txt'\",\n )\n\n def run(self, parsed_args):\n \"\"\"Run the command.\"\"\"\n validator = Validator()\n args = validator.process(parsed_args)\n logger.debug(\"working arguments: %s\", args)\n builder = Builder(args, self.config)\n 
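# the Builder links files in, creates the dispatch script, installs deps, and zips the charm\n        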
builder.run()\n","sub_path":"charmcraft/commands/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":16128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} {"seq_id":"115722022","text":"import random\r\nfrom images import stages\r\nfrom words import word_list, logo\r\n\r\n#word_list = [\"one\", \"two\", \"tree\"]\r\n\r\n\r\ndef hangman_game():\r\n    chosen_word = random.choice(word_list)\r\n    word_length = len(chosen_word)\r\n    display = []\r\n    print(logo)\r\n\r\n    for letter in range(word_length):\r\n        display += '_'\r\n    print(display)\r\n\r\n    end_of_game = False\r\n    lives = 6\r\n\r\n    while end_of_game != True:\r\n        guess = input(\"guess the letters for this word\\n\").lower()\r\n        if guess in display:\r\n            print(f\"You have already guessed the letter {guess}, try another one.\")\r\n        for position in range(word_length):\r\n            letter = chosen_word[position]\r\n            if letter == guess:\r\n                display[position] = letter\r\n        if guess not in chosen_word:\r\n            lives -= 1\r\n            print(f'You guessed a wrong letter and lose a life. Now you have {lives} left')\r\n            image = stages[lives]\r\n            print(image)\r\n        print(display)\r\n\r\n        if \"_\" not in display:\r\n            end_of_game = True\r\n            print(\"You Won!\")\r\n        else:\r\n            if lives == 0:\r\n                end_of_game = True\r\n                print('YOU LOSE!')\r\n\r\n    start_again = input('Would you like to play again? please input \"y\" for yes or \"n\" for no ').lower()\r\n    if start_again == 'y':\r\n        hangman_game()\r\n    else:\r\n        print('😎 Thank you for playing 🤗')\r\n\r\n\r\nhangman_game()","sub_path":"hangman/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} {"seq_id":"132509523","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Error',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('kind', models.CharField(db_index=True, max_length=128, null=True, verbose_name='type', blank=True)),\n                ('info', models.TextField()),\n                ('data', models.TextField(null=True, blank=True)),\n                ('path', models.URLField(null=True, blank=True)),\n                ('when', models.DateTimeField(auto_now_add=True, db_index=True)),\n                ('html', models.TextField(null=True, blank=True)),\n                ('modified', models.DateTimeField(auto_now=True)),\n                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n            ],\n            options={\n                'verbose_name': 'Error',\n                'verbose_name_plural': 'Errors',\n            },\n            bases=(models.Model,),\n        ),\n    ]\n","sub_path":"erroneous/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} {"seq_id":"378989491","text":"import logging\nimport time\nimport more_itertools as mit  # provides mit.windowed(), used below but missing from the original imports\nimport numpy as np\nfrom model import count_points \nfrom model import nodes,make_id\nfrom model import ppwrs2,ppwrs3,psize2,psize3,cff_cnt,mvmonoss\nfrom poly import mvmonos, powers\n\nimport solvers.simplex as simplex\nimport solvers.solve_constractions_cone as constr_cone\nimport solvers.iterate_simplex as iterate_simplex\n\nfrom constants import *\n\ndef count(params, eps=0.01):\n    itcnt = 0\n    outx = None\n    pprx = params[\"pprx\"]\n    pprt = params[\"pprt\"]\n    xreg = 
params[\"xreg\"]\n treg = params[\"treg\"] \n is_run = True\n totalx = xreg*pprx - xreg + 1\n totalt = treg*pprt - treg + 1\n X = np.linspace(0, length, totalx)\n T = np.linspace(0, total_time, totalt)\n R = np.linspace(0.01*rball, rball, 10)\n R = R[::-1]\n X_part = list(mit.windowed(X,n=pprx,step=pprx - 1))\n T_part = list(mit.windowed(T,n=pprt,step=pprt - 1))\n lxreg = X_part[0][-1] - X_part[0][0]\n ltreg = T_part[0][-1] - T_part[0][0]\n bsize = sum(cff_cnt)\n outx_old = None\n outx = None \n cff = None\n cff_old = None\n is_refin = True\n task_old = None\n while is_run or itcnt == 0:\n itcnt += 1\n logging.info(f\"ITER: {itcnt}\")\n stime = time.time()\n sdcf = None\n refit = 0\n monos, rhs, ct, *_ = count_points(params)\n ct = np.hstack([ct,ct])\n if cff_old is None:\n cff_old = np.copy(cff)\n ones = np.ones((len(monos),1))\n\n A1 = np.hstack([monos, ones])\n A2 = np.hstack([-monos, ones])\n task_A = np.vstack([A1,A2])\n\n task_rhs = np.hstack([rhs,-rhs])\n print(np.max(task_A),np.min(task_A))\n print((f\"TASK SIZE XCOUNT: {task_A.shape[1]} \" \n f\"GXCOUNT: {task_A.shape[0]}\"))\n outx = simplex.solve(task_A, task_rhs, ct=ct, \n logLevel=1,outx=outx)\n \n is_run = False\n# opt = test(params, outx, itcnt)\n\n np.savetxt(f\"xdata_{itcnt}.dat\", outx)\n \n #v_lst = []\n #p_lst = []\n #for i in range(treg):\n # for j in range(xreg):\n # ind = make_id(i, j, params)\n # pts = nodes(T_part[i],X_part[j])\n # cf = outx[ind*bsize:(ind+1)*bsize]\n # tv = mvmonoss(pts, ppwrs, 1, cff_cnt, [0, 0])\n # tp = mvmonoss(pts, ppwrs, 0, cff_cnt, [0, 0])\n # ttv = tv.dot(cf)\n # ttp = tp.dot(cf)\n # v_lst.append(ttv)\n # p_lst.append(ttp)\n #v = np.hstack(v_lst)\n #p = np.hstack(p_lst)\n #ind = 0\n #if v_0 is None:\n # vu= v*a\n # #vu= v\n # delta_v = abs(vu)\n # ind = np.argmax(delta_v)\n # logging.info(f\"delta_v[{ind}]: {delta_v[ind]}\")\n # logging.info(f\"delta_v avg: {np.average(delta_v)}\")\n # v_0 = vu\n #else:\n # delta_v = abs(v-v_0)\n # vu = (v-v0)*a+v\n # #vu = v\n # ind = np.argmax(delta_v)\n # is_run = delta_v[ind] > accs[\"v\"]\n # logging.info(f\"delta_v[{ind}]: {delta_v[ind]}\")\n # logging.info(f\"delta_v avg: {np.average(delta_v)}\")\n # v_0 = vu\n #logging.info(f\"current a: {a}\")\n #logging.debug(f\"max_v: {np.max(v_0)} | {np.max(v)}\")\n #logging.debug(f\"min_v: {np.min(v_0)} | {np.min(v)}\")\n #logging.debug(f\"max_p: {np.max(p)}\")\n #logging.debug(f\"min_p: {np.min(p)}\")\n #\n #f = open('dv.txt','a')\n #f.write(f\"{itcnt} {outx[-1]} {delta_v[ind]}\\n\")\n #f.close()\n\n t = time.time() - stime\n logging.debug(\"iter time {} seconds\".format(t) )\n np.savetxt(f\"xdata.txt\", outx)\n\nif __name__ == \"__main__\":\n import time\n import argparse\n import sys\n np.set_printoptions(threshold=sys.maxsize)\n logging.basicConfig(filename=\"cpm.log\",\n level=logging.DEBUG,\n format='%(asctime)s %(message)s', \n datefmt='%Y-%m-%d %H-%M-%S')\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--xreg\", default=1,type=int)\n parser.add_argument(\"--treg\", default=1,type=int)\n parser.add_argument(\"--pprx\", default=7,type=int)\n parser.add_argument(\"--pprt\", default=7,type=int)\n args = parser.parse_args(sys.argv[1:])\n params = vars(args)\n\n logging.info(\"*\"*40)\n logging.info(\"START\")\n stime = time.time()\n count(params)\n t = time.time() - stime\n logging.debug(\"total time {} seconds\".format(t) 
)\n","sub_path":"cerhe.py","file_name":"cerhe.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"557042552","text":"# Bradley Taniguchi\n# test database handler with sqlite3 \n# Python interaction example\n# https://docs.python.org/2/library/sqlite3.html\n# Sqlite3 example\n# http://www.thegeekstuff.com/2012/09/sqlite-command-examples/\n\nimport sqlite3\n\nconn = sqlite3.connect('example.database') # connect to database\nc = conn.cursor()\n\n\n# create table, NOT NECCESSARY in primary program\nc.execute('''CREATE TABLE Students (name text, )''')","sub_path":"Sqlite3test.py","file_name":"Sqlite3test.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"609060760","text":"import os\nimport csv\nimport datetime\nimport re\n\n\nclass Searches():\n \"\"\"Contains all applicable methods and variables\n applicable to searching the .CSV file\"\"\"\n\n def __init__(self, search_value='', search_column=''):\n self.search_value = search_value\n self.search_column = search_column\n\n def list_values(self):\n \"\"\"Shows the user the availible values to search\n based on selected search type\"\"\"\n csv_file = csv.reader(open('data.csv', \"r\"))\n data_elements = ''\n self.search_value = list(self.search_value)\n # Changed from empty list to variable\n firstline = True # Skip header line in .CSV file\n for row in csv_file:\n if firstline:\n firstline = False\n continue\n else:\n data_elements = list(\n row[column] for column in [int(self.search_column)])\n self.search_value.append(data_elements[0])\n print(data_elements[0])\n\n def print_results(self, search_value):\n \"\"\"Shows the user multiple output rows based\n on a provided search value\"\"\"\n os.system('clear')\n print(\"HERE'S YOUR DATA!!!\")\n csv_file = csv.reader(open('data.csv', \"r\"))\n returned_values = []\n for row in csv_file:\n for field in row:\n if field == search_value: # Match field to search value\n if row not in returned_values: # Prevent Dupes\n returned_values.append(row)\n titles = ['DATE', 'TITLE', 'TIME', 'NOTES']\n for x in returned_values: # Make list readable\n dictionary = dict(zip(titles, x)) # Convert to dict\n for key, value in dictionary.items():\n print(key + ':', value)\n print('\\n')\n\n def date_search(self):\n \"\"\"Method to search .CSV file by date column\"\"\"\n while True:\n os.system('clear')\n print(\"Dates Availible to Search:\")\n self.search_column = 0\n self.list_values()\n search_date = input('\\nPlease Use DD/MM/YYYY: ')\n try:\n datetime.datetime.strptime(search_date, '%d/%m/%Y')\n if search_date not in self.search_value:\n print(\"Not an Availible Selection!\")\n input(\"Press ENTER to Try Again!\")\n continue\n else:\n self.print_results(search_date)\n input(\"\\nPress ENTER to Return to the Main Menu\")\n break\n except ValueError:\n print(\"ValueError: Please Use DD/MM/YYYY.\")\n input(\"Press ENTER to Try Again\")\n continue\n else:\n break\n\n def time_search(self):\n \"\"\"Method to search .CSV file by time column\"\"\"\n while True:\n os.system('clear')\n print(\"Times Availible to Search:\")\n self.search_column = 2\n self.list_values()\n search_time = str(input('\\nPlease Use a Whole Number: '))\n if search_time not in self.search_value:\n input(\"Not an Availible Selection! 
Press ENTER to Try Again!\")\n                continue\n            else:\n                self.print_results(search_time)\n                input(\"\\nPress ENTER to Return to Main Menu\")\n                break\n\n    def string_search(self):\n        \"\"\"Method to search .CSV file by string\n        in task names and notes\"\"\"\n        while True:\n            os.system('clear')\n            print(\"Strings Available to Search:\")\n            self.search_column = 1\n            self.list_values()\n            self.search_column = 3\n            self.list_values()\n            search_string = str(input('\\nPlease Use CaSe sEnSiTiVe Text: '))\n            if search_string not in self.search_value:\n                print(\"Not an Available Selection!\")\n                input(\"Press ENTER to Try Again!\")\n                continue\n            else:\n                self.print_results(search_string)\n                input(\"\\nPress ENTER to Return to Main Menu\")\n                break\n\n    def regex_search(self):\n        \"\"\"Method to search .CSV file by regex patterns\n        in task names and notes\"\"\"\n        while True:\n            file = open(\"data.csv\")\n            data = file.read()\n            file.close()\n            os.system('clear')\n            print(\"Select whatever regex or value you wish to search for:\")\n            print(\"For example: \\d would return Unicode digits from 0 to 9\")\n            regex_string = input('\\nPlease enter a regex or value: ')\n            regex_results = (re.findall(regex_string, data))\n            regex_results_list = (list(set(regex_results)))\n            with open('data.csv', 'rt') as f:\n                reader = csv.reader(f)\n                returned_values = []\n                for row in reader:\n                    for field in row:\n                        for item in regex_results_list:\n                            if item in field: # Match field to search value\n                                if row not in returned_values: # Prevent Dupes\n                                    returned_values.append(row)\n            print('\\nYou found {} instances of this criterion and {} values!'\n                  .format(len(regex_results), len(returned_values)))\n            next_step = input('Would you like to see the results? (y/n): ')\n            if next_step.lower() == 'n':\n                continue\n            elif next_step.lower() == 'y':\n                os.system('clear')\n                print(\"HERE'S YOUR DATA!!!\")\n                titles = ['DATE', 'TITLE', 'TIME', 'NOTES']\n                for x in returned_values: # Make list readable\n                    dictionary = dict(zip(titles, x)) # Convert to dict\n                    for key, value in dictionary.items():\n                        print(key + ':', value)\n                    print('\\n')\n                input(\"\\nPress ENTER to Return to Main Menu\")\n                break\n            else:\n                continue\n","sub_path":"searches.py","file_name":"searches.py","file_ext":"py","file_size_in_byte":6145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} {"seq_id":"57884140","text":"from sqlalchemy import *\r\nfrom migrate import *\r\n\r\n\r\nfrom migrate.changeset import schema\r\npre_meta = MetaData()\r\npost_meta = MetaData()\r\npost = Table('post', pre_meta,\r\n    Column('id', INTEGER(), primary_key=True, nullable=False),\r\n    Column('body', VARCHAR(length=256)),\r\n    Column('timestamp', DATETIME),\r\n    Column('user_id', INTEGER()),\r\n)\r\n\r\nbillingXML = Table('billingXML', post_meta,\r\n    Column('id', INTEGER, primary_key=True, nullable=False),\r\n    Column('regional_center_id', INTEGER),\r\n    Column('billing_month', DATETIME),\r\n    Column('file_link', VARCHAR(length=255)),\r\n    Column('created_date', DATETIME),\r\n)\r\n\r\nbilling_notes = Table('billing_notes', post_meta,\r\n    Column('id', INTEGER, primary_key=True, nullable=False),\r\n    Column('billing_xml_id', INTEGER),\r\n    Column('client_appt_id', INTEGER),\r\n    Column('note', Text),\r\n    Column('created_date', DATETIME),\r\n)\r\n\r\nclient_appt_type = Table('client_appt_type', post_meta,\r\n    Column('id', INTEGER, primary_key=True, nullable=False),\r\n    Column('name', VARCHAR(length=20)),\r\n    Column('service_code', INTEGER),\r\n    Column('service_type_code', VARCHAR(length=15)),\r\n)\r\n\r\nclient_appt = 
Table('client_appt', post_meta,\r\n Column('id', INTEGER, primary_key=True, nullable=False),\r\n Column('client_id', INTEGER),\r\n Column('therapist_id', INTEGER),\r\n Column('start_datetime', DATETIME),\r\n Column('end_datetime', DATETIME),\r\n Column('appointment_type', VARCHAR(length=15)),\r\n Column('appt_type_id', INTEGER),\r\n Column('cancelled', SMALLINT, default=ColumnDefault(0)),\r\n Column('billed', SMALLINT, default=ColumnDefault(0)),\r\n)\r\n\r\n\r\ndef upgrade(migrate_engine):\r\n # Upgrade operations go here. Don't create your own engine; bind\r\n # migrate_engine to your metadata\r\n pre_meta.bind = migrate_engine\r\n post_meta.bind = migrate_engine\r\n pre_meta.tables['post'].drop()\r\n post_meta.tables['billingXML'].create()\r\n post_meta.tables['billing_notes'].create()\r\n post_meta.tables['client_appt_type'].create()\r\n post_meta.tables['client_appt'].columns['appt_type_id'].create()\r\n\r\n\r\ndef downgrade(migrate_engine):\r\n # Operations to reverse the above upgrade go here.\r\n pre_meta.bind = migrate_engine\r\n post_meta.bind = migrate_engine\r\n pre_meta.tables['post'].create()\r\n post_meta.tables['billingXML'].drop()\r\n post_meta.tables['billing_notes'].drop()\r\n post_meta.tables['client_appt_type'].drop()\r\n post_meta.tables['client_appt'].columns['appt_type_id'].drop()\r\n","sub_path":"db_repository/versions/028_migration.py","file_name":"028_migration.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"324442093","text":"# make a program that will let me enter a number and then\n# print out all of the multiples until 100. So, if I enter a 2, \n# then the program should print out all of the multiples of 2 until 100. \n# If I enter a 8, then the program should print out all of the multiples of\n# 8 until 100. \n\n\nnumber = input(\"what is your number? 
\")\n\nfor i in range(0,101,int(number)):\n print(i)\n\n ","sub_path":"more_chapter_4_pt2/homework3.py","file_name":"homework3.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"143567604","text":"import cv2\n\ndef get_landmarks(detector, image):\n # Flip the image horizontally for a later selfie-view display, and convert\n # the BGR image to RGB.\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # To improve performance, optionally mark the image as not writeable to\n # pass by reference.\n image.flags.writeable = False\n\n results = detector.process(image)\n\n # Draw the pose annotation on the image.\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n return image, results\n\n\n","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"146277736","text":"def beautifulText(inputString, l, r):\n words = inputString.split()\n lens = []\n\n if len(words) == 1:\n if (len(words[0]) >= l) and (len(words[0]) <= r):\n return True\n else:\n return False\n\n def searchLine(pos, cl):\n for i in range(pos + 1, len(words) + 1):\n le = len(' '.join(words[pos:i]))\n # print(cl, i, le, len(words))\n if i == len(words):\n if cl == le:\n return True\n else:\n return False\n if le < l:\n continue\n elif (le <= r):\n if (cl == le):\n return searchLine(i, le)\n else:\n return False\n\n for i in range(1, len(words) + 1):\n le = len(' '.join(words[:i]))\n if le < l:\n continue\n elif le <= r:\n # print(le)\n lens.append(searchLine(i, le))\n else:\n break\n\n return any(lens)\n\n","sub_path":"Beautiful Text.py","file_name":"Beautiful Text.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"554510670","text":"from __future__ import annotations\n\nimport asyncio\nimport datetime\nimport textwrap\nfrom typing import Any, Dict, Tuple\nimport uuid\n\nimport libvirt\n\nfrom . import _routing\nfrom . import errors\n\nfrom .. 
import objects\n\n\nclass InvalidAttachmentNotFound(_routing.ClientError):\n\n    code = \"InvalidAttachment.NotFound\"\n\n\nclass InvalidVolumeNotFound(_routing.ClientError):\n\n    code = \"InvalidVolume.NotFound\"\n\n\n_known_attachments: Dict[Tuple[str, str], Tuple[str, str]] = {}\n\n\n@_routing.handler(\"CreateVolume\")\nasync def create_volume(\n    args: _routing.HandlerArgs,\n    app: _routing.App,\n) -> Dict[str, Any]:\n    pool: libvirt.virStoragePool = app[\"libvirt_pool\"]\n    size = args.get(\"Size\")\n    if not size:\n        raise _routing.InvalidParameterError(\"missing required Size\")\n\n    az = args.get(\"AvailabilityZone\")\n    if not az:\n        raise _routing.InvalidParameterError(\n            \"missing required AvailabilityZone\"\n        )\n\n    voltype = args.get(\"VolumeType\")\n    if not voltype:\n        voltype = \"gp2\"\n\n    volname = f\"{uuid.uuid4()}.qcow2\"\n\n    # NOTE: the XML element names in this template were lost during extraction;\n    # the tags below are a best-effort reconstruction of a libvirt qcow2 volume\n    # definition and may differ from the original source.\n    xml = textwrap.dedent(\n        f\"\"\"\\\n        <volume>\n          <name>{volname}</name>\n          <capacity unit=\"G\">{size}</capacity>\n          <target>\n            <path>{volname}</path>\n            <permissions>\n              <mode>0644</mode>\n            </permissions>\n            <compat>1.1</compat>\n            <features>\n              <lazy_refcounts/>\n            </features>\n            <format type=\"qcow2\"/>\n          </target>\n        </volume>\n        \"\"\"\n    )\n\n    pool.createXML(xml, flags=libvirt.VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA)\n\n    create_time = datetime.datetime.now(datetime.timezone.utc)\n\n    tags = {}\n    tag_spec = args.get(\"TagSpecification\")\n    if tag_spec:\n        for spec_entry in tag_spec:\n            tag_entries = spec_entry[\"Tag\"]\n            for tag in tag_entries:\n                tags[tag[\"Key\"]] = tag[\"Value\"]\n\n    if tags:\n        cur = app[\"db\"].cursor()\n        cur.executemany(\n            \"\"\"\n            INSERT INTO tags\n                (resource_name, resource_type, tagname, tagvalue)\n            VALUES (?, ?, ?, ?)\n            \"\"\",\n            [[volname, \"volume\", n, v] for n, v in tags.items()],\n        )\n        app[\"db\"].commit()\n\n    return {\n        \"volumeId\": volname,\n        \"size\": size,\n        \"iops\": 10000,\n        \"availabilityZone\": az,\n        \"snapshotId\": None,\n        \"status\": \"creating\",\n        \"createTime\": create_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f000Z\"),\n        \"volumeType\": voltype,\n        \"tagSet\": [{\"key\": k, \"value\": v} for k, v in tags.items()],\n        \"multiAttachEnabled\": \"false\",\n    }\n\n\n@_routing.handler(\"DeleteVolume\")\nasync def delete_volume(\n    args: _routing.HandlerArgs,\n    app: _routing.App,\n) -> Dict[str, Any]:\n    pool: libvirt.virStoragePool = app[\"libvirt_pool\"]\n    volname = args.get(\"VolumeId\")\n    if not volname:\n        raise _routing.InvalidParameterError(\"missing required VolumeId\")\n\n    try:\n        vol = pool.storageVolLookupByName(volname)\n    except libvirt.libvirtError as e:\n        raise InvalidVolumeNotFound(e.args[0]) from None\n\n    vol.delete()\n\n    return {\n        \"return\": \"true\",\n    }\n\n\n@_routing.handler(\"DescribeVolumes\")\nasync def describe_volumes(\n    args: _routing.HandlerArgs,\n    app: _routing.App,\n) -> Dict[str, Any]:\n    pool: libvirt.virStoragePool = app[\"libvirt_pool\"]\n    volume_ids = set(args.get(\"VolumeId\", ()))\n    result = []\n    filters = args.get(\"Filter\")\n    if filters:\n        filtered_volume_ids = set()\n        for flt in filters:\n            if flt[\"Name\"].startswith(\"tag:\"):\n                # slice off the \"tag:\" prefix (plain indexing here would return a single character)\n                tagname = flt[\"Name\"][len(\"tag:\"):]\n                tagvalue = flt[\"Value\"]\n\n                cur = app[\"db\"].cursor()\n                cur.execute(\n                    f\"\"\"\n                    SELECT resource_name FROM tags\n                    WHERE tagname = ? 
AND resource_type = 'volume'\n                    AND tagvalue IN ({\",\".join([\"?\"] * len(tagvalue))})\n                    \"\"\",\n                    [tagname] + list(tagvalue),\n                )\n                # fetchall() yields 1-tuples; unpack them so the set holds plain volume names\n                filtered_volume_ids.update(row[0] for row in cur.fetchall())\n                cur.close()\n            else:\n                raise _routing.InvalidParameterError(\n                    f\"unsupported filter type: {flt['Name']}\"\n                )\n\n        if volume_ids:\n            volume_ids -= filtered_volume_ids\n        else:\n            volume_ids = filtered_volume_ids\n\n    for volume in objects.get_all_volumes(pool):\n        volname = volume.name\n        if (not volume_ids and not filters) or volname in volume_ids:\n            result.append(_describe_volume(pool, volume))\n\n    return {\n        \"volumeSet\": result,\n    }\n\n\n@_routing.handler(\"AttachVolume\")\nasync def attach_volume(\n    args: _routing.HandlerArgs,\n    app: _routing.App,\n) -> Dict[str, Any]:\n    pool: libvirt.virStoragePool = app[\"libvirt_pool\"]\n    instance_id = args.get(\"InstanceId\")\n    if not instance_id:\n        raise _routing.InvalidParameterError(\"missing required InstanceId\")\n    if not isinstance(instance_id, str):\n        raise _routing.InvalidParameterError(\"invalid InstanceId value\")\n    volume_id = args.get(\"VolumeId\")\n    if not volume_id:\n        raise _routing.InvalidParameterError(\"missing required VolumeId\")\n    device = args.get(\"Device\")\n    if not device:\n        raise _routing.InvalidParameterError(\"missing required Device\")\n    if not isinstance(device, str):\n        raise _routing.InvalidParameterError(\"invalid Device value\")\n    if not isinstance(volume_id, str):\n        raise _routing.InvalidParameterError(\"invalid VolumeId value\")\n\n    if device.startswith(\"/\"):\n        if not device.startswith(\"/dev/\"):\n            raise _routing.InvalidParameterError(\n                \"invalid Device, must start with /dev\"\n            )\n        device = device[len(\"/dev/\") :]\n\n    conn = pool.connect()\n    try:\n        virdom = conn.lookupByName(instance_id)\n    except libvirt.libvirtError as e:\n        raise _routing.InvalidParameterError(f\"invalid InstanceId: {e}\") from e\n\n    try:\n        virvol = pool.storageVolLookupByName(volume_id)\n    except libvirt.libvirtError as e:\n        raise _routing.InvalidParameterError(f\"invalid VolumeId: {e}\") from e\n\n    volume = objects.volume_from_xml(virvol.XMLDesc(0))\n\n    if _get_volume_status(pool, volume) != \"available\":\n        raise _routing.IncorrectStateError(\n            f\"Volume {volume.name} is in use and cannot be attached.\"\n        )\n\n    # NOTE: the XML tags here were lost during extraction; this is a best-effort\n    # reconstruction of a libvirt disk-attachment element (only the serial line\n    # survived verbatim), and it assumes the Volume object exposes a .path.\n    xml = textwrap.dedent(\n        f\"\"\"\\\n        <disk type=\"file\" device=\"disk\">\n          <driver name=\"qemu\" type=\"qcow2\"/>\n          <source file=\"{volume.path}\"/>\n          <target dev=\"{device}\"/>\n          <serial>lvirtebs-{device}</serial>\n        </disk>\n        \"\"\"\n    )\n\n    try:\n        virdom.attachDevice(xml)\n    except libvirt.libvirtError as e:\n        raise _routing.InternalServerError(str(e)) from e\n\n    # Give the new attachment time to settle. 
Alas, there seems to be\n # no obvious way to actually verify the status of the device in the\n # target VM.\n key = (volume_id, instance_id)\n dev = device\n for (vol, dom), (_, att_status) in tuple(_known_attachments.items()):\n if vol == volume_id and att_status == \"detached\":\n del _known_attachments[vol, dom]\n _known_attachments[key] = (dev, \"attaching\")\n\n def _mark_attached() -> None:\n _known_attachments[key] = (dev, \"attached\")\n\n asyncio.get_running_loop().call_later(3, _mark_attached)\n\n return {\n \"volumeId\": volume_id,\n \"instanceId\": instance_id,\n \"device\": f\"/dev/{device}\",\n \"status\": \"attaching\",\n }\n\n\n@_routing.handler(\"DetachVolume\")\nasync def detach_volume(\n args: _routing.HandlerArgs,\n app: _routing.App,\n) -> Dict[str, Any]:\n pool: libvirt.virStoragePool = app[\"libvirt_pool\"]\n instance_id = args.get(\"InstanceId\")\n if not instance_id:\n raise _routing.InvalidParameterError(\"missing required InstanceId\")\n if not isinstance(instance_id, str):\n raise _routing.InvalidParameterError(\"invalid InstanceId value\")\n volume_id = args.get(\"VolumeId\")\n if not volume_id:\n raise _routing.InvalidParameterError(\"missing required VolumeId\")\n if not isinstance(volume_id, str):\n raise _routing.InvalidParameterError(\"invalid VolumeId value\")\n\n key = (volume_id, instance_id)\n\n conn = pool.connect()\n try:\n virdom = conn.lookupByName(instance_id)\n except libvirt.libvirtError as e:\n raise errors.InvalidInstanceID_NotFound(\n f\"invalid InstanceId: {e}\"\n ) from e\n\n try:\n virvol = pool.storageVolLookupByName(volume_id)\n except libvirt.libvirtError as e:\n raise InvalidVolumeNotFound(f\"invalid VolumeId: {e}\") from e\n\n volume = objects.volume_from_xml(virvol.XMLDesc(0))\n\n attachments = objects.get_vol_attachments(pool, volume)\n device = None\n for attachment in attachments:\n if attachment.domain == instance_id:\n device = attachment.device\n break\n\n if device is None:\n known = _known_attachments.get(key)\n if not known or known[1] != \"detaching\":\n raise InvalidAttachmentNotFound(\n f\"Volume {volume_id} is not attached to Instance {instance_id}\"\n )\n else:\n return {\n \"volumeId\": volume_id,\n \"instanceId\": instance_id,\n \"status\": known[1],\n \"device\": f\"/dev/{known[0]}\",\n }\n\n xml = textwrap.dedent(\n f\"\"\"\\\n <disk type=\"file\">\n <source file=\"{volume.path}\"/>\n <target dev=\"{device}\"/>\n </disk>\n \"\"\"\n )\n\n try:\n virdom.detachDevice(xml)\n except libvirt.libvirtError as e:\n raise _routing.InternalServerError(str(e)) from e\n\n # Give the detachment time to settle. 
Alas, there seems to be\n # no obvious way to actually verify the status of the device in the\n # target VM.\n dev = device\n _known_attachments[key] = (dev, \"detaching\")\n\n def _mark_detached() -> None:\n _known_attachments[key] = (dev, \"detached\")\n\n asyncio.get_running_loop().call_later(3, _mark_detached)\n\n return {\n \"volumeId\": volume_id,\n \"instanceId\": instance_id,\n \"status\": \"detaching\",\n \"device\": f\"/dev/{device}\",\n }\n\n\ndef get_attachment_status(att: objects.VolumeAttachment) -> str:\n key = (att.volume, att.domain)\n state = _known_attachments.get(key)\n if state is None:\n return \"attached\"\n else:\n return state[1]\n\n\ndef get_known_attachments() -> Dict[Tuple[str, str], Tuple[str, str]]:\n return _known_attachments\n\n\ndef _get_volume_status(\n pool: libvirt.virStoragePool,\n volume: objects.Volume,\n) -> str:\n attachments = objects.get_vol_attachments(pool, volume)\n existing = {(att.volume, att.domain) for att in attachments}\n\n att_set = []\n for att in attachments:\n att_set.append(\n {\n \"status\": get_attachment_status(att),\n }\n )\n\n for (vol, dom), (_, status) in _known_attachments.items():\n if (vol, dom) not in existing and vol == volume.name:\n att_set.append(\n {\n \"status\": status,\n }\n )\n\n if all(att[\"status\"] == \"detached\" for att in att_set):\n status = \"available\"\n else:\n status = \"in-use\"\n\n return status\n\n\ndef _describe_volume(\n pool: libvirt.virStoragePool,\n volume: objects.Volume,\n) -> Dict[str, Any]:\n\n attachments = objects.get_vol_attachments(pool, volume)\n existing = {(att.volume, att.domain) for att in attachments}\n\n att_set = []\n for att in attachments:\n att_set.append(\n {\n \"instanceId\": att.domain,\n \"volumeId\": att.volume,\n \"device\": f\"/dev/{att.device}\",\n \"status\": get_attachment_status(att),\n }\n )\n\n for (vol, dom), (device, status) in _known_attachments.items():\n if (vol, dom) not in existing and vol == volume.name:\n att_set.append(\n {\n \"instanceId\": dom,\n \"volumeId\": vol,\n \"device\": f\"/dev/{device}\",\n \"status\": status,\n }\n )\n\n if all(att[\"status\"] == \"detached\" for att in att_set):\n status = \"available\"\n else:\n status = \"in-use\"\n\n return {\n \"volumeId\": volume.name,\n \"volumeType\": \"standard\",\n \"size\": volume.capacity // 1073741824,\n \"status\": status,\n \"attachmentSet\": att_set,\n }\n","sub_path":"libvirt_aws/handlers/volumes.py","file_name":"volumes.py","file_ext":"py","file_size_in_byte":12754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"68592307","text":"from pprint import pprint\n\ndef addiere_kw(**kwargs):\n sum = 0\n pprint(kwargs)\n for a in kwargs.values():\n sum += a\n return sum\n\ndef addiere_a(*args):\n sum = 0\n pprint(args)\n for a in args:\n sum += a\n return sum\n\n \n \nsumme = addiere_kw(a=1, b=2, c=7, f=5)\nprint(summe)\n\nsumme = addiere_a(1, 2, 7, 5)\nprint(summe)\n","sub_path":"kwargs.py","file_name":"kwargs.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"526523476","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\nimport scipy.interpolate as interp\n\n\n# This was a homework exercise on CPA, as part of my computational physics class. 
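(CPA = chirped-pulse amplification.)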
\n# I was to investigate how the pulse duration of a chirped pulse evolves over time and distance in an optical fibre.\n# Graded - 97%\n\n\n# 1) creating given variables and other arrays to be used later in the exercise\n\nT0 = 0.5e-12\nL = 1.064e-6 # wavelength\nF = np.array([-1, 0, 1]) # chirp parameter\nc = 3e8 # speed of light\nB = np.array([0.6961663, 0.4079426, 0.8974794]) # Sellmeier coefficients\nC = np.array([0.0684043e-6, 0.1162414e-6, 9.896161e-6])\nT = np.linspace(-20*T0, 20*T0, 1001) # time array\nz = np.linspace(0, 101, 101) # distance\nwave = np.linspace(1.0e-6, 1.1e-6, 1001) # wave linspace\nwave1 = np.linspace(1.0e-6, 1.1e-6, 997) # wave linspace minus the end points lost to the two differentiation passes\nwavestep = wave[1]-wave[0] # dx or \"h\" value\nxrange1 = np.linspace(1, 1.1, 1001) # array for better scale on plots\nxrange2 = np.linspace(1, 1.1, 997)\n\n\n# 2) Creates a function for eqn (4).\n\ndef pulse(t, t0, z, b2, f):\n \"\"\"\n Takes in parameters T, T0, z, b2, F and computes equation 4 returning the pulse intensity, I.\n \"\"\"\n ip = (t0**4 / ((t0**2 + (b2 * f * z))**2 + (b2 * z)**2)) * np.exp(- (t0**2 * t**2) / ((t0**2 + (b2 * f * z))**2 + (b2 * z)**2))\n return ip\n\n\n# function for calculating n using eqn (1)\n\ndef refractive_index(wavelength):\n \"\"\"\n For each wavelength value passed in, or each array element, the total starts at 1 and the equation 1 term is\n added for each B and C index. The value of the refractive index, n, is then calculated by taking the square\n root of this total.\n \"\"\"\n total = 1\n for i in range(len(B)):\n total += ((B[i] * (wavelength**2)) / (wavelength**2 - C[i]**2))\n\n nl = np.sqrt(total)\n return nl\n\n\nn = refractive_index(wave)\n\n# 4) Differentiation. Takes the n array and computes (n[i+1] - n[i-1]) divided by twice the step size, 2*wavestep.\n# e.g. it takes (n[2] - n[0])/(2*wavestep), calculates this for each i and holds the value in first_derivative.\n# Then x.append(first_derivative) adds that value to the empty list I created called x, and finally an array\n# called first_derivative is created with these values. 
This step corresponds to the central-difference formula (f(x+h) - f(x-h)) / (2h).\n\n\ni = 1 # setting the starting point to 1 since the index value at n[i-1] for i = 0 returns n[-1] or n[1000] which we don't want.\nx = [] # empty lists for appending values from while loop.\ny = []\n\nwhile 1 <= i < (len(n) - 1):\n first_derivative = (n[i+1] - n[i-1]) / (2 * wavestep)\n x.append(first_derivative)\n i += 1 # counter\n\nfirst_derivative = np.array(x) # creates an array from the values in the x list\n\ni = 1 # resetting i counter back to 1 for 2nd while loop for 2nd derivative\n\nwhile 1 <= i < (len(first_derivative) - 1):\n second_derivative = (first_derivative[i+1] - first_derivative[i-1]) / (2 * wavestep)\n y.append(second_derivative)\n i += 1\n\nsecond_derivative = np.array(y) # creates an array from the values in the y list\n\nD = - (wave1 / c) * second_derivative\nprint('Since D = {} is negative, the material is said to exhibit normal dispersion.'.format(D.max()))\nb2_array = - ((wave1 ** 2) / (2 * np.pi * c)) * D # computes equation (5)\n\n# (5) linear interpolation method used as our graph is a close approximation of a straight line.\n\nlinear_interpolation = interp.interp1d(wave1, b2_array, kind='linear') # creates a linear interpolation function of x, y data\nb2_value = linear_interpolation(1.064e-6) # uses the created function and takes an x value as the argument, returning the corresponding y value\nprint('b2 value at 1.064e-6m is {}'.format(b2_value))\n\npulse_width = np.zeros((3, 101))\n\nfor i in range(len(F)):\n for j in range(len(z)):\n\n intensity_profile = pulse(T, T0, z[j], b2_value, F[i])\n amplitude = intensity_profile.max()\n ip_shift = intensity_profile - (amplitude / np.e) # adjusting graph to intersect at a/e\n\n ip_interp = interp.UnivariateSpline(xrange1, ip_shift, s=0) # creates a spline of our Ip data with smoothing set to 0\n r1, r2 = ip_interp.roots() # finds the two roots (the 1/e crossing points)\n root_length = r2 - r1 # calculates length between roots\n pulse_width[i][j] = root_length # store the 1/e width for chirp F[i] at distance z[j]\n\n\n\npulse0 = pulse_width[0] # corresponds to F[0]\npulse1 = pulse_width[1] # F[1]\npulse2 = pulse_width[2] # F[2]\n\n\n# the UnivariateSpline method is used here since it is a good method for finding multiple roots of a Gaussian as in our case. Methods\n# such as Newton's or brentq were only useful for returning single or limited roots. The UnivariateSpline with smoothing\n# set to 0, s=0, returns the same results as the \"InterpolatedUnivariateSpline\" method from the same library and so that method\n# could have also been used.\n\n\n# (8)\n\ndef tevol(z, a, t0, f):\n\n \"\"\"\n This function represents the 1/e pulse duration equation 6. 
Returns the pulse duration as a function of a, F and z.\n \"\"\"\n b = ((1 + (a * f * z))**2 + (a * z)**2)\n tp = t0 * (np.sqrt(b))\n return tp\n\n\n# The opt.curve_fit() curve fitting method was used here to better approximate the best curve fit for the data set.\n# As seen on graph 4, the fitted data set with x markers is an extremely close approximation of the original data.\n# This fitted data will later be used to extract values to investigate how good the curve fitting method was.\n\nzrange = np.linspace(0, 100, 10) # z linspace for fit data\n\nfit_params = [] # empty list to hold fit parameters\nextracted_data = [] # data extracted from fit placeholder\n\nfor i in range(3):\n fit, cov = opt.curve_fit(tevol, z, pulse_width[i])\n fit_params.append(fit)\n\n extract = tevol(zrange, *fit)\n extracted_data.append(extract)\n\n a_fit = fit_params[i][0]\n T_fit = fit_params[i][1]\n F_fit = fit_params[i][2]\n b2_at_a = T_fit**2 * a_fit\n\n print('Fit values for F = {}: a = {}, T = {}, F = {}, B2 = {}.'.format(F[i], a_fit, T_fit, F_fit, b2_at_a))\n\n\n# Note on the returned values: for F = -1 we get a return of F = -0.999 which is a close approximation indicating a good\n# fit for this curve. For F = 0 we get a return of F = -1.126e-8 which can be considered a close approximation of 0 since\n# on array inspection in variable explorer this actually shows up as -0. For F = 1, we get a close approximation of the\n# magnitude but the wrong sign. However, for this curve the values for a and b2 are equal in magnitude but opposite in sign\n# to the values returned for F = -1, meaning this could be considered a close approximation also.\n\n\nduration0 = extracted_data[0] # F[0]\nduration1 = extracted_data[1] # F[1]\nduration2 = extracted_data[2] # F[2]\n\nfig, ((ax1, ax2), (ax4, ax5)) = plt.subplots(2, 2, figsize=(12, 12)) # creating empty axes\n\nax1.plot(xrange1, n,'-')\nax1.set_title('Refractive Index vs Wavelength\\n')\nax1.set_xlabel('\\nWavelength (μm)')\nax1.set_ylabel('Refractive Index, n\\n')\nax1.grid(b=True, which='major', axis='both', linestyle='-')\n\nax2.set_title('Dispersion Parameter vs Wavelength\\n')\nax2.plot(xrange2, b2_array, '-')\nax2.set_xlabel('\\nWavelength (μm)')\nax2.set_ylabel('Dispersion Parameter, β2\\n')\nax2.grid(b=True, which='both', axis='both', linestyle='-')\n\nax4.plot(z, pulse0, '-', label='F = -1')\nax4.plot(z, pulse1, '-', label='F = 0')\nax4.plot(z, pulse2, '-', label='F = 1')\nax4.set_title('Pulse Duration\\n')\nax4.set_xlabel('\\nDistance (m)')\nax4.set_ylabel('Pulse Duration (ps)\\n')\nax4.grid(b=True, which='major', axis='both', linestyle='-')\nax4.legend()\n\nax5.plot(z, pulse0, '-', label='F = -1')\nax5.plot(z, pulse1, '-', label='F = 0')\nax5.plot(z, pulse2, '-', label='F = 1')\nax5.plot(zrange, duration0, 'x', label='Fit of F = -1')\nax5.plot(zrange, duration1, 'x', label='Fit of F = 0')\nax5.plot(zrange, duration2, 'x', label='Fit of F = 1')\nax5.set_title('Pulse Duration Fit\\n')\nax5.set_xlabel('\\nDistance (m)')\nax5.set_ylabel('Pulse Duration (ps)\\n')\nax5.grid(b=True, which='major', axis='both', linestyle='-')\nax5.legend()\nplt.show()\n\n\n","sub_path":"ChirpedPulseAmplification.py","file_name":"ChirpedPulseAmplification.py","file_ext":"py","file_size_in_byte":8162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"460534976","text":"# def lingkaran (Π,r):\n# luas = Π * r * r\n# keliling = Π * 2 * r\n# return luas,keliling\n\n# print(lingkaran(22/7,70))\n\ndef lingkaran(jari):\n 
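\"\"\"Return the area (L) and circumference (K) of a circle with radius jari.\"\"\"\n 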
phi = 22/7\n L = phi*jari*jari\n K = 2*phi*jari\n return L,K\n\ndef Luas(jari):\n phi = 22/7\n L = phi*jari*jari\n return L\n\ndef Kel(jari):\n phi = 22/7\n K = 2*phi*jari\n return K\n\n# print(Luas(10))\nl = float(\"{:.2f}\".format(Luas(10)))\nprint(type(l))\n# luas = lingkaran(7)[0]\n# kel = lingkaran(7)[1]\n\n# print(luas)\n# print(kel)\n\n# input_data = float(input(\"Masukkan Jari - jari : \"))\n# print(Luas(input_data))\n# print(Kel(input_data))\n\n\n\n# # lingkaran(7)\n# print(lingkaran(10))\n# ling = lingkaran(10)\n# # print(ling[0])\n# # print(ling[1])\n# print(\"Luas : {:.2f} cm\\u00b2\".format(ling[0]))\n# print(\"Keliling : {:.2f} cm\".format(ling[1]))","sub_path":"function_example2.py","file_name":"function_example2.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"65932637","text":"import sys\nN = int(sys.stdin.readline())\n\n# read P as space-separated integers\nP = list(map(int, input().split()))\nP.sort()\n\ntime = 0\n\nfor idx, item in enumerate(P):\n for j in range(0, idx+1): # accumulate the waiting time over indices 0..idx\n time += P[j]\n\nprint(time)","sub_path":"greedy_implement/atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"89370840","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\nresults = \"\"\"20,107.05\n30,83.83\n40,76.15\n50,72.07\n60,69.69\n70,68.20\n80,67.42\n90,66.95\n100,66.47\"\"\"\n\nresults2 = \"\"\"10,349.14\n20,131.37\n30,82.86\n40,69.59\n50,64.82\n60,62.54\n70,61.36\n80,60.47\n90,60.02\n100,59.69\"\"\"\n\ndef plot(data):\n x_values = []\n y_values = []\n for line in data.split(\"\\n\"):\n point = [float(v) for v in line.split(\",\")]\n x_values.append(point[0])\n y_values.append(point[1])\n plt.hlines(y_values, 0, x_values, color=\"black\", linewidth=\"1\", linestyle=\"dashed\")\n plt.plot(x_values, y_values, \"-*\")\n\nplot(results)\n# plot(results2)\n# plot(results3)\n# plot(results_lstm)\n# plot(results_lstm_cp)\n\nplt.title(\"Linear Dropout Pruning on QRNN for WT-2\")\nplt.ylabel(\"Word-level Perplexity\")\nplt.xlabel(\"% FLOPs in RNN layers\")\nplt.xlim(0)\ny_range = (65, 94)\nplt.ylim(*y_range)\n# plt.yticks(np.arange(54, 75, 1))\nplt.yticks(np.arange(*y_range, 1))\nplt.xticks(np.arange(0, 110, 10))\nplt.hlines(66.76, 0, 110, color=\"red\", linewidth=\"1\")\nplt.text(93, 66.96, \"Original model result\")\nplt.show()\n","sub_path":"wt2_results.py","file_name":"wt2_results.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"205069567","text":"import com.ihsan.foundation.pobjecthelper as phelper\n\nclass Peringatan(Exception):\n def __init__(self, value):\n self.parameter=value\n def __str__(self):\n return repr(self.parameter)\n\ndef formSetDataEx(UIDefList, Parameter):\n #raise 'm', Parameter.FirstRecord.key\n #raise Peringatan('Test')\n config = UIDefList.config\n dspeserta = UIDefList.GetPClassUIByName('uipinit')\n rec = dspeserta.Dataset.AddRecord()\n if Parameter.FirstRecord not in [None,'',0]:\n key=Parameter.FirstRecord.key.split('=')[-1]\n rec.SetFieldByName('Peserta.CustomerId', int(key))\n s = 'select customername from customer where customerid=%s' % key\n res = config.CreateSQL(s).RawResult\n nama = res.customername\n rec.SetFieldByName('Peserta.CustomerName', nama)\n else:\n 
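# no customer record was preselected, so clear the participant fields\n 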
rec.SetFieldByName('Peserta.CustomerId', None)\n rec.SetFieldByName('Peserta.CustomerName', None)\n\n atid = config.CreateSQL(\"select appraisaltemplateid from appraisaltemplate where appraisalname='ScoreBoard'\").RawResult.appraisaltemplateid\n rec.Tanggal = config.Now()\n sSQL = \"select appraisaltemplateid,itemtemplateid,sequenceno,itemname,valuedatatype,parentid,level,description from AppraisalItemTemplate where AppraisalTemplateId=%d and isactive='A' and islowlevel='T' order by sequenceno, parentid\" % atid\n ret_items = config.CreateSQL(sSQL).rawresult\n rec.cmid = ret_items.appraisaltemplateid\n dsitems = UIDefList.GetPClassUIByName('uipactivity')\n nomor_index = 1\n tipedata = {None:'None','I':'Integer','B':'Boolean','F':'Float','S':'String'}\n while not ret_items.Eof :\n set_item = dsitems.Dataset.AddRecord()\n set_item.Nomor = nomor_index\n set_item.Desc = ret_items.itemname\n set_item.tipe = tipedata[ret_items.valuedatatype]\n set_item.ttipe = ret_items.valuedatatype\n set_item.id = ret_items.itemtemplateid\n #set_item.grups = vgrupdesc[ret_items.visitgroup]\n set_item.answerhelp = ret_items.description or ''\n parent=''\n pid = ret_items.parentid\n for i in range(ret_items.level):\n pSQL = \"select itemname,parentid from appraisalitemtemplate where itemtemplateid=%s\" % str(pid)\n parentdata = config.CreateSQL(pSQL).RawResult\n pid = parentdata.parentid\n parent = parentdata.itemname+' - '+parent\n parent=parent.rstrip(' - ')\n set_item.induk = parent\n nomor_index += 1\n ret_items.Next()\n\ndef SimpanData(config, parameter, returnpacket):\n rp = returnpacket.CreateValues(['hasil',''],['modal',''])\n cs_rec = parameter.uipinit.GetRecord(0)\n tahunskrg = config.ModLibUtils.DecodeDate(config.Now())[0]\n xx1 = int(tahunskrg)\n #raise '', tahunskrg\n config.BeginTransaction()\n try:\n helper = phelper.PObjectHelper(config)\n rumus = helper.GetObject('AppraisalTemplate', cs_rec.cmid).Formula\n modalkum3 = config.CreateSQL(\"select formula from appraisaltemplate where appraisalname='ModalKUM3'\").RawResult.formula\n modal = 0.0\n hitmodal = \"modal = %s\" % modalkum3\n hasil = 'TIDAK LULUS'\n fullrumus = \"%s: hasil='LULUS'\" % rumus\n acts = helper.CreatePObject('Appraisal')\n acts.AppraisalDate = cs_rec.Tanggal\n acts.ApprovalStatus = 'T'\n acts.CustomerId = cs_rec.GetFieldByName('Peserta.CustomerId')\n acts.AppraisalTemplateId = cs_rec.cmid\n #raise 'recnum', parameter.uipactivity.recordCount\n for i in range(parameter.uipactivity.recordCount):\n act_rec = parameter.uipactivity.GetRecord(i)\n actitem = helper.CreatePObject('AppraisalItem')\n actitem.AppraisalId = acts.AppraisalId\n actitem.ItemTemplateId = act_rec.id\n if act_rec.ttipe == 'I' :\n actitem.IntegerValue = int(act_rec.Nilai or '0')\n exec(\"v%d=%s\" % (act_rec.id,act_rec.Nilai or '0'))\n elif act_rec.ttipe == 'F' :\n actitem.FloatValue = float(act_rec.Nilai or '0')\n exec(\"v%d=%s\" % (act_rec.id,act_rec.Nilai or '0'))\n elif act_rec.ttipe == 'S' :\n actitem.StringValue = act_rec.Nilai or ''\n exec(\"v%d='%s'\" % (act_rec.id,act_rec.Nilai or ''))\n elif act_rec.ttipe == 'B' :\n if act_rec.Nilai.upper().find('T') < 0:\n actitem.BoolValue = 'F'\n exec(\"v%d='%s'\" % (act_rec.id,act_rec.Nilai))\n else:\n actitem.BoolValue = 'T'\n exec(\"v%d='%s'\" % (act_rec.id,act_rec.Nilai))\n exec(fullrumus)\n exec(hitmodal)\n rp.hasil = hasil\n rp.modal = config.FormatFloat('##,000.00',modal)\n updCalon = helper.GetObject('MustahiqCandidate', acts.CustomerId)\n updCalon.MonthlyIncome = modal\n if hasil=='LULUS':\n 
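# 'LULUS' is Indonesian for \"passed\": mark the candidate as verified\n 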
updCalon.CandidateStatus = 'V'\n else:\n updCalon.CandidateStatus = 'X'\n config.Commit()\n except:\n config.Rollback()\n raise\n\n","sub_path":"dialogs/KUM3/fAddPenilaianCalonPeserta_data.py","file_name":"fAddPenilaianCalonPeserta_data.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"545643244","text":"import sys\nimport socket\n\nSERVERPORT = 9000\n\ndef main(argv):\n serverName = argv[0]\n serverArg = argv[1]\n\n clientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n clientSocket.settimeout(1)\n clientSocket.sendto(serverArg.encode(), (serverName, SERVERPORT))\n \n try:\n data, addr = clientSocket.recvfrom(1024)\n print(data.decode())\n except socket.timeout:\n print('Request timed out')\n \n clientSocket.close()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","sub_path":"UDPClient.py","file_name":"UDPClient.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"463126848","text":"# -*- coding: utf-8 -*-\n# Given a text string of 10 to 20 characters entered by the user, keep the first 3 and the last 5.\n\nif __name__ == '__main__':\n while(True):\n cadena = raw_input(\"Ingrese una palabra de 10 a 20 caracteres: \")\n if len(cadena) >= 10 and len(cadena) <= 20:\n break\n else:\n print(\"Error, reingrese\")\n print(\" \")\n\n # show the first 3 and the last 5 characters\n print(\"3 primeros caracteres: \" + cadena[0:3])\n print(\"5 ultimos caracteres: \" + cadena[-5:])\n","sub_path":"2.0-tipos-de-datos/ejercicios_alumnos/Zardain-Sergio/ej04.py","file_name":"ej04.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"120299259","text":"import pygame\nfrom pygame.sprite import Sprite\nfrom spritesheet import SpriteSheet\nfrom Timer import Timer\n\n\nclass PiranhaPlant(Sprite):\n def __init__(self, screen, pop_list):\n super(PiranhaPlant, self).__init__()\n\n # get screen dimensions\n self.screen = screen\n self.screen_rect = self.screen.get_rect()\n\n # list to hold animation images\n self.pop_list = pop_list\n\n # Timer class to animate sprites\n self.animation = Timer(frames=self.pop_list)\n\n # get the rect of the image\n self.imagePop = self.animation.imagerect()\n self.rect = self.imagePop.get_rect()\n\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n\n # store the object's exact position\n self.x = float(self.rect.centerx)\n self.y = float(self.rect.centery)\n\n # movement flags\n self.going_up = False\n self.going_down = False\n self.direction = 1\n\n def blitme(self):\n if self.going_up or self.going_down:\n self.screen.blit(self.imagePop, self.rect)\n\n def update(self):\n self.y += (1 * self.direction)\n # (2 * self.direction)\n self.rect.y = self.y\n\n if self.rect.top >= self.screen_rect.centery + 250:\n self.direction *= -1\n self.going_up = False\n self.going_down = True\n if self.rect.bottom <= self.screen_rect.centery - 250:\n self.direction *= -1\n self.going_up = True\n self.going_down = False\n if self.going_down or self.going_up:\n self.imagePop = self.pop_list[self.animation.frame_index()]\n\n\nclass UnderGroundPiranha(PiranhaPlant):\n def __init__(self, screen):\n sprite_sheet = SpriteSheet(\"Images/enemies.png\")\n self.under_piranha = []\n imagePop = 
pygame.transform.scale(sprite_sheet.get_image(390, 60, 19, 25), (32, 32))\n self.under_piranha.append(imagePop)\n imagePop = pygame.transform.scale(sprite_sheet.get_image(420, 60, 19, 25), (32, 32))\n self.under_piranha.append(imagePop)\n super().__init__(screen=screen, pop_list=self.under_piranha)","sub_path":"PiranhaPlant.py","file_name":"PiranhaPlant.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"465301374","text":"class stack:\n def __init__(self):\n self.stacklist = []\n self.n = int(input())\n for i in range(self.n):\n a = input()\n if \"push\" in a:\n self.push(a.split()[1])\n elif \"pop\" in a:\n self.pop()\n elif \"size\" in a:\n self.size()\n elif \"top\" in a:\n self.top()\n elif \"empty\" in a:\n self.empty()\n def push(self,a):\n self.stacklist.append(a)\n def pop(self):\n if self.stacklist:\n print(self.stacklist.pop())\n else:\n print(-1)\n def size(self):\n print(len(self.stacklist))\n def empty(self):\n if self.stacklist:\n print(0)\n else:\n print(1)\n def top(self):\n if self.stacklist:\n print(self.stacklist[-1])\n else:\n print(-1)\na = stack()\n","sub_path":"자료구조/10828.py","file_name":"10828.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"133186928","text":"from helper import Functions\n\nclass Order:\n def persist(self):\n sql = \"INSERT INTO `asd`.`orders` VALUES \\\n('\" + self.OrderID + \"', '\" + self.CustomerID + \"', '\" + self.EmployeeID + \"', '\" + self.OrderDate + \"', '\" + self.RequiredDate\\\n + \"', '\" + self.ShippedDate + \"', '\" + self.ShipVia + \"', '\" + self.Freight + \"', '\" + self.ShipName + \"', '\" \\\n + self.ShipAddress + \"', '\" + self.ShipCity + \"', '\" + self.ShipRegion + \"', '\" + self.ShipPostalCode\\\n + \"', '\" + self.ShipCountry + \"');\"\n\n print(sql)\n Functions.sql_executer(sql)\n\n\n @staticmethod\n def parse(row):\n parsed_row = row.split(\";\")\n order = Order()\n order.OrderID = parsed_row[0]\n order.CustomerID = parsed_row[1]\n order.EmployeeID = parsed_row[2]\n order.OrderDate = parsed_row[3]\n order.RequiredDate = parsed_row[4]\n order.ShippedDate = parsed_row[5]\n order.ShipVia = parsed_row[6]\n order.Freight = parsed_row[7]\n order.ShipName = parsed_row[8]\n order.ShipAddress = parsed_row[9]\n order.ShipCity = parsed_row[10]\n order.ShipRegion = parsed_row[11]\n order.ShipPostalCode = parsed_row[12]\n order.ShipCountry = parsed_row[13]\n return order\n\n\n def caller(self):\n datas = Functions.data_reader(\"orders.csv\")\n for i in range(1, len(datas)):\n order = Order.parse(datas[i])\n order.persist()\n\n\ntest = Order()\ntest.caller()","sub_path":"python_training/my_orm_pattern_python/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"14387563","text":"import numpy as np\nimport random, time\n\nfrom .component import Block, Grid\nfrom .mdp import State, Action, Reward\nfrom .utils import analyze_grid, compute_score\n\n__all__ = [\n 'BaseEnv',\n 'FakeEnv',\n]\n\n\nclass BaseEnv:\n def __init__(self):\n self.grid = Grid()\n self.current = None\n self.holding = None\n self.buffer = [None] * 5\n\n self.steps = 0\n self.combo = 0\n self.combo_max = 0\n\n self.lines = 0\n self.score = 0\n \n def get_state(self):\n return State(\n self.grid.data.copy(),\n self.current,\n self.holding,\n self.buffer,\n 
self.combo)\n\n def action_space(self):\n return Action.N\n\n def print(self):\n print(' ' + '==' * 10)\n self.grid.print()\n print(' ' + '--' * 10)\n print(' current: {}'.format(self.current))\n if self.__class__.__name__ != 'RealEnv':\n print(' combo: {}'.format(self.combo))\n print(' combo_max: {}'.format(self.combo_max))\n print(' lines: {}'.format(self.lines))\n print(' score: {}'.format(self.score))\n print(' steps: {}'.format(self.steps))\n print(' ' + '==' * 10)\n\n def copy(self):\n env = self.__class__()\n env.grid = self.grid.copy()\n env.current = self.current\n env.holding = self.holding\n env.buffer = self.buffer.copy()\n env.steps = self.steps\n env.combo = self.combo\n env.combo_max = self.combo_max\n env.lines = self.lines\n env.score = self.score\n return env\n\n\n\nclass FakeEnv(BaseEnv):\n def __init__(self, max_steps=None):\n super().__init__()\n self.max_steps = np.inf if max_steps is None else max_steps\n self.end = False\n\n self.blocks = self._block_gen()\n self.current = next(self.blocks)\n self.holding = next(self.blocks)\n self.buffer = [next(self.blocks) for _ in range(5)]\n\n self.combo_init = False\n\n def step(self, action):\n state, reward = self._step(action)\n return state, reward\n\n def coroutine(self):\n action = yield self.get_state()\n while True:\n action = yield self.step(action)\n \n def random_init(self):\n height = random.randint(3, 18)\n top = 20 - height\n rand = np.random.random((height, 10))\n self.grid.data[top:] = (rand > 0.2).astype(np.int8)\n self.grid.clear()\n \n def _block_gen(self, full=True):\n symbols = Block.symbols.copy()\n random.shuffle(symbols)\n yield from symbols\n while True:\n yield random.choice(symbols)\n \n def _step(self, action):\n self.steps += 1\n if self.steps == self.max_steps:\n self.end = True\n\n # Swap current and holding if action.hold\n if getattr(action, 'hold', 0):\n self.current, self.holding = self.holding, self.current\n\n block = Block(self.current)\n block.rotate(action.rotate)\n block.move(action.move)\n\n # Test collision\n d = self.grid._drop(block)\n if d < 0 or block.top + d < 0:\n self.end = True\n self.grid.fill()\n return self.get_state(), Reward.END\n\n height_prev, holes_prev, hdiff_prev, vdiff_prev \\\n = analyze_grid(self.grid)\n\n self.grid.add(block)\n\n # Pop and push new block to buffer\n self.current = self.buffer.pop(0)\n self.buffer.append(next(self.blocks))\n\n # Analyze for reward\n lines_gain = self.grid.clear()\n combo_prev = self.combo\n score_gain = 0\n\n if lines_gain:\n if self.combo_init:\n # Start accumulating combo\n self.combo += 1\n self.combo_max = max(self.combo, self.combo_max)\n \n if lines_gain >= 4:\n score_gain += 4\n elif lines_gain > 1:\n score_gain += lines_gain - 1\n \n score_gain += min(self.combo + 1, 8) // 2\n \n self.score += score_gain\n self.grid.unpad(lines_gain)\n\n else:\n # Start accumulating combo next step\n self.combo_init = True\n\n self.lines += lines_gain\n \n else:\n self.combo_init = False\n self.combo = 0\n\n height, holes, hdiff, vdiff \\\n = analyze_grid(self.grid)\n\n if height >= 20:\n self.end = True\n self.grid.fill()\n return self.get_state(), Reward.END\n\n state = self.get_state()\n reward = Reward.compute(lines_gain,\n self.combo - combo_prev,\n score_gain,\n height,\n height - height_prev,\n holes - holes_prev,\n hdiff - hdiff_prev,\n vdiff - vdiff_prev)\n\n return state, 
reward\n","sub_path":"autotb/env/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"357831615","text":"import base64\nimport os\nimport time\n\nfrom appium import webdriver\n\n\ndef change_img_as_base64(img_name):\n with open(img_name, 'rb') as f:\n return base64.b64encode(f.read())\n\ncaps = {}\ncaps['platformName'] = 'Android'\ncaps['deviceName'] = 'Google Pixel'\ncaps['automationName'] = 'UiAutomator2'\ncaps['appPackage'] = 'com.android.gallery3d'\ncaps['appActivity'] = 'com.android.gallery3d.app.GalleryActivity'\n\ndriver = webdriver.Remote(\"http://localhost:4723/wd/hub\", caps)\n\n# Hard-wait 5s here to make the effect more obvious, because on startup the app pops a toast saying there are no photos yet\ntime.sleep(5)\n# The emulator I chose opens the gallery app straight into the photo view with no other screens, so no extra steps are needed here\nimg = 'Android.jpg'\nimg_path = os.path.abspath(img)\ndevice_photo_path = '/sdcard/Pictures'\ndriver.push_file(device_photo_path+'/'+img, source_path=img)\n\n\n\n\n\n","sub_path":"edition2/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"132953812","text":"print('Welcome to the TicTacToe Game')\n\ngame = [['', '', ''],['', '', ''],['', '', '']]\n\nfirst_player_turn = True\nfirst_symbol = 'X'\nsecond_symbol = 'O'\n\n\ndef print_game(game):\n printout = ''\n for (row_index, row) in enumerate(game):\n for (cell_index, cell) in enumerate(row):\n printout += (' '+ cell if cell != '' else ' ') + (' |' if cell_index < len(row) - 1 else '')\n printout+= '\\n-----------\\n' if row_index < len(game) - 1 else '\\n'\n print(printout)\n\ndef play(game, symbol, position):\n positions = { '1': (0,0), '2': (0, 1), '3': (0, 2), '4': (1, 0), '5': (1, 1), '6': (1, 2), '7': (2, 0), '8': (2, 1), '9': (2, 2)}\n if (position in positions):\n pos = positions[position]\n if game[pos[0]][pos[1]] == '':\n game[pos[0]][pos[1]] = symbol\n return True\n return False\n\ndef has_finished(game):\n for index in range(0, 3):\n if game[index][0] == game[index][1] == game[index][2]:\n return game[index][0]\n if game[0][index] == game[1][index] == game[2][index]:\n return game[0][index]\n if (game[0][0] == game[1][1] == game[2][2]):\n return game[0][0]\n if (game[0][2] == game[1][1] == game[2][0]):\n return game[0][2]\n\n return ''\n\ndef ask_turn(game):\n global first_player_turn\n player_name = 'Player 1' if first_player_turn else 'Player 2'\n player_symbol = first_symbol if first_player_turn else second_symbol\n coord = input(f\"It's {player_name}'s turn to play. Enter a coordinate between 1 and 9: \")\n success = play(game, player_symbol, coord)\n if success:\n first_player_turn = not first_player_turn\n\nprint_game(game)\nwhile has_finished(game) == '':\n ask_turn(game)\n print_game(game)\n\nwinner = 'Player 1' if has_finished(game) == 'X' else 'Player 2'\nprint(f'Bravo! 
The winner is {winner}')","sub_path":"04-Milestone Project - 1/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"572081772","text":"#!/usr/bin/env python3\n__author__='DGideas'\n#Release:Dorado\nimport sys\nimport fileinput\nimport codecs\ni=1\nu=codecs.open('utrain.txt','a','utf8')\nucache=[]\nwith open('train.txt') as f:\n\tfor line in f:\n\t\tsplit=line.split('\\t')\n\t\tif split[0] not in ucache:\n\t\t\tucache.append(split[0])\n\t\t\tu.write(split[0]+'\\n')\n\t\t\tprint(str(i))\n\t\t\ti=i+1\nu.close()","sub_path":"fetchtrainuser.py","file_name":"fetchtrainuser.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"267349591","text":"#!/usr/bin/python3\n# Lab assignment 3: part 1\n\n\"\"\"\nData link: http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/\n\"\"\"\n\nimport csv\nimport numpy as np\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\n# Create empty variable to hold data for X and Y\nx_data = []\ny_data = []\n# Populate data into x_data and y_data\nwith open(\"forestfires.csv\") as f:\n csv_reader = csv.reader(f, delimiter=',')\n # Skip header\n next(csv_reader)\n for line in csv_reader:\n temp = float(line[8])\n relative_humidity = float(line[9])\n wind = float(line[10])\n rain = float(line[11])\n area = 1 if float(line[12]) > 0 else 0\n x_data.append([temp, relative_humidity, wind, rain])\n y_data.append(area)\n# Convert x_data and y_data into numpy array\nnp_x_data = np.array(x_data)\nnp_y_data = np.array(y_data)\n# Create and build the model\nmodel = LinearDiscriminantAnalysis()\nmodel.fit(np_x_data, np_y_data)\n# Perform prediction (may have a forest fire)\ntemp = 30\nrelative_humidity = 90\nwind = 8\nrain = 0.1\nprint(\"With temperature [%f] in Celsius, relative humidity [%f] in percent, wind speed [%f] in km/h, and rain [%f] in mm/m2\" % (temp, relative_humidity, wind, rain))\nif model.predict([[temp, relative_humidity, wind, rain]])[0]:\n print(\"It is likely to have a forest fire.\")\nelse:\n print(\"It is not likely to have a forest fire.\")\n# Perform prediction (may not have a forest fire)\ntemp = 5\nrelative_humidity = 20\nwind = 0.5\nrain = 5.8\nprint(\"With temperature [%f] in Celsius, relative humidity [%f] in percent, wind speed [%f] in km/h, and rain [%f] in mm/m2\" % (temp, relative_humidity, wind, rain))\nif model.predict([[temp, relative_humidity, wind, rain]])[0]:\n print(\"It is likely to have a forest fire.\")\nelse:\n print(\"It is not likely to have a forest fire.\")\n","sub_path":"labs/python/3/source/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"456132520","text":"def format_name(f_name, l_name):\n \"\"\"Take a first and last name and format it to return the title case version of the name.\"\"\"\n if f_name == \"\" or l_name == \"\":\n return\n formatted_f_name = f_name.title()\n formatted_l_name = l_name.title()\n return f\"{formatted_f_name} {formatted_l_name}\"\n\nprint(format_name(input(\"What is your first name? \"),input(\"What is your last name? 
\")))\n\n","sub_path":"Day 10/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"448952613","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.checkpoint import checkpoint as checkpoint_grads\nfrom torch.autograd import Function, set_grad_enabled, grad, gradcheck\nimport numpy as np\n\nfrom functools import reduce\nfrom operator import mul\n\n\nclass AffineCouplingBlock(nn.Module):\n def __init__(self,\n transform_type,\n memory_efficient=True,\n **kwargs):\n super().__init__()\n \n self.WN = transform_type(**kwargs)\n if memory_efficient:\n self.efficient_forward = AffineCouplingFunc.apply\n self.efficient_inverse = InvAffineCouplingFunc.apply\n self.param_list = list(self.WN.parameters())\n \n def forward(self, z, spect, speaker_ids=None):\n if hasattr(self, 'efficient_forward'):\n audio_out, log_s = self.efficient_forward(z, spect, speaker_ids, self.WN, *self.param_list)\n z.storage().resize_(0)\n return audio_out, log_s\n else:\n audio_0, audio_1 = z.chunk(2, 1)\n log_s, t = self.WN(audio_0, spect, speaker_ids)\n audio_1_out = audio_1 * log_s.exp() + t\n audio_out = torch.cat((audio_0, audio_1_out), 1)\n return audio_out, log_s\n \n def inverse(self, audio_out, spect, speaker_ids=None):\n if hasattr(self, 'efficient_inverse'):\n z, log_s = self.efficient_inverse(audio_out, spect, speaker_ids, self.WN, *self.param_list)\n audio_out.storage().resize_(0)\n return z, log_s\n else:\n audio_0_out, audio_1_out = audio_out.chunk(2, 1)\n log_s, t = self.WN(audio_0_out, spect, speaker_ids)\n audio_1 = (audio_1_out - t) / log_s.exp()\n z = torch.cat((audio_0_out, audio_1), 1)\n return z, -log_s\n\n\nclass AffineCouplingFunc(Function):\n @staticmethod\n def forward(ctx, z, spect, speaker_ids, F, *F_weights):\n ctx.F = F\n with torch.no_grad():\n audio_0, audio_1 = z.chunk(2, 1)\n audio_0, audio_1 = audio_0.contiguous(), audio_1.contiguous()\n \n log_s, t = F(audio_0, spect, speaker_ids)\n audio_1_out = audio_1 * log_s.exp() + t\n audio_0_out = audio_0\n audio_out = torch.cat((audio_0_out, audio_1_out), 1)\n \n ctx.save_for_backward(z.data, spect, speaker_ids, audio_out)\n return audio_out, log_s\n\n @staticmethod\n def backward(ctx, z_grad, log_s_grad):\n F = ctx.F\n z, spect, speaker_ids, audio_out = ctx.saved_tensors\n \n audio_0_out, audio_1_out = audio_out.chunk(2, 1)\n audio_0_out, audio_1_out = audio_0_out.contiguous(), audio_1_out.contiguous()\n dza, dzb = z_grad.chunk(2, 1)\n dza, dzb = dza.contiguous(), dzb.contiguous()\n \n with set_grad_enabled(True):\n audio_0 = audio_0_out\n audio_0.requires_grad = True\n log_s, t = F(audio_0, spect, speaker_ids)\n \n with torch.no_grad():\n s = torch.exp(log_s).half() # exp not implemented for fp16 therefore this is cast to fp32 by Nvidia/Apex\n audio_1 = (audio_1_out - t) / s # s is fp32 therefore audio_1 is cast to fp32.\n z.storage().resize_(reduce(mul, audio_1.shape) * 2) # z is fp16\n if z.dtype == torch.float16: # if z is fp16, cast audio_0 and audio_1 back to fp16.\n torch.cat((audio_0.half(), audio_1.half()), 1, out=z)#fp16 # .contiguous()\n else:\n torch.cat((audio_0, audio_1), 1, out=z) #fp32 # .contiguous()\n #z.copy_(xout) # .detach()\n \n with set_grad_enabled(True):\n param_list = [audio_0] + list(F.parameters())\n if ctx.needs_input_grad[1]:\n param_list += [spect]\n if ctx.needs_input_grad[2]:\n param_list += [speaker_ids]\n dtsdxa, *dw = grad(torch.cat((log_s, t), 1), 
param_list,\n grad_outputs=torch.cat((dzb * audio_1 * s + log_s_grad, dzb), 1))\n \n dxa = dza + dtsdxa\n dxb = dzb * s\n dx = torch.cat((dxa, dxb), 1)\n if ctx.needs_input_grad[1]:\n *dw, dy = dw\n else:\n dy = None\n if ctx.needs_input_grad[2]:\n *dw, ds = dw\n else:\n ds = None\n \n return (dx, dy, ds, None) + tuple(dw)\n\n\nclass InvAffineCouplingFunc(Function):\n @staticmethod\n def forward(ctx, audio_out, spect, speaker_ids, F, *F_weights):\n ctx.F = F\n with torch.no_grad():\n audio_0_out, audio_1_out = audio_out.chunk(2, 1)\n audio_0_out, audio_1_out = audio_0_out.contiguous(), audio_1_out.contiguous()\n \n log_s, t = F(audio_0_out, spect, speaker_ids)\n audio_1 = (audio_1_out - t) / log_s.exp()\n audio_0 = audio_0_out\n z = torch.cat((audio_0, audio_1), 1)\n \n ctx.save_for_backward(audio_out.data, spect, speaker_ids, z)\n return z, -log_s\n \n @staticmethod\n def backward(ctx, x_grad, log_s_grad):\n F = ctx.F\n audio_out, spect, speaker_ids, z = ctx.saved_tensors\n \n audio_0, audio_1 = z.chunk(2, 1)\n audio_0, audio_1 = audio_0.contiguous(), audio_1.contiguous()\n dxa, dxb = x_grad.chunk(2, 1)\n dxa, dxb = dxa.contiguous(), dxb.contiguous()\n \n with set_grad_enabled(True):\n audio_0_out = audio_0\n audio_0_out.requires_grad = True\n log_s, t = F(audio_0_out, spect, speaker_ids)\n s = log_s.exp()\n \n with torch.no_grad():\n audio_1_out = audio_1 * s + t\n \n audio_out.storage().resize_(reduce(mul, audio_1_out.shape) * 2)\n torch.cat((audio_0_out, audio_1_out), 1, out=audio_out)\n #audio_out.copy_(zout)\n \n with set_grad_enabled(True):\n param_list = [audio_0_out] + list(F.parameters())\n if ctx.needs_input_grad[1]:\n param_list += [spect]\n if ctx.needs_input_grad[2]:\n param_list += [speaker_ids]\n dtsdza, *dw = grad(torch.cat((-log_s, -t / s), 1), param_list,\n grad_outputs=torch.cat((dxb * audio_1_out / s.detach() + log_s_grad, dxb), 1))\n \n dza = dxa + dtsdza\n dzb = dxb / s.detach()\n dz = torch.cat((dza, dzb), 1)\n if ctx.needs_input_grad[1]:\n *dw, dy = dw\n else:\n dy = None\n if ctx.needs_input_grad[2]:\n *dw, ds = dw\n else:\n ds = None\n \n return (dz, dy, ds, None) + tuple(dw)\n\n\nclass InvertibleConv1x1(nn.Conv1d):\n \"\"\"\n The layer outputs both the convolution, and the log determinant\n of its weight matrix. 
If reverse=True it does convolution with\n inverse\n \"\"\"\n def __init__(self, c, memory_efficient=False):\n super().__init__(c, c, 1, bias=False) # init as nn.Conv1d(c, c, kernel_size=1, stride=1) \n \n # Sample a random orthonormal matrix to initialize weights\n W = torch.qr(torch.FloatTensor(c, c).normal_())[0]\n \n # Ensure determinant is 1.0 not -1.0\n if torch.det(W) < 0:\n W[:,0] = -1*W[:,0]\n W = W.view(c, c, 1)\n self.weight.data = W\n \n if memory_efficient:\n self.efficient_forward = Conv1x1Func.apply\n #self.efficient_inverse = InvConv1x1Func.apply # memory efficient Inverse is not needed and it's making fp16 more complicated so I'm ignoring it for now.\n \n def forward(self, z):\n if hasattr(self, 'efficient_forward'):\n audio_out, log_det_W = self.efficient_forward(z, self.weight)\n z.storage().resize_(0)\n return audio_out, log_det_W\n else:\n *_, n_of_groups = z.shape# [B, C, T//n_group]\n log_det_W = n_of_groups * self.weight.squeeze().float().slogdet()[1] # should fix nan logdet\n audio_out = super().forward(z)\n return audio_out, log_det_W\n \n def inverse(self, audio_out):\n W = self.weight.squeeze()\n if not hasattr(self, 'W_inverse'):\n W_inverse = W.float().inverse().unsqueeze(-1)\n if audio_out.dtype == torch.float16:\n W_inverse = W_inverse.half()\n self.W_inverse = W_inverse\n \n if hasattr(self, 'efficient_inverse'):\n z, log_det_W = self.efficient_inverse(audio_out, self.weight)\n audio_out.storage().resize_(0)\n return z, log_det_W\n else:\n log_det_W = None\n #*_, n_of_groups = audio_out.shape\n #log_det_W = -n_of_groups * weight.slogdet()[1] # should fix nan logdet\n z = F.conv1d(audio_out, self.W_inverse, bias=None, stride=1, padding=0)\n return z, log_det_W\n\n\nclass Conv1x1Func(Function):\n @staticmethod\n def forward(ctx, z, weight):\n with torch.no_grad():\n *_, n_of_groups = z.shape\n if weight.dtype == torch.float16:\n log_det_W = n_of_groups * weight.squeeze().float().slogdet()[1].half()\n else:\n log_det_W = n_of_groups * weight.squeeze().slogdet()[1]\n audio_out = F.conv1d(z, weight)\n \n ctx.save_for_backward(z.data, weight, audio_out)\n return audio_out, log_det_W\n \n @staticmethod\n def backward(ctx, z_grad, log_det_W_grad):\n z, weight, audio_out = ctx.saved_tensors\n *_, n_of_groups = audio_out.shape\n \n with torch.no_grad():\n if weight.dtype == torch.float16:\n inv_weight = weight.squeeze().float().inverse().half()\n else:\n inv_weight = weight.squeeze().inverse()\n z.storage().resize_(reduce(mul, audio_out.shape))\n z[:] = F.conv1d(audio_out, inv_weight.unsqueeze(-1))\n \n dx = F.conv1d(z_grad, weight[..., 0].t().unsqueeze(-1))\n dw = z_grad.transpose(0, 1).contiguous().view(weight.shape[0], -1) @ z.transpose(1, 2).contiguous().view(\n -1, weight.shape[1])\n dw += inv_weight.t() * log_det_W_grad * n_of_groups\n \n return dx, dw.unsqueeze(-1)\n\n\nclass InvConv1x1Func(Function):\n @staticmethod\n def forward(ctx, z, inv_weight, weight):\n with torch.no_grad():\n squ_weight = weight.squeeze()\n *_, n_of_groups = z.shape\n if squ_weight.dtype == torch.float16:\n log_det_W = -squ_weight.float().slogdet()[1].half() * n_of_groups\n audio_out = F.conv1d(z, squ_weight.inverse().unsqueeze(-1).half())\n else:\n log_det_W = -squ_weight.slogdet()[1] * n_of_groups\n audio_out = F.conv1d(z, squ_weight.inverse().unsqueeze(-1))\n \n ctx.save_for_backward(z.data, weight, audio_out)\n return audio_out, log_det_W\n \n @staticmethod\n def backward(ctx, z_grad, log_det_W_grad):\n z, weight, audio_out = ctx.saved_tensors\n *_, n_of_groups = audio_out.shape\n 
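# Rebuild the freed input tensor z from audio_out: the forward pass released\n # z's storage to save memory, so the backward pass must recompute z in place.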
\n with torch.no_grad():\n z.storage().resize_(reduce(mul, audio_out.shape))\n z[:] = F.conv1d(audio_out, weight)\n \n weight = weight.squeeze()\n weight_T = weight.inverse().t()\n dx = F.conv1d(z_grad, weight_T.unsqueeze(-1))\n dw = z_grad.transpose(0, 1).contiguous().view(weight_T.shape[0], -1) @ \\\n z.transpose(1, 2).contiguous().view(-1, weight_T.shape[1])\n dinvw = - weight_T @ dw @ weight_T\n dinvw -= weight_T * log_det_W_grad * n_of_groups\n \n return dx, dinvw.unsqueeze(-1)","sub_path":"CookieTTS/_2_ttm/untts/waveglow/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":11859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"269754517","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\nfrom causality.causalvec.models.base import BaseModel\nfrom causality.causalvec.models.base import BaseData\nfrom time import time\n# import os\n# os.chdir(\"/home/xiezp/KE_Projects/CausalEmbedding/\")\n\n\nclass AttData(BaseData):\n def __init__(self, sample_neg_randomly, num_samples=None):\n BaseData.__init__(self, sample_neg_randomly, num_samples)\n\n def sample_negative(self, batch_size):\n neg_left, neg_right = self.neg_sent(batch_size)\n neg_labels = []\n L = len(neg_left)\n for i in range(L):\n neg_labels.append(0.0)\n left_len, right_len = self.get_len(neg_left, neg_right)\n padded_left, padded_right = self.padding_data(neg_left, neg_right)\n # del neg_left, neg_right\n return list(zip(padded_left, padded_right, neg_labels, left_len, right_len))\n\n def generate_pos_data(self):\n pos_labels = []\n L = len(self.x_left)\n for i in range(L):\n pos_labels.append(1.0)\n padded_left, padded_right = self.padding_data(self.x_left, self.x_right)\n left_len, right_len = self.get_len(self.x_left, self.x_right)\n return list(zip(padded_left, padded_right, pos_labels, left_len, right_len))\n # return list(zip(padded_left, padded_right, pos_mask))\n\n def generate_mixed_data(self):\n left_len, right_len = self.get_len(self.x_left, self.x_right)\n padded_left, padded_right = self.padding_data(self.x_left, self.x_right)\n return list(zip(padded_left, padded_right, self.x_target, left_len, right_len))\n\n\nclass AttModel(BaseModel):\n def __init__(self, embedding_size, batch_size, num_epochs, num_samples, learning_rate, data_loader):\n BaseModel.__init__(self, embedding_size, batch_size, num_epochs, num_samples, learning_rate, data_loader)\n self.alpha, self.gamma = None, None\n\n def train_stage(self, cause_output_path, effect_output_path):\n print('model: Attentive started!\\n')\n with self.sess.as_default():\n if self.sample_neg_randomly:\n data = self.dataLoader.generate_pos_data()\n else:\n data = self.dataLoader.generate_mixed_data()\n for current_epoch in range(self.num_epochs):\n print('current epoch: {} started.'.format(current_epoch+1))\n start_time = time()\n train_batches = self.generate_batches(data, self.batch_size)\n for per_batch in train_batches:\n if self.sample_neg_randomly:\n neg_data = self.dataLoader.sample_negative(self.batch_size)\n new_data = np.concatenate([per_batch, np.array(neg_data)], 0)\n # mixed_data = self.shuffle_pos_neg(pos_batch, neg_data)\n input_left, input_right, input_labels, left_len, right_len = zip(*new_data)\n else:\n input_left, input_right, input_labels, left_len, right_len = zip(*per_batch)\n\n feed_dict = {\n self.input_left: np.array(input_left),\n self.input_right: np.array(input_right),\n self.targets: input_labels,\n self.left_len: left_len,\n 
self.right_len: right_len,\n self.alpha: 0.8,\n self.gamma: 2.0\n }\n self.show_loss(feed_dict)\n acc = self.accuracy()\n print('accuracy at epoch:{} is {}.'.format(current_epoch+1, acc))\n if current_epoch % 10 == 0 and current_epoch != 0:\n self.write_embedding(cause_output_path, effect_output_path, str(current_epoch+1))\n end_time = time()\n print('epoch: {} uses {} mins.\\n'.format(current_epoch+1, float(end_time-start_time)/60))\n\n def construct_graph(self):\n\n with self.graph.as_default():\n session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)\n session_conf.gpu_options.allow_growth = True\n self.sess = tf.Session(config=session_conf)\n\n self.input_left = tf.placeholder(tf.int32, [None, self.max_len])\n self.input_right = tf.placeholder(tf.int32, [None, self.max_len])\n self.left_len = tf.placeholder(tf.float32, [None, ])\n self.right_len = tf.placeholder(tf.float32, [None, ])\n self.targets = tf.placeholder(tf.float32, [None, ])\n self.alpha = tf.placeholder(tf.float32, name='alpha')\n self.gamma = tf.placeholder(tf.float32, name='gamma')\n self.init_embedding()\n\n self.input_left_embed = tf.nn.embedding_lookup(self.cause_embed_dict, self.input_left)\n self.input_right_embed = tf.nn.embedding_lookup(self.effect_embed_dict, self.input_right)\n left_mask = tf.sequence_mask(self.left_len, self.max_len, dtype=tf.float32)\n right_mask = tf.sequence_mask(self.right_len, self.max_len, dtype=tf.float32)\n mask_matrix = tf.matmul(tf.expand_dims(left_mask, 2), tf.expand_dims(right_mask, 1))\n\n logits = self.make_attention(self.input_left_embed, self.input_right_embed)\n\n # softmax over the rows and over the columns of the attention logits\n softmax_1, softmax_2 = self.mask_softmax(logits, mask_matrix)\n left_attentive = tf.matmul(tf.transpose(softmax_1, [0, 2, 1]), self.input_left_embed) # (batch, r, dims)\n right_attentive = tf.matmul(softmax_2, self.input_right_embed) # (batch, l, dims)\n right_interaction = tf.reduce_sum(left_attentive * self.input_right_embed, axis=2) # (batch, r)\n left_interaction = tf.reduce_sum(right_attentive * self.input_left_embed, axis=2) # (batch, l)\n\n right_probs = tf.clip_by_value(\n tf.reduce_max(tf.sigmoid(right_interaction) * right_mask, 1), 1e-5, 1.0 - 1e-5\n ) # (batch,)\n left_probs = tf.clip_by_value(\n tf.reduce_max(tf.sigmoid(left_interaction) * left_mask, 1), 1e-5, 1.0 - 1e-5\n ) # (batch,)\n\n left_pos_fl = tf.reduce_sum(\n -self.alpha * tf.pow(1 - left_probs, self.gamma) * tf.log(left_probs)*self.targets\n )\n right_pos_fl = tf.reduce_sum(\n -self.alpha * tf.pow(1 - right_probs, self.gamma) * tf.log(right_probs)*self.targets\n )\n\n _pro = tf.clip_by_value(tf.sigmoid(logits), 1e-5, 1.0 - 1e-5)\n _3d_focal = (self.alpha - 1) * tf.pow(_pro, self.gamma) * tf.log(1 - _pro) * mask_matrix\n neg_fl = tf.reduce_sum(tf.reduce_sum(tf.reduce_sum(_3d_focal, axis=1), axis=1)*(1.0-self.targets))\n # neg_fl = tf.reduce_sum(\n # ((self.alpha - 1) * tf.pow(_pro, self.gamma) * tf.log(1 - _pro) * mask_matrix)*(1.0-self.targets)\n # )\n\n self.loss = tf.reduce_sum([left_pos_fl, right_pos_fl, neg_fl])\n # self.loss = tf.reduce_sum(tf.maximum(0.0, 1.0-pos_logits+neg_logits))\n\n # self.calculate_similar()\n self.global_steps = tf.Variable(0, trainable=False)\n self.train_op = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(\n self.loss, global_step=self.global_steps)\n self.init = tf.global_variables_initializer()\n 
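# run the init op so all variables exist before train_stage touches them\n 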
self.sess.run(self.init)","sub_path":"causalvec/models/att.py","file_name":"att.py","file_ext":"py","file_size_in_byte":7472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"573118593","text":"from distutils.core import setup\nfrom setuptools import find_packages\n\n\nfiles = [\"Commercial/*\",\"Payment/*\",\"Util/*\",\"Commercial/ECRM/*\",\"Payment/Transfermovil/*\",\"Util/APIDevice/*\"]\n\nsetup(\n name = 'etecsa-sdk',\n packages = ['EtecsaSDK'],\n package_data = {'EtecsaSDK' : files },\n \n version = '1.5', \n license='MIT', \n description = 'Etecsa SDK', \n author = 'sebastian',\n author_email = 'sebastian.rodriguez@etecsa.cu', \n url = 'https://github.com/sebastiancuba/etecsa-sdk', \n download_url = 'https://github.com/sebastiancuba/etecsa-sdk/archive/v1.5.tar.gz', \n keywords = ['sdk'], \n install_requires=[\n 'requests',\n 'validators',\n 'pendulum',\n 'pyyaml',\n 'ua-parser',\n 'user-agents'\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.8',\n ],\n)\n","sub_path":"pypi_install_script/etecsa-sdk-1.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"150622217","text":"import json\nimport os\n\nimport requests\n\nfrom .error import DeadlineExceededError, XBusError\nfrom .ldict import LDict\n\n\nclass Config(object):\n def __init__(self, name, value, version, tag=None):\n self.name = name\n self.value = value\n self.version = version\n self.tag = tag\n\n @classmethod\n def from_dict(cls, d):\n return cls(d['name'], d['value'], d['version'], d.get('tag', None))\n\n def __repr__(self):\n return '<Config %s:%s>' % (self.name, self.version)\n\n def dump(self):\n return dict(name=self.name,\n value=self.value,\n version=self.version,\n tag=self.tag)\n\n\nclass Configs(object):\n def __init__(self, total, configs, skip, limit):\n self.total = total\n self.configs = configs\n self.skip = skip\n self.limit = limit\n\n def __len__(self):\n return len(self.configs)\n\n def __iter__(self):\n for config in self.configs:\n yield config\n\n\nclass ConfigMix(object):\n def __init__(self):\n self._config_revisions = LDict(True)\n super(ConfigMix, self).__init__()\n\n def list_config(self, tag='', prefix='', skip=None, limit=None):\n url = '/api/configs?tag=%s&prefix=%s' % (tag, prefix)\n if skip is not None:\n url += '&skip=%d' % skip\n if limit is not None:\n url += '&limit=%s' % limit\n result = self._request('GET', url)\n return Configs(result['total'], result['configs'], result['skip'],\n result['limit'])\n\n def get_configs(self, *keys):\n url = '/api/configs?keys=%s' % json.dumps(keys)\n result = self._request('GET', url)\n return {\n item['name']: Config.from_dict(item)\n for item in result['configs']\n }\n\n def get_config(self, name):\n result = self._request('GET', '/api/configs/%s' % name)\n self._config_revisions[name] = result['revision']\n return Config.from_dict(result['config'])\n\n def put_config(self, name, value, version=None, tag=None, remark=None):\n data = dict(value=value)\n if version:\n data['version'] = version\n if tag:\n data['tag'] = tag\n if remark:\n data['remark'] = remark\n result = self._request('PUT', '/api/configs/%s' % name, data=data)\n self._config_revisions[name] = result['revision']\n\n def del_config(self, name):\n self._request('DELETE', '/api/configs/%s' % name)\n\n def watch_config(self, name, revision=None, timeout=None):\n params = dict(watch='true')\n if revision is 
None:\n revision = self._cofig_revisions.get(name, 0)\n if revision:\n revision += 1\n if revision:\n params['revision'] = revision\n if timeout:\n params['timeout'] = timeout\n\n while True:\n try:\n result = self._request('GET',\n '/api/configs/%s' % name,\n params=params)\n except DeadlineExceededError:\n if timeout:\n return\n continue\n self._config_revisions[name] = result['revision']\n return Config.from_dict(result['config'])\n\n\nclass ServiceEndpoint(object):\n def __init__(self, address, config=None):\n self.address = address\n self.config = config\n\n def dump(self):\n d = dict(address=self.address)\n if self.config:\n d['config'] = self.config\n return d\n\n def __repr__(self):\n return '' % self.address\n\n\nclass ZoneService(object):\n def __init__(self,\n service,\n type,\n zone='default',\n proto=None,\n description=None,\n endpoints=None,\n **kwargs):\n self.service = service\n self.type = type\n self.zone = zone\n self.proto = proto\n self.description = description\n self.endpoints = []\n if endpoints:\n for endpoint in endpoints:\n if isinstance(endpoint, ServiceEndpoint):\n self.endpoints.append(endpoint)\n else:\n self.endpoints.append(ServiceEndpoint(**endpoint))\n\n def dump(self):\n return dict(service=self.service,\n type=self.type,\n proto=self.proto,\n description=self.description,\n endpoints=[e.dump() for e in self.endpoints])\n\n\nclass Service(object):\n def __init__(self, service, zones=None):\n self.service = service\n self.zones = {}\n if zones:\n for zone, service in zones.items():\n if isinstance(service, ZoneService):\n self.zones[zone] = service\n else:\n self.zones[zone] = ZoneService(**service)\n\n def dump(self):\n return {\n 'service': self.service,\n 'zones': {k: v.dump()\n for k, v in self.zones.items()}\n }\n\n\nclass ServiceMix(object):\n def __init__(self):\n self._service_revisions = LDict(True, default=0)\n self._lease_ids = LDict(default=None)\n\n def get_service(self, service):\n result = self._request('GET', '/api/v1/services/%s' % service)\n self._service_revisions[service] = result['revision']\n return Service(**result['service'])\n\n def search_service(self, name, skip=0, limit=20):\n result = self._request(\n 'GET',\n '/api/v1/services?q=%s&skip=%d&limit=%d' % (name, skip, limit))\n return result\n\n def plug_service(self, service, endpoint, ttl=None, lease_id=None):\n assert isinstance(service, ZoneService)\n data = dict(desc=json.dumps(service.dump()),\n endpoint=json.dumps(endpoint.dump()))\n if ttl:\n data['ttl'] = ttl\n if lease_id:\n data['lease_id'] = lease_id\n result = self._request('POST',\n '/api/v1/services/%s' % service.service,\n data=data)\n self._lease_ids[service.service] = lease_id = result['lease_id']\n return result\n\n def delete_service(self, service, zone=None):\n self._request('DELETE',\n '/api/v1/services/%s?zone=%s' % (service, zone or ''))\n self._service_revisions.pop(service, None)\n self._lease_ids.pop(service, None)\n\n def plug_services(self, services, endpoint, ttl=None, lease_id=None):\n for service in services:\n assert isinstance(service, ZoneService)\n data = dict(endpoint=endpoint, desces=[x.dump() for x in services])\n if ttl:\n data['ttl'] = ttl\n if lease_id:\n data['lease_id'] = lease_id\n result = self._request('POST', '/api/v1/services', data=data)\n lease_id = result['lease_id']\n for service in services:\n self._lease_ids[service.service] = lease_id\n return result\n\n def keepalive_service(self, service):\n lease_id = self._lease_ids[service]\n if lease_id is None:\n raise Exception('%s 
is not pulgged' % service)\n self._request('POST', '/api/leases/%d' % lease_id)\n\n def watch_service(self, service, revision=None, timeout=None):\n params = dict(watch='true')\n if revision is None:\n revision = self._service_revisions[service]\n if revision:\n revision += 1\n if revision:\n params['revision'] = revision\n if timeout:\n params['timeout'] = timeout\n while True:\n try:\n result = self._request('GET',\n '/api/v1/services/%s' % service,\n params=params)\n except DeadlineExceededError:\n if timeout:\n return\n continue\n self._service_revisions[service] = result['revision']\n return Service.from_dict(service, result['service'])\n\n def service_session(self, ttl=None):\n return ServiceSession(self, ttl)\n\n\nclass ServiceSession(object):\n def __init__(self, client, ttl=None):\n self.client = client\n self.ttl = ttl\n self.lease_id = None\n\n def _wrap_call(self, f, *argv, **kwargs):\n if self.lease_id is not None:\n kwargs['lease_id'] = self.lease_id\n result = f(*argv, **kwargs)\n if result['lease_id'] != self.lease_id:\n raise Exception('new lease generated')\n else:\n if self.ttl is not None:\n kwargs['ttl'] = self.ttl\n result = f(*argv, **kwargs)\n self.lease_id = result['lease_id']\n self.ttl = result['ttl']\n\n def plug_service(self, service, endpoint, **kwargs):\n self._wrap_call(self.client.plug_service, service, endpoint, **kwargs)\n\n def plug_services(self, services, endpoint, **kwargs):\n self._wrap_call(self.client.plug_services, services, endpoint,\n **kwargs)\n\n def keepalive(self):\n if self.lease_id is not None:\n self.client.keepalive_lease(self.lease_id)\n\n def close(self):\n if self.lease_id is not None:\n self.client.revoke_lease(self.lease_id)\n self.lease_id = None\n\n\nclass AppMix(object):\n def list_app(self, skip=None, limit=20):\n params = {'limit': limit}\n if skip is not None:\n params['skip'] = skip\n result = self._request('GET', '/api/apps', params=params)\n return result\n\n def add_app(self, name, description, key_bits=2048, days=3650):\n data = dict(name=name,\n description=description,\n key_bits=key_bits,\n days=days)\n result = self._request('PUT', '/api/apps', data=data)\n return result\n\n\nclass XBusClient(ConfigMix, ServiceMix, AppMix):\n def __init__(self,\n endpoint,\n cert=None,\n key=None,\n dev_app=None,\n verify=None):\n if not dev_app:\n if key is None and cert is None:\n app_name = os.environ.get('APP_NAME', None)\n if app_name:\n dev_app = app_name\n if verify is None and os.path.exists('cacert.pem'):\n verify = 'cacert.pem'\n self.endpoint = endpoint\n self.cert = cert\n self.key = key\n self.verify = verify\n self.dev_app = dev_app\n super(XBusClient, self).__init__()\n\n def _request(self, method, path, params=None, data=None):\n headers = {}\n if self.dev_app:\n headers['Dev-App'] = self.dev_app\n rep = requests.request(method,\n self.endpoint + path,\n params=params,\n data=data,\n cert=(self.cert, self.key),\n verify=self.verify,\n headers=headers)\n result = rep.json()\n if result['ok']:\n return result.get('result', None)\n raise XBusError.new_error(result['error']['code'],\n result['error'].get('message', None))\n\n def revoke_lease(self, lease_id):\n self._request('DELETE', '/api/leases/%d' % lease_id)\n\n def keepalive_lease(self, lease_id):\n self._request('POST', '/api/leases/%d' % lease_id)\n","sub_path":"python/xbus/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":11705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"502892504","text":"import numpy as np\n\nfrom td3fd.ddpg.ddpg import DDPG\n\ndefault_params = {\n # config summary\n \"config\": \"default\",\n # environment config\n \"env_name\": \"FetchReach-v1\",\n \"r_scale\": 1.0, # scale the reward of the environment down\n \"r_shift\": 0.0, # shift the reward of the environment up\n \"eps_length\": 0, # overwrite the default length of the episode provided in _max_episode_steps\n \"env_args\": {}, # extra arguments passed to the environment\n \"fix_T\": True, # whether or not to fix episode length for all rollouts (if false, then use the ring buffer)\n # DDPG config\n \"ddpg\": {\n \"num_epochs\": 10,\n \"num_cycles\": 10, # per epoch\n \"num_batches\": 40, # training batches per cycle\n # replay buffer setup\n \"buffer_size\": int(1e6),\n # actor critic networks\n \"scope\": \"ddpg\",\n \"use_td3\": 1, # whether or not to use td3\n \"layer_sizes\": [256, 256, 256], # number of neurons in each hidden layer\n \"q_lr\": 0.001, # critic learning rate\n \"pi_lr\": 0.001, # actor learning rate\n \"action_l2\": 1.0, # quadratic penalty on actions (before rescaling by max_u)\n \"batch_size\": 256, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.\n # double q learning\n \"polyak\": 0.95, # polyak averaging coefficient for double q learning\n # use demonstrations\n \"sample_demo_buffer\": 0, # whether or not to sample from demonstration buffer\n \"batch_size_demo\": 128, # number of samples to be used from the demonstrations buffer, per mpi thread\n \"use_demo_reward\": 0, # whether or not to assume that demonstrations have rewards, and train it on the critic\n \"num_demo\": 0, # number of expert demo episodes\n \"demo_strategy\": \"none\", # choose between [\"none\", \"bc\", \"nf\", \"gan\"]\n \"bc_params\": {\n \"q_filter\": 1, # whether or not a Q value filter should be used on the actor outputs\n \"prm_loss_weight\": 0.001, # weight corresponding to the primary loss\n \"aux_loss_weight\": 0.0078, # weight corresponding to the auxilliary loss (also called the cloning loss)\n },\n \"shaping_params\": {\n \"batch_size\": 128, # batch size for training the potential function (gan and nf)\n \"num_epochs\": int(1e3),\n \"nf\": {\n \"num_ens\": 1, # number of nf ensembles\n \"nf_type\": \"maf\", # choose between [\"maf\", \"realnvp\"]\n \"lr\": 1e-4,\n \"num_masked\": 2, # used only when nf_type is set to realnvp\n \"num_bijectors\": 6, # number of bijectors in the normalizing flow\n \"layer_sizes\": [512, 512], # number of neurons in each hidden layer\n \"prm_loss_weight\": 1.0,\n \"reg_loss_weight\": 500.0,\n \"potential_weight\": 5.0,\n },\n \"gan\": {\n \"num_ens\": 1, # number of gan ensembles\n \"layer_sizes\": [256, 256], # number of neurons in each hidden layer (both generator and discriminator)\n \"latent_dim\": 6, # generator latent space dimension\n \"gp_lambda\": 0.1, # weight on gradient penalty (refer to WGAN-GP)\n \"critic_iter\": 5,\n \"potential_weight\": 3.0,\n },\n },\n # normalize observation\n \"norm_eps\": 0.01, # epsilon used for observation normalization\n \"norm_clip\": 5, # normalized observations are cropped to this values\n # i/o clippings\n \"clip_obs\": 200.0,\n \"clip_pos_returns\": False, # whether or not this environment has positive return.\n \"clip_return\": False,\n },\n # rollouts config\n \"rollout\": {\n \"rollout_batch_size\": 4,\n \"noise_eps\": 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u\n \"polyak_noise\": 0.0, # use 
polyak_noise * last_noise + (1 - polyak_noise) * curr_noise\n \"random_eps\": 0.3, # percentage of time a random action is taken\n \"compute_q\": False,\n \"history_len\": 10, # make sure that this is same as number of cycles\n },\n \"evaluator\": {\n \"rollout_batch_size\": 20,\n \"noise_eps\": 0.0,\n \"polyak_noise\": 0.0,\n \"random_eps\": 0.0,\n \"compute_q\": True,\n \"history_len\": 1,\n },\n \"seed\": 0,\n}\n\n\ndef configure_ddpg(params):\n # Extract relevant parameters.\n ddpg_params = params[\"ddpg\"]\n\n # Update parameters\n ddpg_params.update(\n {\n \"max_u\": params[\"max_u\"],\n \"input_dims\": params[\"dims\"].copy(), # agent takes an input observations\n \"eps_length\": params[\"eps_length\"],\n \"fix_T\": params[\"fix_T\"],\n \"clip_return\": (1.0 / (1.0 - params[\"gamma\"])) if params[\"ddpg\"][\"clip_return\"] else np.inf,\n \"gamma\": params[\"gamma\"],\n \"info\": {\n \"env_name\": params[\"env_name\"],\n \"r_scale\": params[\"r_scale\"],\n \"r_shift\": params[\"r_shift\"],\n \"eps_length\": params[\"eps_length\"],\n \"env_args\": params[\"env_args\"],\n },\n }\n )\n policy = DDPG(**ddpg_params)\n return policy\n\n","sub_path":"Package/td3fd/td3fd/ddpg/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"192877595","text":"import gzip\nimport json\nimport struct\n\nfrom MediaPlayer.Subtitles.SubtitleSourceBase import SubtitleSourceBase\nfrom Shared.Logger import Logger, LogVerbosity\nfrom Shared.Network import RequestFactory\n\n\nclass SubtitlesOpenSubtitles(SubtitleSourceBase):\n def __init__(self):\n super().__init__()\n\n def get_subtitles(self, size, file_length, file_name, first_64k, last_64k):\n result_raw = RequestFactory.make_request(\"https://rest.opensubtitles.org/search/moviebytesize-\" + str(size) + \"/moviehash-\" + str(self.get_hash(size, first_64k, last_64k))\n + \"/sublanguageid-eng\", \"GET\", useragent=\"mediaplayerjk\")\n if not result_raw:\n Logger().write(LogVerbosity.Info, \"Failed to get subtitles\")\n return []\n\n result = json.loads(result_raw.decode('utf8'))\n paths = []\n\n results_correct_name = [x for x in result if x['MovieReleaseName'] in file_name]\n Logger().write(LogVerbosity.Debug, \"Subs with correct name (\" + file_name + \"): \" + str(len(results_correct_name)))\n added = 0\n for sub in results_correct_name:\n path = self.download_sub(sub)\n paths.append(path)\n added += 1\n if added == 2:\n break\n\n results_correct_size = [x for x in result if abs(int(x['MovieTimeMS']) - file_length) < 10]\n Logger().write(LogVerbosity.Debug, \"Subs with correct size (\" + str(file_length) + \"): \" + str(len(results_correct_size)))\n added = 0\n for sub in results_correct_size:\n path = self.download_sub(sub)\n paths.append(path)\n added += 1\n if added == 2:\n break\n\n results_other = [x for x in result if x not in results_correct_size and x not in results_correct_name]\n Logger().write(LogVerbosity.Debug, \"Subs other: \" + str(len(results_other)))\n added = 0\n for sub in results_other:\n path = self.download_sub(sub)\n paths.append(path)\n added += 1\n if added == 2:\n break\n return paths\n\n @staticmethod\n def download_sub(sub):\n download_link = sub['SubDownloadLink']\n download_result = RequestFactory.make_request(download_link)\n sub_act_data = gzip.decompress(download_result)\n return SubtitleSourceBase.save_file(sub['IDSubtitleFile'], sub_act_data)\n\n @staticmethod\n def get_hash(size, first, last):\n 
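# moviehash for the OpenSubtitles API, computed from the file size and its first/last 64 KiB\n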
longlongformat = '2} | {:^15} | {:^3} | {:^3} | {:^3} |\".format(\n 'i',\n 'Name',\n '*C',\n '*H',\n '*W'))\n print('----------------------------------------')\n for i, op in enumerate(operation_set):\n print(\"{:>2} | {:<15} | {:^3} | {:^3} | {:^3} |\".format(\n i,\n op.name,\n op.mod[1],\n op.mod[2],\n op.mod[3],\n ))\n return operation_set\n\n\n# === OPERATIONS =======================================================================================================\n# build 2d grouped convolution from input channels, output channels, kernel_size, and stride\n# just a handy shortcut to avoid the lengthy function call each time\ndef conv2d(c_in, c_out, k, s=1):\n return nn.Conv2d(c_in,\n c_out,\n kernel_size=k,\n stride=s,\n padding=padsize(k=k, s=s),\n groups=c_in,\n bias=False)\n\n\n# operation object\nOperation = namedtuple('Operation', ['name', 'function', 'mod'])\n\n\n# build operation object\ndef build_operation(name, function, mod=None):\n if mod is None:\n mod = [1, 1, 1]\n mod = [1] + mod\n return Operation(name=name, function=function, mod=mod)\n\n\n# === CELL VISUALIZER ==================================================================================================\ndef cell_visualizer(cell):\n if not viz:\n return None\n G = graphviz.Digraph()\n for key, val in cell.items():\n a, b = str(key[0]), str(key[1])\n G.node(a)\n G.node(b)\n for op in val:\n G.edge(a,b,label=op)\n return G\n\n\n# === OUTPUT FUNCTIONS =================================================================================================\n# sample classification output module\nclass Classifier(nn.Module):\n def __init__(self, dim, output_size):\n super().__init__()\n self.pool = nn.AdaptiveAvgPool2d(1)\n self.linear = nn.Linear(int(dim[1]), output_size)\n\n def forward(self, x):\n x = self.pool(x)\n x = x.view(x.size(0), -1)\n return self.linear(x)\n\n\n# sample regression output module\nclass Regressor(nn.Module):\n def __init__(self, dim, output_size):\n super().__init__()\n self.pool = nn.AdaptiveAvgPool2d(1)\n self.linear = nn.Linear(int(dim[1]), output_size)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n x = self.pool(x)\n x = x.view(x.size(0), -1)\n x = self.linear(x)\n return self.sigmoid(x)\n\n\n# convert output module into curried function for input into the model encoder\ndef build_output(output_type, output_size):\n return lambda dim: output_type(dim, output_size=output_size)\n\n\n# === SOME COMMON OPERATIONS PROVIDED ==================================================================================\nIdentity = build_operation('Identity', lambda c: nn.Sequential())\nReLU = build_operation('ReLU', lambda c: nn.ReLU())\nBatchNorm = build_operation('BatchNorm', lambda c: nn.BatchNorm2d(c, affine=True))\nConv1x1 = build_operation('Conv_3x3', lambda c: conv2d(c, c, k=1))\nConv3x3 = build_operation('Conv3x3', lambda c: conv2d(c, c, k=3))\nMaxPool3x3 = build_operation('Max_Pool3x3', lambda c: nn.MaxPool2d(3, stride=1, padding=padsize(s=1)))\n\n\n","sub_path":"model_encoder/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"640355665","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2005, TUBITAK/UEKAE\n#\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 2 of the License, or (at your\n# option) any 
later version. Please read the COPYING file.\n#\n\nimport os\nimport array\nimport fcntl\nimport struct\nimport socket\nimport csapi\nimport popen2\nfrom glob import glob\n\nclass ifconfig:\n \"\"\" ioctl stuff \"\"\"\n\n IFNAMSIZ = 16 # interface name size\n\n # From \n\n SIOCGIFADDR = 0x8915 # get PA address\n SIOCGIFBRDADDR = 0x8919 # get broadcast PA address\n SIOCGIFCONF = 0x8912 # get iface list\n SIOCGIFFLAGS = 0x8913 # get flags\n SIOCGIFMTU = 0x8921 # get MTU size\n SIOCGIFNETMASK = 0x891b # get network PA mask\n SIOCSIFADDR = 0x8916 # set PA address\n SIOCSIFBRDADDR = 0x891a # set broadcast PA address\n SIOCSIFFLAGS = 0x8914 # set flags\n SIOCSIFMTU = 0x8922 # set MTU size\n SIOCSIFNETMASK = 0x891c # set network PA mask\n\n # From \n\n IFF_UP = 0x1 # Interface is up.\n IFF_BROADCAST = 0x2 # Broadcast address valid.\n IFF_DEBUG = 0x4 # Turn on debugging.\n IFF_LOOPBACK = 0x8 # Is a loopback net.\n IFF_POINTOPOINT = 0x10 # Interface is point-to-point link.\n IFF_NOTRAILERS = 0x20 # Avoid use of trailers.\n IFF_RUNNING = 0x40 # Resources allocated.\n IFF_NOARP = 0x80 # No address resolution protocol.\n IFF_PROMISC = 0x100 # Receive all packets.\n IFF_ALLMULTI = 0x200 # Receive all multicast packets.\n IFF_MASTER = 0x400 # Master of a load balancer.\n IFF_SLAVE = 0x800 # Slave of a load balancer.\n IFF_MULTICAST = 0x1000 # Supports multicast.\n IFF_PORTSEL = 0x2000 # Can set media type.\n IFF_AUTOMEDIA = 0x4000 # Auto media select active.\n\n\n def __init__(self):\n # create a socket to communicate with system\n self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n def _ioctl(self, func, args):\n return fcntl.ioctl(self.sockfd.fileno(), func, args)\n\n def _call(self, ifname, func, ip = None):\n\n if ip is None:\n data = (ifname + '\\0'*32)[:32]\n else:\n ifreq = (ifname + '\\0' * self.IFNAMSIZ)[:self.IFNAMSIZ]\n data = struct.pack(\"16si4s10x\", ifreq, socket.AF_INET, socket.inet_aton(ip))\n\n try:\n result = self._ioctl(func, data)\n except IOError:\n return None\n\n return result\n\n def _readsys(self, ifname, f):\n try:\n fp = file(os.path.join(\"/sys/class/net\", ifname, f))\n result = fp.readline().rstrip('\\n')\n fp.close()\n except IOError:\n return None\n \n return result\n\n def getInterfaceList(self):\n \"\"\" Get all interface names in a list \"\"\"\n # get interface list\n buffer = array.array('c', '\\0' * 1024)\n ifconf = struct.pack(\"iP\", buffer.buffer_info()[1], buffer.buffer_info()[0])\n result = self._ioctl(self.SIOCGIFCONF, ifconf)\n\n # loop over interface names\n iflist = []\n size, ptr = struct.unpack(\"iP\", result)\n for idx in range(0, size, 32):\n ifconf = buffer.tostring()[idx:idx+32]\n name, dummy = struct.unpack(\"16s16s\", ifconf)\n name, dummy = name.split('\\0', 1)\n iflist.append(name)\n\n return iflist\n\n def getAddr(self, ifname):\n \"\"\" Get the inet addr for an interface \"\"\"\n result = self._call(ifname, self.SIOCGIFADDR)\n return socket.inet_ntoa(result[20:24])\n\n def getNetmask(self, ifname):\n \"\"\" Get the netmask for an interface \"\"\"\n result = self._call(ifname, self.SIOCGIFNETMASK)\n return socket.inet_ntoa(result[20:24])\n\n def getBroadcast(self, ifname):\n \"\"\" Get the broadcast addr for an interface \"\"\"\n result = self._call(ifname, self.SIOCGIFBRDADDR)\n return socket.inet_ntoa(result[20:24])\n\n def getStatus(self, ifname):\n \"\"\" Check whether interface is UP \"\"\"\n result = self._call(ifname, self.SIOCGIFFLAGS)\n flags, = struct.unpack('H', result[16:18])\n return (flags & self.IFF_UP) != 0\n\n def 
getMTU(self, ifname):\n \"\"\" Get the MTU size of an interface \"\"\"\n data = self._call(ifname, self.SIOCGIFMTU)\n mtu = struct.unpack(\"16si12x\", data)[1]\n return mtu\n\n def getMAC(self, ifname):\n \"\"\" Get MAC address of an interface \"\"\"\n mac = self._readsys(ifname, \"address\")\n return mac\n\n def getRX(self, ifname):\n \"\"\" Get received bytes of an interface \"\"\"\n rx = self._readsys(ifname, \"statistics/rx_bytes\")\n return int(rx)\n\n def getTX(self, ifname):\n \"\"\" Get transferred bytes of an interface \"\"\"\n tx = self._readsys(ifname, \"statistics/tx_bytes\")\n return int(tx)\n\n def setAddr(self, ifname, ip):\n \"\"\" Set the inet addr for an interface \"\"\"\n result = self._call(ifname, self.SIOCSIFADDR, ip)\n\n if socket.inet_ntoa(result[20:24]) is ip:\n return True\n else:\n return None\n\n def setNetmask(self, ifname, ip):\n \"\"\" Set the netmask for an interface \"\"\"\n result = self._call(ifname, self.SIOCSIFNETMASK, ip)\n\n if socket.inet_ntoa(result[20:24]) is ip:\n return True\n else:\n return None\n\n def setBroadcast(self, ifname, ip):\n \"\"\" Set the broadcast addr for an interface \"\"\"\n result = self._call(ifname, self.SIOCSIFBRDADDR, ip)\n\n if socket.inet_ntoa(result[20:24]) is ip:\n return True\n else:\n return None\n\n def setStatus(self, ifname, status):\n \"\"\" Set interface status (UP/DOWN) \"\"\"\n ifreq = (ifname + '\\0' * self.IFNAMSIZ)[:self.IFNAMSIZ]\n\n if status is \"UP\":\n flags = self.IFF_UP\n flags |= self.IFF_RUNNING\n flags |= self.IFF_BROADCAST\n flags |= self.IFF_MULTICAST\n flags &= ~self.IFF_NOARP\n flags &= ~self.IFF_PROMISC\n elif status is \"DOWN\":\n result = self._call(ifname, self.SIOCGIFFLAGS)\n flags, = struct.unpack('H', result[16:18])\n flags &= ~self.IFF_UP\n else:\n return None\n\n data = struct.pack(\"16sh\", ifreq, flags)\n result = self._ioctl(self.SIOCSIFFLAGS, data)\n return result\n\n def setMTU(self, ifname, mtu):\n \"\"\" Set the MTU size of an interface \"\"\"\n ifreq = (ifname + '\\0' * self.IFNAMSIZ)[:self.IFNAMSIZ]\n\n data = struct.pack(\"16si\", ifreq, mtu)\n result = self._ioctl(self.SIOCSIFMTU, data)\n\n if struct.unpack(\"16si\", result)[1] is mtu:\n return True\n else:\n return None\n\n\nclass Route:\n \"\"\" ioctl stuff \"\"\"\n\n # From \n\n SIOCADDRT = 0x890B # add routing table entry\n SIOCDELRT = 0x890C # delete routing table entry\n SIOCRTMSG = 0x890D # call to routing system\n INADDR_ANY = '\\0' * 4 # Any Internet Address\n\n def delRoute(self, gw, dst = \"0.0.0.0\", mask = \"0.0.0.0\"):\n \"\"\" Delete a route entry from kernel routing table \"\"\"\n try:\n csapi.changeroute(self.SIOCDELRT, gw, dst, mask)\n except:\n pass\n\n def delDefaultRoute(self):\n \"\"\" Delete the default gw, which is a route entry with gateway to Any Internet Address \"\"\"\n self.delRoute(\"0.0.0.0\")\n\n def setDefaultRoute(self, gw, dst = \"0.0.0.0\", mask = \"0.0.0.0\"):\n \"\"\" Set the default gateway. 
To do this we must delete the previous default gateway\n and the route entry set for gw, if any, or we will end up with multiple entries \"\"\"\n\n self.delDefaultRoute()\n self.delRoute(gw)\n try:\n csapi.changeroute(self.SIOCADDRT, gw, dst, mask)\n except:\n pass\n\n\nclass Dhcp:\n def _run(self, args):\n cmd = \"/sbin/dhcpcd \" + args\n a = popen2.Popen4(cmd)\n\n return a.wait() \n\n def start(self, ifname, timeout = \"30\"):\n \"\"\" Start the DHCP client daemon \"\"\"\n # Maybe we should leave this to GUI\n if ifname in self.getRunning():\n self.stop(ifname)\n\n # -R -Y -N to prevent dhcpcd rewrite nameservers\n #  we should add nameservers, not rewrite them\n # -H to set hostname due to info from server\n # -t for timeout\n\n args = \"-R -Y -N -H -t \" + timeout + \" \" + ifname\n return self._run(args)\n\n def stop(self, ifname):\n \"\"\" Stop DHCP client daemon \"\"\"\n args = \"-k \" + ifname\n return self._run(args)\n\n def getNameServers(self, ifname):\n \"\"\" Get DNS server list provided by the server \"\"\"\n info_file = \"/var/lib/dhcpc/dhcpcd-\" + ifname + \".info\"\n\n try:\n f = file(info_file)\n for line in f.readlines():\n if not line.find(\"DNS=\"):\n return line[line.find(\"DNS=\")+4:].rstrip('\\n').split(',')\n f.close()\n except IOError:\n return \"Could not open file\" # FIXME: put an error message here\n\n def getRunning(self):\n d = []\n for i in glob(\"/var/run/dhcpcd-*.pid\"):\n d.append(i.rstrip(\".pid\").lstrip(\"/var/run/dhcpcd-\"))\n return d\n\n\n\ndef sysValue(path, dir, file_):\n f = file(os.path.join(path, dir, file_))\n data = f.read().rstrip('\\n')\n f.close()\n return data\n\ndef queryUSB(vendor, device):\n # dependency to pciutils!\n f = file(\"/usr/share/misc/usb.ids\")\n flag = 0\n company = \"\"\n for line in f.readlines():\n if flag == 0:\n if line.startswith(vendor):\n flag = 1\n company = line[5:].strip()\n else:\n if line.startswith(\"\\t\"):\n if line.startswith(\"\\t\" + device):\n return \"%s - %s\" % (line[6:].strip(), company)\n elif not line.startswith(\"#\"):\n flag = 0\n if company != \"\":\n return \"%s (%s)\" % (company, device)\n else:\n return \"Unknown (%s:%s)\" % (vendor, device)\n\ndef queryPCI(vendor, device):\n # dependency to pciutils!\n f = file(\"/usr/share/misc/pci.ids\")\n flag = 0\n company = \"\"\n for line in f.readlines():\n if flag == 0:\n if line.startswith(vendor):\n flag = 1\n company = line[5:].strip()\n else:\n if line.startswith(\"\\t\"):\n if line.startswith(\"\\t\" + device):\n return \"%s - %s\" % (line[6:].strip(), company)\n elif not line.startswith(\"#\"):\n flag = 0\n return \"Unknown (%s:%s)\" % (vendor, device)\n\n# Internal functions\n\nARPHRD_ETHER = 1\nsysfs_path = \"/sys/class/net\"\n\ndef lremove(str, pre):\n\tif str.startswith(pre):\n\t\treturn str[len(pre):]\n\treturn str\n\ndef _device_uid_internal(dev):\n type, rest = sysValue(sysfs_path, dev, \"device/modalias\").split(\":\", 1)\n if type == \"pci\":\n vendor = lremove(sysValue(sysfs_path, dev, \"device/vendor\"), \"0x\")\n device = lremove(sysValue(sysfs_path, dev, \"device/device\"), \"0x\")\n id = \"pci:%s_%s_%s\" % (vendor, device, dev)\n elif type == \"usb\":\n for file_ in os.listdir(os.path.join(sysfs_path, dev, \"device/driver\")):\n if \":\" in file_:\n path = dev + \"/device/bus/devices/%s\" % file_.split(\":\", 1)[0]\n vendor = sysValue(sysfs_path, path, \"idVendor\")\n device = sysValue(sysfs_path, path, \"idProduct\")\n id = \"usb:%s_%s_%s\" % (vendor, device, dev)\n break\n else:\n id = \"usb:unknown_%s\" % dev\n else:\n id = 
\"%s:unknown_%s\" % (type, dev)\n \n return id\n\ndef _device_uid(dev):\n try:\n id = _device_uid_internal(dev)\n except:\n id = \"unk:unknown_%s\" % dev\n \n return id\n\ndef _device_check(dev, uid):\n dev_uid = _device_uid(dev)\n t1 = dev_uid.rsplit(\"_\", 1)\n t2 = uid.rsplit(\"_\", 1)\n return t1[0] == t2[0]\n\ndef _device_dev(uid):\n t = uid.rsplit(\"_\", 1)\n if _device_check(t[1], uid):\n return t[1]\n iflist = []\n for iface in os.listdir(sysfs_path):\n if csapi.atoi(sysValue(sysfs_path, iface, \"type\")) == ARPHRD_ETHER:\n iflist.append(_device_uid(iface))\n for dev in iflist:\n if _device_check(dev, uid):\n return dev\n return None\n\ndef _device_info(uid):\n t = uid.split(':', 1)\n if len(t) < 2:\n return \"Unknown (%s)\" % uid\n vendor, device, dev = t[1].split('_')\n if t[0] == \"pci\":\n return queryPCI(vendor, device)\n elif t[0] == \"usb\":\n return queryUSB(vendor, device)\n return \"Unknown (%s)\"\n\ndef _get(dict, key, default):\n val = default\n if dict and dict.has_key(key):\n val = dict[key]\n return val\n\n\nclass Dev:\n def __init__(self, name):\n dict = get_instance(\"name\", name)\n self.uid = _get(dict, \"device\", None)\n self.name = name\n self.dev = None\n if self.uid:\n self.dev = _device_dev(self.uid)\n self.state = _get(dict, \"state\", \"down\")\n self.mode = _get(dict, \"mode\", \"auto\")\n self.address = _get(dict, \"address\", None)\n self.gateway = _get(dict, \"gateway\", None)\n self.mask = _get(dict, \"mask\", None)\n \n def up(self):\n ifc = ifconfig()\n if self.mode == \"manual\":\n if self.address:\n ifc.setAddr(self.dev, self.address)\n ifc.setStatus(self.dev, \"UP\")\n if self.gateway:\n route = Route()\n route.setDefaultRoute(self.gateway)\n notify(\"Net.Link.stateChanged\", self.name + \"\\nup\")\n else:\n dd = Dhcp()\n notify(\"Net.Link.stateChanged\", self.name + \"\\nconnecting\")\n dd.start(self.dev, timeout=\"20\")\n if ifc.getStatus(self.dev):\n notify(\"Net.Link.stateChanged\", self.name + \"\\nup\")\n else:\n notify(\"Net.Link.stateChanged\", self.name + \"\\ndown\")\n fail(\"DHCP failed\")\n \n def down(self):\n if self.mode != \"manual\":\n dd = Dhcp()\n dd.stop(self.dev)\n ifc = ifconfig()\n ifc.setStatus(self.dev, \"DOWN\")\n notify(\"Net.Link.stateChanged\", self.name + \"\\ndown\")\n\n\n# Net.Link API\n\ndef kernelEvent(data):\n type, dir = data.split(\"@\", 1)\n devname = lremove(dir, \"/class/net/\")\n flag = 1\n \n if type == \"add\":\n if os.path.exists(os.path.join(sysfs_path, devname, \"wireless\")):\n return\n devuid = _device_uid(devname)\n notify(\"Net.Link.deviceChanged\", \"added net %s %s\" % (devuid, _device_info(devuid)))\n conns = instances(\"name\")\n for conn in conns:\n dev = Dev(conn)\n if dev.uid and devuid == dev.uid:\n if dev.state == \"up\":\n dev.up()\n return\n flag = 0\n if flag:\n notify(\"Net.Link.deviceChanged\", \"new net %s %s\" % (devuid, _device_info(devuid)))\n \n elif type == \"remove\":\n conns = instances(\"name\")\n for conn in conns:\n dev = Dev(conn)\n if dev.uid and dev.uid.rsplit(\"_\", 1)[1] == devname:\n if dev.state == \"up\":\n notify(\"Net.Link.stateChanged\", dev.name + \"\\ndown\")\n notify(\"Net.Link.deviceChanged\", \"removed net %s\" % devname)\n\ndef modes():\n return \"device,net,auto\"\n\ndef linkInfo():\n return \"\\n\".join([\n \"net\",\n \"Ethernet network\",\n \"\"\n ])\n\ndef deviceList():\n iflist = []\n for iface in os.listdir(sysfs_path):\n if csapi.atoi(sysValue(sysfs_path, iface, \"type\")) == ARPHRD_ETHER:\n if not os.path.exists(os.path.join(sysfs_path, iface, 
\"wireless\")):\n uid = _device_uid(iface)\n info = _device_info(uid)\n iflist.append(\"%s %s\" % (uid, info))\n return \"\\n\".join(iflist)\n\ndef scanRemote():\n fail(\"Not supported\")\n\ndef setConnection(name=None, device=None):\n dict = get_instance(\"name\", name)\n if dict and dict.has_key(\"device\"):\n notify(\"Net.Link.connectionChanged\", \"configured device \" + name)\n else:\n notify(\"Net.Link.connectionChanged\", \"added \" + name)\n\ndef deleteConnection(name=None):\n dev = Dev(name)\n if dev.dev and dev.state == \"up\":\n dev.down()\n notify(\"Net.Link.connectionChanged\", \"deleted \" + name)\n\ndef setAddress(name=None, mode=None, address=None, mask=None, gateway=None):\n dev = Dev(name)\n if dev.state == \"up\":\n dev.address = address\n dev.gateway = gateway\n dev.up()\n notify(\"Net.Link.connectionChanged\", \"configured address \" + name)\n\ndef setRemote(name=None, remote=None):\n fail(\"Not supported\")\n\ndef setState(name=None, state=None):\n dev = Dev(name)\n if state != \"up\" and state != \"down\":\n fail(\"unknown state\")\n \n notify(\"Net.Link.connectionChanged\", \"configured state \" + name)\n \n if not dev.dev:\n fail(\"Device not found\")\n \n if state == \"up\":\n dev.up()\n else:\n if dev.state == \"up\":\n dev.down()\n\ndef connections():\n list = instances(\"name\")\n if list:\n return \"\\n\".join(list)\n return \"\"\n\ndef connectionInfo(name=None):\n dict = get_instance(\"name\", name)\n if not dict:\n fail(\"No such connection\")\n s = \"\\n\".join([name, dict[\"device\"], _device_info(dict[\"device\"])])\n return s\n\ndef getAddress(name=None):\n dev = Dev(name)\n if not dev:\n fail(\"No such connection\")\n\n if dev.mode == \"auto\":\n # FIXME: query interface\n s = \"\\n\".join([name, dev.mode, '', ''])\n else:\n s = \"\\n\".join([name, dev.mode, dev.address, dev.gateway])\n if dev.mask:\n s += \"\\n\" + dev.mask\n return s\n\ndef getState(name=None):\n dev = Dev(name)\n if not dev:\n fail(\"No such connection\")\n return name + \"\\n\" + dev.state\n","sub_path":"system/base/net-tools/comar/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":18111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"46252485","text":"from enum import unique\nfrom json.decoder import JSONDecodeError\nimport os\nimport pathlib\nimport re\nfrom flask.helpers import get_load_dotenv\nimport requests\nfrom datetime import datetime, time\nimport timedelta\nfrom flask import Flask, session, abort, redirect, request, render_template, url_for\nfrom google.oauth2 import id_token\nfrom google_auth_oauthlib.flow import Flow\nfrom pip._vendor import cachecontrol\nimport google.auth.transport.requests\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.orm import backref\napp = Flask(\"Google Login App\")\napp.secret_key = \"ddsdadw\"\napp.config['SQLALCHEMY_DATABASE_URI']=os.environ.get('DATABASE_URL')\n# app.config['SQLALCHEMY_DATABASE_URI']='sqlite:///db.sqlite3'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb=SQLAlchemy(app)\nimport os.path\n\n\nimport os\nfrom flask import send_from_directory\nimport string\nALPHABET = string.ascii_uppercase + string.ascii_lowercase + \\\n string.digits + '-_'\nALPHABET_REVERSE = dict((c, i) for (i, c) in enumerate(ALPHABET))\nBASE = len(ALPHABET)\nSIGN_CHARACTER = '$'\n\ndef num_encode(n):\n if n < 0:\n return SIGN_CHARACTER + num_encode(-n)\n s = []\n while True:\n n, r = divmod(n, BASE)\n s.append(ALPHABET[r])\n if n == 0: break\n return 
''.join(reversed(s))\n\ndef num_decode(s):\n if s[0] == SIGN_CHARACTER:\n return -num_decode(s[1:])\n n = 0\n for c in s:\n n = n * BASE + ALPHABET_REVERSE[c]\n return n\n\n\n\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'favicon.ico', mimetype='image/vnd.microsoft.icon')\n\n \n\nclass User(db.Model):\n __tablename__ = 'user'\n\n id = db.Column(db.Integer, primary_key=True)\n name=db.Column(db.String(50), nullable=False)\n google_id=db.Column(db.String(30), unique=True, nullable=False)\n\n chats = db.relationship('Chat', backref='user')\n\nclass Chat(db.Model):\n __tablename__ = 'chat'\n id = db.Column(db.Integer, primary_key=True)\n message=db.Column(db.String(1000), nullable=False)\n from_id=db.Column(db.String(30), db.ForeignKey('user.google_id'), nullable=False)\n to_id=db.Column(db.String(30), nullable=False)\n time=db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n\nclass Report(db.Model):\n __tablename__ = 'reported'\n id = db.Column(db.Integer, primary_key=True)\n reported_by=db.Column(db.String(30), nullable=False)\n reported=db.Column(db.String(30), nullable=False)\n time=db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n\n\ndef get_chats(from_id, to_id):\n timenow = datetime.utcnow()\n# # printing initial_date\n# print (ini_time_for_now)\n \n my_list=[from_id, to_id]\n reversed_list=my_list\n reversed_list.reverse()\n # my_dict={{}}\n chats_dict={}\n get_chat=Chat.query.filter(Chat.from_id.in_(my_list),Chat.to_id.in_(reversed_list)).order_by(Chat.time.asc()).all()\n # get_chat=Chat.query.filter(Chat.from_id.in_(my_list),Chat.to_id.in_(reversed_list)).order_by(Chat.time.asc())\n index=0\n for chat in get_chat:\n chats_dict[index]={}\n # print(f'{chat.message}, Sender = {chat.user.name}, time = {chat.time}')\n td = timedelta.Timedelta(timenow - chat.time)\n if td.total.hours<=1:\n chats_dict[index][\"timestamp\"]=str(td.total.minutes) + ' minutes '\n elif td.total.hours<=24:\n chats_dict[index][\"timestamp\"]=str(td.total.hours) + ' hour '\n else:\n chats_dict[index][\"timestamp\"]=str(td.total.days) + ' days '\n chats_dict[index][\"sender\"] = chat.from_id\n chats_dict[index][\"message\"] = chat.message \n index=index+1 \n return chats_dict\n # print(chats_dict)\n \n\n\n#get all chats for one user\ndef all_chats(to_id):\n # get_chat=Chat.query.filter_by(to_id=to_id).group_by(Chat.from_id).all()\n my_dict={}\n user_received_chats_from=[]\n for value in db.session.query(Chat.from_id).distinct().filter_by(to_id=to_id): \n user_received_chats_from.append(value[0])\n for sender in user_received_chats_from:\n # print(f'Chats between {to_id} and {sender}')\n my_dict[sender]=get_chats(sender, to_id) \n print(\"\")\n # print(my_dict)\n return my_dict\n print(user_received_chats_from)\n\nos.environ[\"OAUTHLIB_INSECURE_TRANSPORT\"] = \"1\"\n\nGOOGLE_CLIENT_ID = \"143969115563-9u8ebmsoo2oj1ugc39p14ffhrktes5jr.apps.googleusercontent.com\"\nclient_secrets_file = \"client_secret.json\"\n\nflow = Flow.from_client_secrets_file(\n client_secrets_file=client_secrets_file,\n scopes=[\"https://www.googleapis.com/auth/userinfo.profile\", \"https://www.googleapis.com/auth/userinfo.email\", \"openid\"],\n redirect_uri=\"https://anotext.herokuapp.com/callback\"\n)\n\n\n# def login_is_required(function):\n# def wrapper(*args, **kwargs):\n# if \"google_id\" not in session:\n# return redirect('/login') # Authorization required\n# else:\n# return function()\n# return wrapper\n\n\ndef 
login_is_required(function):\n def wrapper(*args, **kwargs):\n if \"google_id\" not in session:\n session['requestor']=request.path\n # print('Requestor: ' + session['requestor'], flush=True)\n return redirect('/login') # Authorization required\n else:\n return function(*args, **kwargs)\n wrapper.__name__ = function.__name__\n return wrapper\n\n\n\n@app.route(\"/login\")\ndef login():\n authorization_url, state = flow.authorization_url()\n session[\"state\"] = state\n return redirect(authorization_url)\n\n\n@app.route(\"/callback\")\ndef callback():\n try:\n flow.fetch_token(authorization_response=request.url)\n except:\n return redirect('/logout')\n if \"state\" in session:\n if not session[\"state\"] == request.args[\"state\"]:\n # abort(500) # State does not match!\n return redirect('/logout')\n\n credentials = flow.credentials\n request_session = requests.session()\n cached_session = cachecontrol.CacheControl(request_session)\n token_request = google.auth.transport.requests.Request(session=cached_session)\n\n id_info = id_token.verify_oauth2_token(\n id_token=credentials._id_token,\n request=token_request,\n audience=GOOGLE_CLIENT_ID\n )\n \n session[\"google_id\"] = id_info.get(\"sub\")\n session[\"name\"] = id_info.get(\"name\")\n # print(session[\"name\"], session[\"google_id\"], flush=True)\n query_user=User.query.filter_by(google_id=session['google_id']).first()\n if query_user is None:\n new_user=User(name=session['name'], google_id=session['google_id'])\n db.session.add(new_user)\n db.session.commit()\n # print('Requestor is ' + session['requestor'], flush=True)\n if \"requestor\" in session:\n return redirect(session['requestor'])\n # print(\"Tring to access \" + '/' + session['google_id'], flush=True)\n my_id=num_encode(int(session['google_id']))\n # print(my_id, flush=True)\n return redirect('/' + my_id)\n\n\n\n\n@app.route(\"/logout\")\ndef logout():\n session.clear()\n return redirect('/')\n\n\n@app.route(\"/\")\ndef index():\n if \"google_id\" not in session:\n return render_template('login.html')\n else:\n return redirect(url_for('user_dashboard', userstr=num_encode(int(session['google_id']))))\n\n@app.route('/', methods=['GET', 'POST'])\n@login_is_required\ndef user_dashboard(userstr):\n userstr=userstr.replace('#', '')\n user_id=str(num_decode(userstr))\n count=Report.query.filter_by(reported=session['google_id']).count()\n if count>5:\n return \"You have been very naughty. 
And you got reported a lot\"\n guser=User.query.filter_by(google_id=session[\"google_id\"]).first()\n gname=guser.name.split(' ')[0]\n if session[\"google_id\"]==user_id:\n if request.method == 'POST':\n message = request.form['message']\n sendto=request.form['send']\n sendto=sendto.replace('#', '')\n # print(sendto, flush=True)\n newchat=Chat(message=message, from_id=session[\"google_id\"], to_id=sendto)\n db.session.add(newchat)\n db.session.commit()\n # print('Redirect to user id ' + userstr, flush=True)\n return redirect(url_for('user_dashboard', userstr=userstr))\n chats=all_chats(to_id=session[\"google_id\"])\n return render_template('dashboard.html', chats = chats, me=session[\"google_id\"], name=gname, usertag=userstr)\n\n else:\n query_user=User.query.filter_by(google_id=user_id).first()\n try:\n \n length=len(query_user.name.split(' ')[0])\n user=query_user.name.split(' ')[0][0] + '*' * (length-1)\n except:\n return \"User does not exist\"\n if request.method == 'POST':\n message = request.form['message']\n sendto=request.form['send']\n sendto=sendto.replace('#', '')\n sendtoid=str(num_decode(sendto))\n # print(type(sendtoid) ,sendtoid, flush=True)\n # print(sendto, flush=True)\n newchat=Chat(message=message, from_id=session[\"google_id\"], to_id=sendtoid)\n db.session.add(newchat)\n db.session.commit()\n return redirect(url_for('user_dashboard', userstr=userstr))\n chatsBetweenUser=get_chats(user_id,session['google_id'])\n # print(chatsBetweenUser, flush=True)\n # return f\"
Hello {query_user.name[0]}
\"\n return render_template('otheruser.html', chats = chatsBetweenUser, me=session[\"google_id\"], user=user, name=gname, usertag=userstr)\n # return \"Hello\"\n # print(chats)\n\n@app.route('/report/')\n@login_is_required\ndef report(num):\n try:\n user_id=str(int(num))\n except:\n user_id=str(num_decode(num))\n \n timenow = datetime.utcnow()\n latestreported=Report.query.filter_by(reported_by=session['google_id'], reported=user_id).order_by(Report.id.desc()).first()\n try:\n timedifference = timedelta.Timedelta(timenow - latestreported.time)\n if timedifference.total.hours<1:\n return \"You must wait 1 hour before you can report the user again.\"\n except:\n pass\n \n reports=Report(reported_by=session['google_id'], reported=user_id)\n db.session.add(reports)\n db.session.commit()\n my_list=[session['google_id'], user_id]\n reversed_list=my_list\n reversed_list.reverse()\n getchat=Chat.query.filter((Chat.from_id == user_id) | (Chat.from_id == session['google_id']),(Chat.to_id == session['google_id']) | (Chat.to_id == user_id)).all()\n for chat in getchat:\n db.session.delete(chat)\n db.session.commit()\n return \"Reported\"\n\n \n\n\n# @app.route('/')\n# def home():\n# return redirect('/' + session['google_id'])\n\n\n@app.route(\"/def/protected_area\")\n@login_is_required\ndef protected_area():\n return f\"Hello {session['name']}!
\"\n\n@app.route('/copy/copy')\ndef copy():\n return render_template('login.html')\n\n@app.route(\"/req\")\ndef req():\n return f'

Requestor is {session[\"requestor\"]}

'\nif __name__ == \"__main__\":\n app.run()\n\n\n\n# latestreported=Report.query.filter_by(reported_by='112342845020655906267', reported='117634559943903595921').order_by(Report.id.desc()).first()\n\n# db.session.query(Report).delete()\n\n# db.session.commit()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"40893219","text":"#!/usr/bin/env python3\nn, t = map(int,input().split())\nopn = 0\ncls = 0\nfor i in range(n):\n a = int(input())\n if cls < a:\n opn += t\n else:\n opn += a+t - cls\n cls = a + t\nprint(opn)\n","sub_path":"abc/024/b/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"553814530","text":"\"\"\"medicalproject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom app import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',views.index,name='main'),\n path('user_registration/',views.user_registration.as_view(),name='user_registration'),\n path('adminpage/',views.adminpage.as_view(),name='admin'),\n path('disease_home_page/',views.disease_home_page.as_view(),name='disease_home_page'),\n path('delete_des/',views.delete_des,name='delete_des'),\n path('update_des/',views.update_des,name='update_des'),\n path('updated_des/',views.updated_des,name='updated_des'),\n path('medicene_page/',views.medicene_page.as_view(),name='medicene_page'),\n path('delete_med/',views.delete_med,name='delete_med'),\n path('update_med/',views.update_med,name='update_med'),\n path('updated_med/',views.updated_med,name='updated_med'),\n path('reports_adm/',views.reports_adm,name='reports_adm'),\n path('user_login/',views.user_login.as_view(),name='user_login'),\n path('report_usr/',views.report_usr,name='report_usr'),\n path('srch_med/',views.srch_med.as_view(),name='srch_med'),\n path('change_pswd/',views.change_pswd.as_view(),name='change_pswd')\n]\n","sub_path":"medicalproject/medicalproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"633237861","text":"\"\"\"\n Collections Exercise: Contact Book\n Use collections module to do the following:\n Users should be able to:\n * add new entry\n * edit\n * search\n * remove, and \n * view all contact(s) in the contact book.\n\n You can make use of Ordereddict, Userdict or any class you prefer\n for storing and manipulating the data\n \n sample data:\n {\n \"Elijah Rey Montefalco\": {\n \"name\": \"Elijah Rey Montefalco\",\n \"phone\": \"0912324254345\",\n \"email\": \"ej_m@sample.com\",\n \"address\": \"Siargao, Surigao City\",\n },\n \"Adler\": {\n \"name\": \"Adler\",\n \"phone\": \"094353523\",\n \"email\": \"adler@vhrv.com\",\n 
\"address\": \"Costa Leona\",\n },\n \"Amanda Seyfried\": {\n \"name\": \"Amanda Seyfried\",\n \"phone\": \"09123423465\",\n \"email\": \"a_seyfried@samp.com\",\n \"address\": \"Allentown, Pennsylvania\",\n },\n \"Adam Vincent Hidalgo\": {\n \"name\": \"Adam Vincent Hidalgo\",\n \"phone\": \"2344-4535\",\n \"email\": \"av_hidalgo@vhrv.com\",\n \"address\": \"Costa Leona\",\n },\n }\n\n OrderedDict(\n [\n (\n \"Adler\",\n {\n \"name\": \"Adler\",\n \"phone\": \"094353523\",\n \"email\": \"adler@vhrv.com\",\n \"address\": \"Costa Leona\",\n },\n ),\n (\n \"Adam Vincent Hidalgo\",\n {\n \"name\": \"Adam Vincent Hidalgo\",\n \"phone\": \"2344-4535\",\n \"email\": \"av_hidalgo@vhrv.com\",\n \"address\": \"Costa Leona\",\n },\n ),\n (\n \"Elijah Rey Montefalco\",\n {\n \"name\": \"Elijah Rey Montefalco\",\n \"phone\": \"0912324254345\",\n \"email\": \"ej_m@sample.com\",\n \"address\": \"Siargao, Surigao City\",\n },\n ),\n (\n \"Amanda Seyfried\",\n {\n \"name\": \"Amanda Seyfried\",\n \"phone\": \"09123423465\",\n \"email\": \"a_seyfried@samp.com\",\n \"address\": \"Allentown, Pennsylvania\",\n },\n ),\n ]\n )\n\n\"\"\"\n\n\nfrom collections import *\nimport sys\nfrom itertools import *\nimport time\nimport json\n\nc0 = \"\\33[92m\"\nc1 = \"\\33[91m\"\nc2 = \"\\33[0m\"\nc3 = \"\\33[7m\"\nc4 = \"\\33[97m\"\n\n\n# class Edit(UserString):\n# def append(self, s):\n# self.data = self.data + s\n\n# def insert(self, index, s):\n# self.data = self.data[index:] + s + self.data[index:]\n\n# def replace(self, s, to_be_replaced):\n# self.data = self.data.replace(s, to_be_replaced)\n\n\ndef loader(seconds):\n \"\"\"Show an animated spinner while we sleep.\"\"\"\n symbols = cycle(\"-\\|/\")\n tend = time.time() + seconds\n while time.time() < tend:\n sys.stdout.write(f\"\\r\\33[44mL O A D I N G...{c2} {next(symbols)}\")\n sys.stdout.flush()\n time.sleep(0.1)\n print()\n\n\ndef table():\n print(end=\"\\t\")\n print(\"~\" * 89)\n print(f\"{'C O N T A C T B O O K':>65}\")\n print(f\"\\t{c3}{'NAME':<25} {'PHONE':<20} {'EMAIL':<20} {'ADDRESS':<20} {c2}\")\n\n\ndef view_all():\n loader(0.5)\n try:\n with open(\"files/contact_book.json\") as file_in:\n content = json.load(file_in)\n table()\n for each in list(content.keys()):\n print(\n f'\\t{content[each][\"name\"]:<25} {content[each][\"phone\"]:<20} {content[each][\"email\"]:<20} {content[each][\"address\"]:<20}'\n )\n print(end=\"\\t\")\n print(\"~\" * 89, \"\\n\\n\")\n except Exception:\n table()\n print(f\"\\n{'No data found':>60}\")\n print(end=\"\\t\")\n print(\"~\" * 89)\n\n\ndef edit(name):\n try:\n with open(\"files/contact_book.json\") as file_in:\n content = json.load(file_in)\n\n # book = Edit(content)\n # book.replace(content[name][\"name\"], input(f\"{content[name]['name']} => \"))\n # book.replace(content[name][\"phone\"], input(f\"{content[name]['phone']} => \"))\n # book.replace(content[name][\"email\"], input(f\"{content[name]['email']} => \"))\n # book.replace(\n # content[name][\"address\"], input(f\"{content[name]['address']} => \")\n # )\n # if name in content[name[:2]][\"name\"]:\n content[name][\"name\"] = input(f\"Name ({content[name]['name']}) : \")\n content[name][\"phone\"] = input(f\"Phone ({content[name]['phone']}) : \")\n content[name][\"email\"] = input(f\"Email ({content[name]['email']}) : \")\n content[name][\"address\"] = input(f\"Address ({content[name]['address']}) : \")\n\n with open(\"files/contact_book.json\", \"w\") as file_out:\n json.dump(content, file_out)\n print(f\"{c0}Updated successfully ✅{c2}\")\n except 
Exception:\n print(f\"{c4}{name} is not found on the list{c2}\")\n\n\ndef search(name):\n try:\n with open(\"files/contact_book.json\") as file_in:\n content = json.load(file_in)\n table()\n for each in list(content.keys()):\n if (\n each.lower().startswith(name.lower())\n or each.lower() == name.lower()\n ):\n print(\n f'\\t{content[each][\"name\"]:<25} {content[each][\"phone\"]:<16} {content[each][\"email\"]:<20} {content[each][\"address\"]:<20}'\n )\n else:\n pass\n print(end=\"\\t\")\n print(\"~\" * 89)\n except Exception:\n print(f\"'{name}' is not found on contact list\")\n\n\ndef remove(name):\n try:\n with open(\"files/contact_book.json\") as file_in:\n content = json.load(file_in)\n for each in list(content.keys()):\n if each == name:\n o = input(f\"Are you sure you want to remove {name} (y/n)? \")\n if o == \"y\" or o == \"Y\":\n del content[name]\n else:\n pass\n else:\n pass\n with open(\"files/contact_book.json\", \"w\") as file_out:\n json.dump(content, file_out)\n print(f\"{c0}Successfully Deleted{c2}\")\n except Exception:\n print(f\"'{name}' is not found on contact list\")\n\n\ndef menu():\n\n options = \"\"\"\n [1] Add New Contact\n [2] View All Contacts\n [3] Edit Contacts\n [4] Search\n [5] Remove\n [6] Exit\n \"\"\"\n contactList = OrderedDict()\n\n while True:\n print(\n \"\"\"\n ------------------------\n ' OPTIONS '\n ------------------------\n \"\"\",\n end=\"\",\n )\n print(options)\n\n try:\n action = int(input(\"\\nSelect -> \"))\n\n if action == 1:\n \"\"\" Add New Contact \"\"\"\n try:\n with open(\"files/contact_book.json\", \"w\") as file_out:\n name = input(\"Enter Name: \")\n contactList[name] = {\n \"name\": name,\n \"phone\": input(\"Enter phone number: \"),\n \"email\": input(\"Enter email: \").lower(),\n \"address\": input(\"Enter address: \"),\n }\n json.dump(contactList, file_out)\n print(f\"{c0}Added Successfully ✅{c2}\")\n # print(contactList)\n except Exception:\n print(f\"{c1}Failed to add{c2}\")\n\n elif action == 2:\n \"\"\" View Contacts \"\"\"\n view_all()\n elif action == 3:\n \"\"\" Edit Contacts \"\"\"\n edit(input(\"Enter name: \"))\n elif action == 4:\n \"\"\" Search \"\"\"\n search(input(\"Enter name: \"))\n elif action == 5:\n \"\"\" Remove Contact \"\"\"\n remove(input(\"Enter name: \"))\n elif action == 6:\n exit()\n else:\n print(\"Wrong input\\n\")\n except ValueError:\n print(f\"{c1} Please enter numbers only {c2}\")\n\n\nmenu()\n","sub_path":"activities/collect_contacts.py","file_name":"collect_contacts.py","file_ext":"py","file_size_in_byte":8339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"189110528","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"This module contains class MRT_Announcements_Table\n\nAnnouncements_Table inherits from the Generic_Table class. The Generic_Table\nclass allows for the conection to a database upon initialization. Also\nupon initialization the _create_tables function is called to initialize\nany tables if they do not yet exist. Beyond that the class can clear the\ntable, create an index, and has the name and columns properties that are\nused in utils function to insert CSVs into the database. This class does\nnot contain an index creation function, because it would only ever be\nused when combining with the roas table, which does a parallel seq_scan,\nthus any indexes are not used since they are not efficient. 
Each table\nfollows the table name followed by a _Table since it inherits from the\ndatabase class.\n\"\"\"\n\n__author__ = \"Justin Furuness\"\n__credits__ = [\"Justin Furuness\"]\n__Lisence__ = \"BSD\"\n__maintainer__ = \"Justin Furuness\"\n__email__ = \"jfuruness@gmail.com\"\n__status__ = \"Production\"\n\nfrom ....utils.database import Generic_Table\n\n\nclass MRT_Announcements_Table(Generic_Table):\n \"\"\"Class with database functionality.\n\n In depth explanation at the top of the file.\"\"\"\n\n __slots__ = []\n\n name = \"mrt_announcements\"\n\n columns = [\"prefix\", \"as_path\", \"origin\", \"time\"]\n\n def _create_tables(self):\n \"\"\"Creates tables if they do not exist.\n\n Called during initialization of the database class.\n \"\"\"\n\n sql = f\"\"\"CREATE UNLOGGED TABLE IF NOT EXISTS {self.name} (\n prefix INET,\n as_path bigint ARRAY,\n origin BIGINT,\n time BIGINT\n );\"\"\"\n self.execute(sql)\n","sub_path":"lib_bgp_data/collectors/mrt/mrt_base/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"545835397","text":"# -*- coding: utf-8 -*-\nimport os, sys, time\nimport subprocess\nfrom PIL import Image, ImageDraw, ImageFont\nimport operator\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nBACKGROUND = (255,255,255)\n\ndef _add_to_hash(r, p):\n if r.has_key(p):\n r[p] += 1\n else:\n r[p] = 0\n\ndef get_background_color(im):\n (width, height) = im.size\n #print width, height\n #data = im.getdata()\n r = {}\n for i in range(0,width):\n p = im.getpixel((i,0))\n _add_to_hash(r,p) # {gbk:0}\n p = im.getpixel((i,height-1))\n _add_to_hash(r,p)\n\n for j in range(0,height):\n p = im.getpixel((0,j))\n _add_to_hash(r,p)\n p = im.getpixel((width-1,j))\n _add_to_hash(r,p)\n\n max = 0\n background = None\n for p in r.keys():\n value = r[p]\n if value>max:\n max = value\n background = p\n #print max, background\n return background\n\n\ndef _calc_distance(p1,p2):\n (x1,x2,x3) = p1\n (y1,y2,y3) = p2\n dis = ((x1-y1)*(x1-y1)+(x2-y2)*(x2-y2)+(x3-y3)*(x3-y3))**0.5\n return dis\n\ndef erease_background(im):\n (width, height) = im.size\n background_color = get_background_color(im)\n data1 = []\n data = im.getdata()\n # print(background_color,width,height,data)\n for p in data:\n if p == background_color:\n data1.append(BACKGROUND)\n elif _calc_distance(p, background_color)<=80:\n data1.append(BACKGROUND)\n else:\n data1.append(p)\n bmp = Image.new(\"RGB\", (width, height))\n bmp.putdata(data1)\n # bmp.show()\n return bmp\n\ndef _get_close_color_set(color_set, p):\n dis = 255**3\n close_color = None\n for color in color_set:\n center = color[\"center\"]\n dis1 = _calc_distance(center,p)\n if dis1 < 80 and dis1 < dis:\n dis = dis1\n close_color = color\n return close_color\n\ndef _adjust_color_set(color, p):\n color[\"all\"].append(p)\n x=0; y=0; z=0; cnt=0\n for p in color[\"all\"]:\n (a,b,c) = p\n x += a\n y += b\n z += c\n cnt += 1\n color[\"center\"] = ((int)(x/cnt), (int)(y/cnt), (int)(z/cnt))\n\ndef find_main_color(im):\n (width, height) = im.size\n data = im.getdata()\n color_set = [] # [{\"center\":(1,2,3), \"all\":[(1,2,3)]}]\n color_no = 1\n for p in data:\n if p == BACKGROUND:\n continue\n color = _get_close_color_set(color_set, p)\n if color:\n _adjust_color_set(color, p)\n else:\n color_set.append({\"center\":p, \"all\":[p]})\n return color_set\n\n\ndef avg_color(data, x, y, width, height):\n pos = y*width + x\n p0 = data[pos]\n\n p11 = (-1,-1,-1)\n 
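# (-1,-1,-1) marks a neighbour that falls outside the image; these are skipped when averaging\n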
pos11 = pos - width - 1\n if x-1>=0 and y-1>=0:\n p11 = data[pos11]\n\n p12 = (-1,-1,-1)\n pos12 = pos - width\n if y-1 >= 0:\n p12 = data[pos12]\n\n p13 = (-1,-1,-1)\n pos13 = pos - width + 1\n if x+1 < width and y-1 >= 0:\n p13 = data[pos13]\n\n p21 = (-1,-1,-1)\n pos21 = pos -1\n if x-1 >= 0:\n p21 = data[pos21]\n\n p23 = (-1,-1,-1)\n pos21 = pos -1\n if x+1 < width:\n p21 = data[pos21]\n\n p31 = (-1,-1,-1)\n pos31 = pos + width - 1\n if x-1 >= 0 and y+1 < height:\n p31 = data[pos31]\n\n p32 = (-1,-1,-1)\n pos32 = pos + width\n if y+1 < height:\n p32 = data[pos32]\n\n p33 = (-1,-1,-1)\n pos33 = pos + width + 1\n if x+1 < width and y+1 < height:\n p33 = data[pos33]\n\n ps = [p11,p12,p13,p21,p0,p23,p31,p32,p33]\n x = 0; y = 0; z = 0;\n cnt = 0\n for p in ps:\n (a, b, c) = p\n if a==-1 and b==-1 and c==-1:\n continue\n if a==255 and b==255 and c==255:\n continue\n x += a\n y += b\n z += c\n cnt += 1\n\n return ((int)(x/cnt), (int)(y/cnt), (int)(z/cnt))\n\n\ndef fill_main_color(im):\n (width, height) = im.size\n color_set = find_main_color(im)\n #for p in color_set:\n # print p[\"center\"]\n data1 = []\n data = im.getdata()\n i = 0\n for p in data:\n if p == BACKGROUND:\n data1.append(BACKGROUND)\n else:\n x = (int)(i%width)\n y = (int)(i/width)\n p = avg_color(data, x, y, width, height)\n color = _get_close_color_set(color_set,p)\n if color:\n data1.append(color[\"center\"])\n else:\n data1.append(BACKGROUND)\n i += 1\n bmp = Image.new(\"RGB\", (width, height))\n bmp.putdata(data1)\n # bmp.show()\n return bmp\n\ndef crop_image(im, start, end):\n (width, height) = im.size\n data = im.getdata()\n data1 = []\n h = 0\n for j in range(height):\n need = False\n for i in range(start,end):\n if data[j*width + i] != BACKGROUND:\n need = True\n break\n if need:\n h += 1\n for i in range(start,end):\n data1.append(data[j*width + i])\n bmp = Image.new(\"RGB\", (end-start, h))\n bmp.putdata(data1)\n return bmp\n\ndef split_numbers(im):\n images = []\n (width, height) = im.size\n data = im.getdata()\n splits = [0 for i in range(width)]\n for i in range(width):\n value = 0\n for j in range(height):\n (x,y,z) = data[j*width + i]\n if x==255 and y==255 and z==255:\n continue\n value += 1\n splits[i] = value\n\n for i in range(width):\n if splits[i] <= 3:\n splits[i] = 0\n\n start = -1\n end = 0\n for i in range(width):\n if start == -1 and splits[i] > 0:\n start = i\n end = -1\n if end == -1 and splits[i] == 0:\n end = i\n # print start, end\n num_img = crop_image(im,start,end)\n (w,h) = num_img.size\n if w>=3 and h>10:\n images.append(num_img)\n # num_img.show()\n start = -1\n\n if len(images) == 6:\n return images\n\n _images = []\n if len(images) < 6:\n for image in images:\n #image.show()\n x,y = image.size\n if x < 20:\n _images.append(image)\n continue\n\n colors = find_important_colors(image)\n if len(colors) > 1:\n #image.show()\n candidate_imgs = []\n for color in colors:\n img = pick_color_image(image, color)\n #img.show()\n candidate_imgs.append(img)\n candidate_imgs = sort_candidate_imgs(candidate_imgs)\n for img in candidate_imgs:\n _images.append(img)\n else:\n _images.append(image)\n\n return _images\n\ndef sort_candidate_imgs(candidate_imgs):\n sort = {}\n for index in range(len(candidate_imgs)):\n img = candidate_imgs[index]\n (width, height) = img.size\n sum=0; cnt=0\n for j in range(height):\n for i in range(width):\n p = img.getpixel((i,j))\n if p != BACKGROUND:\n cnt += 1\n sum += i\n avg = (int)(sum/cnt)\n sort[index] = avg\n # print sort\n sorted_x = sorted(sort.iteritems(), 
key=operator.itemgetter(1))\n # print sorted_x\n _candidate_imgs=[]\n for index,avg in sorted_x:\n _candidate_imgs.append(candidate_imgs[index])\n return _candidate_imgs\n\n\ndef pick_color_image(im, color):\n (width, height) = im.size\n data = im.getdata()\n data1 = []\n for p in data:\n if p == color:\n data1.append(p)\n else:\n data1.append(BACKGROUND)\n bmp = Image.new(\"RGB\", (width, height))\n bmp.putdata(data1)\n # bmp.show()\n return bmp\n\n\ndef find_important_colors(im):\n (width, height) = im.size\n data = im.getdata()\n colors = {}\n for p in data:\n if p == BACKGROUND:\n continue\n if colors.has_key(p):\n colors[p] += 1\n else:\n colors[p] = 1\n num = len(colors)\n total = 0\n for p in colors.keys():\n total += colors[p]\n avg = total/num\n\n important_colors = []\n for p in colors.keys():\n if colors[p] > avg/2:\n important_colors.append(p)\n\n return important_colors\n\ndef erease_line(im):\n (width, height) = im.size\n white = (255,255,255)\n data1 = []\n data = im.getdata()\n n = 0\n for p in data:\n line = False\n i = (int)(n % width)\n j = (int)(n / width)\n # print i,j\n if im.getpixel((i,j)) != white:\n ps = [im.getpixel((i,mj)) for mj in [j-2,j-1,j,j+1,j+2] if mj>=0 and mj<=height-1]\n # print ps\n if len(ps) == 5:\n # if ps[0]==white and ps[4]==white and white in [ps[1],ps[3]] and ps[2] in [ps[1],ps[3]]:\n if ps[0]==white and ps[4]==white and white in [ps[1],ps[3]]:\n #print i,j,ps\n line = True\n elif ps[1] == white and ps[3] == white:\n line = True\n # elif len(ps) == 4:\n # if ps.count(white) >= 2 and ps.count(im.getpixel((i, j))) >= 1:\n # line = True\n elif ps.count(white) >= 2:\n #print i,j,ps\n line = True\n if line is False:\n data1.append(p)\n else:\n data1.append(white)\n\n n += 1\n\n bmp = Image.new(\"RGB\", (width, height))\n bmp.putdata(data1)\n return bmp\n\ndef erease_noise(im, window=9):\n (width, height) = im.size\n white = (255, 255, 255)\n pxs = []\n for i in range(window/2,width-window/2):\n for j in range(window/2,height-window/2):\n bound = 0\n inner = 0\n for mi in [si for si in range(i-window/2,i+window/2+1)]:\n bk = False\n for mj in [sj for sj in range(j-window/2,j+window/2+1)]:\n p = im.getpixel((mi, mj))\n dis = _calc_distance(white, p)\n if mi == i-window/2 or mi == i+window/2 or mj == j-window/2 or mj == j+window/2:\n if dis > 0:\n bk = True\n break\n bound += dis\n else:\n inner += dis\n if bk is True:\n bound = 1\n break\n if bound == 0 and inner > 0:\n for mi in [si for si in range(i-window/2+1, i+window/2)]:\n for mj in [sj for sj in range(j-window/2+1, j+window/2)]:\n if [mi,mj] not in pxs:\n pxs.append([mi,mj])\n # print pxs\n bmp = set_white(im,pxs)\n return bmp\n\ndef set_white(im, white_pxs):\n (width, height) = im.size\n white = (255, 255, 255)\n data1 = []\n data = im.getdata()\n n = 0\n for p in data:\n i = (int)(n % width)\n j = (int)(n / width)\n if [i,j] in white_pxs:\n # print \"to be white!\"\n data1.append(white)\n else:\n data1.append(p)\n n += 1\n bmp = Image.new(\"RGB\", (width, height))\n bmp.putdata(data1)\n return bmp\n\n\ndef cut_image_top_bottom_margin(im):\n (width, height) = im.size\n data = im.getdata()\n data1 = []\n h = 0\n for j in range(height):\n need = False\n for i in range(width):\n if data[j*width + i] != BACKGROUND:\n need = True\n break\n if need:\n h += 1\n for i in range(width):\n data1.append(data[j*width + i])\n bmp = Image.new(\"RGB\", (width, h))\n bmp.putdata(data1)\n return bmp\n\n\ndef cut_image_margin(im):\n im = cut_image_top_bottom_margin(im)\n im = im.rotate(90, expand=1)\n im = 
cut_image_top_bottom_margin(im)\n im = im.rotate(-90, expand=1)\n return im\n\n\ndef black(im):\n (width, height) = im.size\n data = im.getdata()\n data1 = []\n for p in data:\n if p != BACKGROUND:\n data1.append((0,0,0))\n #data1.append(BACKGROUND)\n else:\n data1.append(BACKGROUND)\n #data1.append((0,0,0))\n bmp = Image.new(\"RGB\", (width, height))\n bmp.putdata(data1)\n return bmp\n\ndef revert_color(im):\n (width, height) = im.size\n data = im.getdata()\n data1 = []\n for p in data:\n if p != BACKGROUND:\n #data1.append((0,0,0))\n data1.append(BACKGROUND)\n else:\n #data1.append(BACKGROUND)\n data1.append((0,0,0))\n bmp = Image.new(\"RGB\", (width, height))\n bmp.putdata(data1)\n return bmp\n\n\ndef rotate(im):\n (width, height) = im.size\n best = im\n for i in range(-15, 15):\n imnew = revert_color(im)\n imnew = imnew.rotate(i, expand=1)\n imnew = revert_color(imnew)\n imnew = cut_image_margin(imnew)\n #imnew.show()\n (newwidth, newheight) = imnew.size\n if newwidth < width:\n width = newwidth\n best = imnew\n return best\n\n\ndef convert_2_tiff(source, dest):\n os.system(\"convert %s -bordercolor white -border 10x10 %s\" % (source, dest))\n\ndef tesseract(img_file):\n os.system(\"tesseract %s %s -l eng -psm 10 \" % (img_file,img_file))\n\ndef get_character(img_file):\n f_name = \"%s.txt\" % img_file\n f = open(f_name)\n content = f.readline()\n f.close()\n if content != \"\\r\":\n return content.strip()\n return None\n\n\ndef process(image_path):\n org_im = Image.open(image_path)\n # org_im.show()\n # 去背景\n im = erease_background(org_im)\n # 填充主体部分颜色 使更深\n im = fill_main_color(im)\n #im.show()\n # 去掉斜线\n for i in range(2):\n im = erease_line(im)\n im = im.rotate(90, expand=1)\n im = erease_line(im)\n im = im.rotate(-90, expand=1)\n # im.show()\n # 去燥\n im = erease_noise(im)\n # im.show()\n #\n test_image = Image.new(\"RGB\", (400,200))\n test_image.paste((150,150,150),(0,0,400,200))\n test_image.paste(org_im, (0,0))\n images = split_numbers(im)\n i = 0\n # flag = True\n result = \"\"\n for img in images:\n img = cut_image_margin(img)\n # img.show()\n img = black(img)\n # img.show()\n # img = rotate(img)\n source = \"output/%s.png\"%(i+1)\n dest = \"output/%s.tiff\"%(i+1)\n img.save(source)\n convert_2_tiff(source, dest)\n tesseract(dest)\n c = get_character(dest)\n if c is None:\n flag = False\n result += \"*\"\n else:\n if c == \":\":\n c = \"B\"\n result += c\n #img.show()\n test_image.paste(img, (50*i, 100))\n #time.sleep(1)\n i += 1\n pass\n print('result:%s'%result)\n draw = ImageDraw.Draw(test_image)\n # font = ImageFont.truetype('/Users/hush/.virtualenvs/codes/lib/python2.7/site-packages/matplotlib/mpl-data/fonts/ttf/cmb10.ttf', 36)\n font = ImageFont.truetype('/opt/py-env/lib/python2.7/site-packages/matplotlib/mpl-data/fonts/ttf/cmb10.ttf', 36) #todo\n draw.text((10,150),result, fill=(0,0,0), font=font)\n # test_image.show()\n return test_image,result\n\n # if flag:\n # print \"Good: \", result\n # else:\n # print \"Bad: \", result\n\n # return (test_image, result)\n\n\nif __name__ == \"__main__\":\n # if len(sys.argv) == 2:\n # image_path = sys.argv[1]\n # test_image, result = process(image_path)\n # test_image.show()\n # else:\n # for i in range(74):\n # image_path = \"../crawler/beian/vfimg/%s.jpeg\" % i\n # test_image, result = process(image_path)\n # # test_image.save(\"result/result%02d.png\" % i)\n # print result\n # process(\"vfimg/test.png\")\n 
pass\n\n\n","sub_path":"data/spider2/crawler/beian_icp/captcha_miit.py","file_name":"captcha_miit.py","file_ext":"py","file_size_in_byte":15752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"244945498","text":"# 用户:李航\r\n# 开发时间:2021/4/30 11:23\r\n# 编写一个程序,找到两个单链表相交的起始节点。\r\nclass Solution(object):\r\n\tdef getIntersectionNode(self, headA, headB):\r\n\t\t\"\"\"\r\n\t\t:type head1, head1: ListNode\r\n\t\t:rtype: ListNode\r\n\t\t\"\"\"\r\n\t\ta,b = headA,headB\r\n\t\t# 定义了两个节点a和b,只要a和b不等就继续遍历\r\n\t\twhile a!=b:\r\n\t\t\t# 这步很关键,请对照动态图配合理解,\r\n\t\t\t#当a的下一个为空时,就a就从b链表头开始遍历\r\n\t\t\ta = a.next if a else headB\r\n\t\t\t# 同理,b也是类似的\r\n\t\t\tb = b.next if b else headA\r\n\t\treturn a","sub_path":"leecode/双指针法/05.相交链表.py","file_name":"05.相交链表.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"509194697","text":"\"\"\"\r\nPython script for Part 1c of Project 2a\r\n\r\nOriginal Author:\tVinamra Agrawal\r\nDate:\t\t\t\tJanuary 25, 2019\r\n\r\nEdited By:\t\t\tOmkar Mulekar\r\nDate:\t\t\t\tFebruary 28, 2019\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nfrom fenics import *\r\nimport matplotlib\r\nmatplotlib.use(\"Agg\")\r\nimport matplotlib.pyplot as plt\r\nfrom ufl import nabla_div\r\nimport math\r\nimport numpy as np\r\nfrom scipy.signal import argrelextrema\r\n\r\n#==============================================================\r\n# Define System Properties\r\n#==============================================================\r\nlength = 1;\r\nW = 0.2;\r\nH = 0.2;\r\n\r\na = 0.04*length;\r\nb = 0.4*H;\r\narea = a*b;\r\n\r\nF = -100\r\n\r\nyoungs = 200e9 # Youngs\r\nnu = 0.3 # Poisson\r\nrho = 7800 # Density\r\n\r\n\r\n# Lame parameters\r\nmu = (youngs)/(2*(1+nu))\r\nlambda_ = (nu*youngs)/((1+nu)*(1-2*nu))\r\n\r\ng = 10\r\n\r\ntraction_applied = F/area\r\n\r\n#==============================================================\r\n#\tDimensionless parameters\r\n#==============================================================\r\nl_nd = length/length\r\nw_nd = W/length\r\nh_nd = H/length\r\n\r\nbar_speed = math.sqrt(youngs/rho)\r\nt_char = length/bar_speed\r\nt = 0\r\nt_i = 0.5\r\ndt = 0.1\r\nnum_steps = 150\r\n\r\nmu_nd = mu/youngs\r\nlambda_nd = lambda_/youngs\r\n\r\ntraction_nd = traction_applied/youngs\r\n\r\n#============================================================\r\n# Boundaries and Geometry\r\n#============================================================\r\nmesh = BoxMesh(Point(0,0,0),Point(l_nd,w_nd,h_nd),20,6,6)\r\nV = VectorFunctionSpace(mesh,'P',1)\r\n\r\ntol = 1E-14\r\n\r\ndef boundary_left(x,on_boundary):\r\n\treturn (on_boundary and near(x[0],0,tol))\r\n\r\ndef boundary_right(x,on_boundary):\r\n\treturn on_boundary and near(x[0],l_nd,tol)\r\n\r\nbc_left = DirichletBC(V,Constant((0,0,0)),boundary_left)\r\nbc_right = DirichletBC(V,Constant((0,0,0)),boundary_right)\r\n\r\n\r\n#============================================================\r\ndef epsilon(u):\r\n\treturn 0.5*(nabla_grad(u) + nabla_grad(u).T)\r\n\r\ndef sigma(u):\r\n\treturn lambda_nd*nabla_div(u)*Identity(d) + mu_nd*(epsilon(u) + epsilon(u).T)\r\n\r\n#============================================================\r\n# First we solve the problem of a cantelever beam under fixed\r\n# load. 
\r\n#============================================================\r\nu_init = TrialFunction(V)\r\nd = u_init.geometric_dimension()\r\nv = TestFunction(V)\r\nf = Constant((0.0,0.0,0.0))\r\nT_init = Expression(('0.0', 'x[0] >= 0.48*l && x[0] <= .52*l && near(x[1],w) && x[2] >= 0.3*h && x[2] <= 0.7*h? A : 0.0' ,'0.0'), degree=1, l=l_nd, w=w_nd,h=h_nd, A=traction_nd)\r\nF_init = inner(sigma(u_init),epsilon(v))*dx - dot(f,v)*dx - dot(T_init,v)*ds\r\na_init, L_init = lhs(F_init), rhs(F_init)\r\n\r\nprint(\"Solving the initial cantelever problem\")\r\nu_init = Function(V)\r\nsolve(a_init==L_init,u_init,[bc_left,bc_right])\r\nw_nd = u_init(l_nd/2.0,w_nd/2.0,h_nd/2.0)\r\nw = w_nd * length\r\nprint(w[1])\r\n\r\n#============================================================\r\n# Next we use this as initial condition, let the force go and \r\n# study the vertical vibrations of the beam\r\n#============================================================\r\nu_n = interpolate(Constant((0.0,0.0,0.0)),V)\r\nu_n_1 = interpolate(Constant((0.0,0.0,0.0)),V)\r\nu_n.assign(u_init)\r\nu_n_1.assign(u_init)\r\n\r\nT_n = Constant((0.0,0.0,0.0))\r\n\r\nu = TrialFunction(V)\r\nd = u.geometric_dimension()\r\nv = TestFunction(V)\r\n\r\nF = (dt*dt)*inner(sigma(u),epsilon(v))*dx + dot(u,v)*dx - (dt*dt)*dot(f,v)*dx - (dt*dt)*dot (T_n,v)*ds - 2.0*dot(u_n,v)*dx + dot(u_n_1,v)*dx\r\na,L = lhs(F), rhs(F)\r\n\r\nxdmffile_u = XDMFFile('results/solution.xdmf')\r\nxdmffile_s = XDMFFile('results/stress.xdmf')\r\n\r\nu = Function(V)\r\n\r\nu_store = [0] * num_steps\r\ntime = [0] * num_steps\r\n\r\nindex = 0\r\nfor n in range(num_steps):\r\n\tprint(\"time = %.2f\" % t)\r\n\tT_n.t = t\r\n\tsolve(a == L, u, [bc_left,bc_right])\r\n\tu_grab = u(0.5,0.1,0.1)\r\n\tu_store[n] = u_grab[1]\r\n\r\n\tif(abs(t-index)<0.01):\r\n\t\tprint(\"Writing output files...\")\r\n\t\txdmffile_u.write(u*length,t)\r\n\t\tW = TensorFunctionSpace(mesh, \"Lagrange\", 1)\r\n\t\tstress = lambda_*nabla_div(u)*Identity(d) + mu*(epsilon(u) + epsilon(u).T)\r\n\t\txdmffile_s.write(project(stress,W),t)\r\n\t\tindex += 1\r\n\r\n\ttime[n] = t\r\n\tt+=dt\r\n\tu_n_1.assign(u_n)\r\n\tu_n.assign(u)\r\n\r\n\r\n# Get period of oscillation\r\nu_np = np.array(u_store)\r\nmin_args = argrelextrema(u_np,np.less)\r\nperiod = (time[min_args[0][1]] - time[min_args[0][0]])*t_char\r\nnat_freq = 2*math.pi /period\r\nprint(\"Period of Oscillation\", period, \" seconds\")\r\nprint(\"Natural Frequency: \", nat_freq,\" rad/s\")\r\n\r\nplt.figure(1)\r\nplt.plot(time,u_store)\r\nplt.xlabel('time [s]')\r\nplt.ylabel('Vertical Deflection [m]')\r\nplt.savefig('1cfig.png')\r\n\r\n","sub_path":"Project03/1c_free_vibration.py","file_name":"1c_free_vibration.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"92847332","text":"import cv2\nimport os\n\n# input folder\nfolder_name ='data/images_test_staphylococcus_512x512_20'\n\nnew_x = 256\nnew_y = 256\n\noutput_foldername = 'data/images_test_staphylococcus_256x256_20'\n\n# create output folder\nif not os.path.isdir(output_foldername):\n os.mkdir(output_foldername)\n\nfilename_list = os.listdir(folder_name)\nprint(filename_list)\n\nfor image in range(0, len(filename_list)):\n # filename = filename_list[image]\n filename = str(image) + '.PNG'\n\n path = folder_name + '/' + filename\n\n img = cv2.imread(path)\n \n output_filename = str(image) + '.PNG'\n\n output_path = output_foldername + '/' + output_filename\n print(output_path)\n\n new_img = cv2.resize(img, 
(new_x, new_y), interpolation = cv2.INTER_AREA)\n # INTER_AREA for shrinking\n \n cv2.imwrite(output_path, new_img)\n\n ","sub_path":"resize_images.py","file_name":"resize_images.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"164952272","text":"\n\nfrom xai.brain.wordbase.nouns._bash import _BASH\n\n#calss header\nclass _BASHED(_BASH, ):\n\tdef __init__(self,): \n\t\t_BASH.__init__(self)\n\t\tself.name = \"BASHED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"bash\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_bashed.py","file_name":"_bashed.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"134917004","text":"from manimlib.imports import *\nimport os\nimport pyclbr\nfrom pdb import set_trace\n\nclass PlotStepFow(GraphScene):\n CONFIG = {\n \"x_min\": 0,\n \"x_max\": 11,\n \"x_axis_width\": 9,\n \"x_tick_frequency\": 1,\n \"x_leftmost_tick\": None, # Change if different from x_min\n \"x_labeled_nums\": None,\n \"x_axis_label\": \"$t[s]$\",\n \"y_min\": 0,\n \"y_max\": 2,\n \"y_axis_height\": 6,\n \"y_tick_frequency\": 1,\n \"y_bottom_tick\": None, # Change if different from y_min\n \"y_labeled_nums\": None,\n \"y_axis_label\": \"$F_{d}$\",\n \"axes_color\": GREY,\n \"graph_origin\": 2.5 * DOWN + 4 * LEFT,\n \"exclude_zero_label\": True,\n \"num_graph_anchor_points\": 25,\n \"default_graph_colors\": [BLUE, GREEN, YELLOW],\n \"default_derivative_color\": GREEN,\n \"default_input_color\": YELLOW,\n \"default_riemann_start_color\": BLUE,\n \"default_riemann_end_color\": GREEN,\n \"area_opacity\": 0.8,\n \"num_rects\": 50,\n \"function_color\" : RED,\n \"x_labeled_nums\" :range(0,10,1),\n \"y_labeled_nums\" :range(0,1,1)\n\n }\n\n def construct(self):\n self.x1=0.98\n self.x2=1.02\n self.setup_axes(animate=True)\n func_graph=self.get_graph(self.func_to_graph,self.function_color)\n func_graph1=self.get_graph(self.func_to_graph1,self.function_color,0,self.x1)\n func_graph2=self.get_graph(self.func_to_graph2,self.function_color,self.x1,self.x2)\n func_graph3=self.get_graph(self.func_to_graph3,self.function_color,self.x2)\n\n #func_graph2=self.get_graph(self.func_to_graph2)\n vert_line = self.get_vertical_line_to_graph(TAU,func_graph,color=YELLOW)\n graph_lab = self.get_graph_label(func_graph, label = \"Fd\")\n #graph_lab2=self.get_graph_label(func_graph2,label = \"\\\\sin(x)\", x_val=-10, direction=UP/2)\n two_pi = TexMobject(\"x = 2 \\\\pi\")\n label_coord = self.input_to_graph_point(TAU,func_graph)\n two_pi.next_to(label_coord,RIGHT+UP)\n\n\n\n #self.play(ShowCreation(func_graph))\n \n #self.play(ShowCreation(vert_line), ShowCreation(graph_lab))#,ShowCreation(two_pi))\n self.play(ShowCreation(func_graph1))\n self.play(ShowCreation(func_graph2))\n self.play(ShowCreation(func_graph3))\n\n\n \n def func_to_graph(self,x):\n y=x*0\n x1=0.98\n x2=1.02\n if x>=x1 and x=x2:\n y=x*0+0.8;\n\n return y\n\n def func_to_graph1(self,x):\n y=x*0\n return y\n \n def func_to_graph2(self,x):\n x1=0.98\n x2=1.02\n y=(x-x1)*0.8/(abs(x2-x1));\n return y\n def func_to_graph3(self,x):\n y=(x*0.)+0.8;\n return y\n\n\nclass GraphFromData(GraphScene):\n # Covert the data coords to the graph points\n def get_points_from_coords(self,coords):\n return [\n # Convert COORDS -> POINTS\n self.coords_to_point(px,py)\n # See manimlib/scene/graph_scene.py\n for px,py in coords\n ]\n\n # Return the dots of a set 
of points\n def get_dots_from_coords(self,coords,radius=0.1):\n points = self.get_points_from_coords(coords)\n dots = VGroup(*[\n Dot(radius=radius).move_to([px,py,pz])\n for px,py,pz in points\n ]\n )\n return dots\n\n\nclass PlotStepFow2(GraphFromData):\n CONFIG = {\n \"x_min\": 0,\n \"x_max\": 11,\n \"x_axis_width\": 9/2,\n \"x_tick_frequency\": 1,\n \"x_leftmost_tick\": None, # Change if different from x_min\n \"x_labeled_nums\": None,\n \"x_axis_label\": \"$t[s]$\",\n \"y_min\": 0,\n \"y_max\": 1.0001,\n \"y_axis_height\": 2,\n \"y_tick_frequency\": 0.5,\n \"y_bottom_tick\": None, # Change if different from y_min\n \"y_labeled_nums\": None,\n \"y_axis_label\": \"$F_{d}$\",\n \"axes_color\": GREY,\n \"graph_origin\": 3.0 * DOWN + 6 * LEFT,\n \"exclude_zero_label\": True,\n \"num_graph_anchor_points\": 25,\n \"default_graph_colors\": [BLUE, GREEN, YELLOW],\n \"default_derivative_color\": GREEN,\n \"default_input_color\": YELLOW,\n \"default_riemann_start_color\": BLUE,\n \"default_riemann_end_color\": GREEN,\n \"area_opacity\": 0.8,\n \"num_rects\": 50,\n \"function_color\" : RED,\n \"x_labeled_nums\" :range(0,11,1),\n \"y_labeled_nums\" :[0,0.5,1]\n\n }\n\n def construct(self):\n\n pump = SVGMobject(\"pump_copy\")\n pump.set_fill(WHITE, opacity = 0)\n pump.circle=pump.submobjects[0]\n pump.triangle=pump.submobjects[4]\n pump.line1=pump.submobjects[1]\n pump.line2=pump.submobjects[2]\n pump.bottom=pump.submobjects[3]\n pump.pline = pump.submobjects[5]\n pump.cylinder = pump.submobjects[6]\n pump.head = pump.submobjects[7]\n pump.rod = pump.submobjects[8]\n\n\n\n pump.triangle.set_fill(pump.color, opacity = 1)\n pump.scale(1.5)\n pump.shift(1.5*UP)\n \n arrow = Arrow(ORIGIN+LEFT*3.3+UP*0.3,ORIGIN+UP*2.7+LEFT*2.6)\n\n self.play(FadeIn(pump),FadeIn(arrow))\n\n\n self.setup_axes(animate=True)\n\n coords = [[0,0],[1,0],[1,0.8],[2,0.8],[3,0.8],[4,0.8],[5,0.8],[6,0.8],[7,0.8],[8,0.8],[9,0.8],[10,0.8]]\n points = self.get_points_from_coords(coords)\n # Set graph\n graph = DiscreteGraphFromSetPoints(points,color=ORANGE)\n func_graph=self.get_graph(self.func_to_graph,self.function_color)\n # Set dots\n dots = self.get_dots_from_coords(coords)\n self.play(ShowCreation(graph,run_time=1.5),Rotate(arrow, -PI/4))\n self.wait(2) \n f1 = TextMobject(\"{\\\\Huge $\\\\Rightarrow$}\")\n f1.scale(0.7)\n label_coord1 = self.input_to_graph_point(10,func_graph)\n f1.next_to(label_coord1,6*RIGHT+3*DOWN)\n self.play(FadeIn(f1))\n\n\n self.graph_origin = 3 * DOWN + 2* RIGHT\n self.y_axis_label = \"$p$\"\n self.y_max = 200\n self.y_tick_frequency=50\n self.y_labeled_nums = range(0,200,50)\n self.setup_axes(animate=True)\n func_graph2=self.get_graph(self.func_to_graph_bessel,self.function_color,1,10)\n #func_graph3=self.get_inv_graph(lambda y: np.sin(0.05*y),y_min=0,y_max=150)\n #path = self.get_inv_graph(lambda x: ((100*(x-2.5))**80)*100+363+5*np.sin(500*(x-2.5)), x_min=2.5, x_max=2.51)\n #path = self.get_inv_graph(lambda y: ((100*(y-2.5))**80)*100+363+5*np.sin(500*(y-2.5)), y_min=2.5, y_max=2.51)\n v = np.arange(100)/10\n vv = (v+0.3*np.sin(3*v))*15+363\n vv2 = (v+0.3*np.sin(3*v))*15+473\n z = np.ones((100))*2.51\n coords2 = np.stack((z, vv), axis=-1).tolist()\n coords3 = np.stack((z, vv2), axis=-1).tolist()\n\n points2 = self.get_points_from_coords(coords2)\n points3 = self.get_points_from_coords(coords3)\n\n path = DiscreteGraphFromSetPoints(points2,color=ORANGE)\n path2 = DiscreteGraphFromSetPoints(points3,color=ORANGE)\n\n location = self.coords_to_point(2.5,363) #location: Point\n 
self.play(MoveAlongPath(pump.head,path,run_time=5),MoveAlongPath(pump.rod,path2,run_time=5),ShowCreation(func_graph2,run_time=5))\n #self.play(MoveAlongPath(pump.head, path,run_time=2))\n\n #self.play(ShowCreation(func_graph2,run_time=2))\n\n\n\n\n def get_points_from_coords(self,coords):\n return [\n # Convert COORDS -> POINTS\n self.coords_to_point(px,py)\n # See manimlib/scene/graph_scene.py\n for px,py in coords\n ]\n\n def func_to_graph(self,x):\n y=x*0\n x1=0.98\n x2=1.02\n if x>=x1 and x=x2:\n y=x*0+0.8;\n\n return y\n def func_to_graph_bessel(self,x):\n return -150*np.sin(3*(x-0.9))/(3*(x-0.9))+150\n\n return y\n\n def get_inv_graph(\n self, func,\n color=None,\n y_min=None,\n y_max=None,\n **kwargs\n ):\n if color is None:\n color = next(self.default_graph_colors_cycle)\n if y_min is None:\n y_min = self.x_min\n if y_max is None:\n y_max = self.x_max\n\n def parameterized_function(alpha):\n y = interpolate(y_min, y_max, alpha)\n x = func(y)\n if not np.isfinite(x):\n x = self.x_max\n return self.coords_to_point(x, y)\n\n graph = ParametricFunction(\n parameterized_function,\n color=color,\n **kwargs\n )\n graph.underlying_function = func\n return graph\n\nclass DiscreteGraphFromSetPoints(VMobject):\n def __init__(self,set_of_points,**kwargs):\n super().__init__(**kwargs)\n self.set_points_as_corners(set_of_points)\nclass SmoothGraphFromSetPoints(VMobject):\n def __init__(self,set_of_points,**kwargs):\n super().__init__(**kwargs)\n self.set_points_smoothly(set_of_points)\n\n\n\n\n\nif __name__ == \"__main__\":\n # Call this file at command line to make sure all scenes work with version of manim\n # type \"python manim_tutorial_P37.py\" at command line to run all scenes in this file\n #Must have \"import os\" and \"import pyclbr\" at start of file to use this\n ###Using Python class browser to determine which classes are defined in this file\n module_name = 'matteo_manim' #Name of current file\n module_info = pyclbr.readmodule(module_name)\n\n for item in module_info.values():\n if item.module==module_name:\n print(item.name)\n os.system(\"python -m manim matteo_manim.py %s -l\" % item.name) #Does not play files\n\n","sub_path":"matteo_manim.py","file_name":"matteo_manim.py","file_ext":"py","file_size_in_byte":9697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"121031130","text":"# -*- coding: latin-1 -*-\n\"\"\"\n fragments - define text fragments in the document\n\"\"\"\nfrom domain.norm_document.model import Chapter, Section, Norm, Verifier\n\n\nS0901 = Section(\n identifier=\"09.01\",\n title=\"Bedrijfseisen voor toegangsbeveiliging\",\n text=\"Doelstelling: Toegang tot informatie en informatieverwerkende faciliteiten beperken.\",\n fragments=[\n\n Norm(\n identifier=\"09.01.01\",\n title=\"Beleid voor toegangsbeveiliging\",\n text=\"Een beleid voor toegangsbeveiliging behoort te worden vastgesteld, gedocumenteerd en beoordeeld op \"\n \"basis van bedrijfs- en informatiebeveiligingseisen.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.01.01/01\",\n title=\"\",\n text=\"- conform norm -\",\n bbn=1,\n )\n ],\n ),\n\n Norm(\n identifier=\"09.01.02\",\n title=\"Toegang tot netwerken en netwerkdiensten\",\n text=\"Gebruikers behoren alleen toegang te krijgen tot het netwerk en de netwerkdiensten waarvoor zij \"\n \"specifiek bevoegd zijn.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.01.02/01\",\n title=\"\",\n text=\"Alleen geauthenticeerde apparatuur kan toegang krijgen tot een vertrouwde zone.\",\n 
bbn=1,\n ),\n\n Verifier(\n identifier=\"09.01.02/02\",\n title=\"\",\n text=\"Gebruikers met eigen of ongeauthenticeerde apparatuur (Bring Your Own Device) krijgen \"\n \"alleen toegang tot een onvertrouwde zone.\",\n bbn=1,\n ),\n ],\n ),\n\n ],\n)\n\n\nS0902 = Section(\n identifier=\"09.02\",\n title=\"Beheer van toegangsrechten van gebruikers\",\n text=\"Doelstelling: Toegang voor bevoegde gebruikers bewerkstelligen en onbevoegde toegang tot systemen en \"\n \"diensten voorkomen.\",\n fragments=[\n\n Norm(\n identifier=\"09.02.01\",\n title=\"Registratie en afmelden van gebruikers\",\n text=\"Een formele registratie- en afmeldingsprocedure behoort te worden geïmplementeerd om toewijzing \"\n \"van toegangsrechten mogelijk te maken.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.02.01/01\",\n title=\"\",\n text=\"Er is een sluitende formele registratie- en afmeldprocedure voor het beheren van \"\n \"gebruikersidentificaties.\",\n bbn=1,\n ),\n Verifier(\n identifier=\"09.02.01/02\",\n title=\"\",\n text=\"Het gebruiken van groepsaccounts is niet toegestaan tenzij dit wordt gemotiveerd en \"\n \"vastgelegd door de proceseigenaar.\",\n bbn=1,\n ),\n ],\n ),\n\n Norm(\n identifier=\"09.02.02\",\n title=\"Gebruikers toegang verlenen\",\n text=\"Een formele gebruikerstoegangsverleningsprocedure behoort te worden geïmplementeerd om \"\n \"toegangsrechten voor alle typen gebruikers en voor alle systemen en diensten toe te wijzen of \"\n \"in te trekken.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.02.02/01\",\n title=\"\",\n text=\"Er is uitsluitend toegang verleend tot informatiesystemen na autorisatie door een \"\n \"bevoegde functionaris.\",\n bbn=1,\n ),\n Verifier(\n identifier=\"09.02.02/02\",\n title=\"\",\n text=\"Op basis van een risicoafweging is bepaald waar en op welke wijze functiescheiding wordt \"\n \"toegepast en welke toegangsrechten worden gegeven.\",\n bbn=1,\n ),\n Verifier(\n identifier=\"09.02.02/03\",\n title=\"\",\n text=\"Er is een actueel mandaatregister waaruit blijkt welke personen bevoegdheden hebben voor \"\n \"het verlenen van toegangsrechten dan wel functieprofielen.\",\n bbn=2,\n ),\n ],\n ),\n\n Norm(\n identifier=\"09.02.03\",\n title=\"Beheren van speciale toegangsrechten\",\n text=\"Het toewijzen en gebruik van speciale toegangsrechten behoren te worden beperkt en beheerst.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.02.03/01\",\n title=\"\",\n text=\"De uitgegeven speciale bevoegdheden worden minimaal ieder kwartaal beoordeeld.\",\n bbn=2,\n ),\n ],\n ),\n\n Norm(\n identifier=\"09.02.04\",\n title=\"Beheer van geheime authenticatie-informatie van gebruikers\",\n text=\"Het toewijzen van geheime authenticatie-informatie behoort te worden beheerst via een formeel \"\n \"beheersproces.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.02.04/01\",\n title=\"\",\n text=\"- conform norm -\",\n bbn=1,\n ),\n ],\n ),\n\n Norm(\n identifier=\"09.02.05\",\n title=\"Beoordeling van toegangsrechten van gebruikers\",\n text=\"Eigenaren van bedrijfsmiddelen behoren toegangsrechten van gebruikers regelmatig te beoordelen.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.02.05/01\",\n title=\"\",\n text=\"Alle uitgegeven toegangsrechten worden minimaal eenmaal per jaar beoordeeld.\",\n bbn=1,\n ),\n Verifier(\n identifier=\"09.02.05/02\",\n title=\"\",\n text=\"De opvolging van bevindingen is gedocumenteerd en wordt behandeld als beveiligingsincident.\",\n bbn=1,\n ),\n Verifier(\n identifier=\"09.02.05/03\",\n title=\"\",\n 
text=\"Alle uitgegeven toegangsrechten worden minimaal eenmaal per halfjaar beoordeeld.\",\n bbn=2,\n ),\n ],\n ),\n\n Norm(\n identifier=\"09.02.06\",\n title=\"Toegangsrechten intrekken of aanpassen\",\n text=\"De toegangsrechten van alle medewerkers en externe gebruikers voor informatie en \"\n \"informatieverwerkende faciliteiten behoren bij beëindiging van hun dienstverband, contract of \"\n \"overeenkomst te worden verwijderd, en bij wijzigingen behoren ze te worden aangepast.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.02.06/01\",\n title=\"\",\n text=\"- conform norm -\",\n bbn=1,\n ),\n ],\n ),\n\n ],\n)\n\n\nS0903 = Section(\n identifier=\"09.03\",\n title=\"Verantwoordelijkheden van gebruikers\",\n text=\"Doelstelling: Gebruikers verantwoordelijk maken voor het beschermen van hun authenticatie-informatie.\",\n fragments=[\n\n Norm(\n identifier=\"09.03.01\",\n title=\"Geheime authenticatie-informatie gebruiken\",\n text=\"Van gebruikers behoort te worden verlangd dat zij zich bij het gebruiken van geheime \"\n \"authenticatie-informatie houden aan de praktijk van de organisatie.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.03.01/01\",\n title=\"\",\n text=\"Medewerkers worden ondersteund in het beheren van hun wachtwoorden door het beschikbaar \"\n \"stellen van een wachtwoordenkluis.\",\n bbn=2,\n ),\n ],\n ),\n\n ],\n)\n\n\nS0904 = Section(\n identifier=\"09.04\",\n title=\"Toegangsbeveiliging van systeem en toepassing\",\n text=\"Doelstelling: Onbevoegde toegang tot systemen en toepassingen voorkomen.\",\n fragments=[\n\n Norm(\n identifier=\"09.04.01\",\n title=\"Beperking toegang tot informatie\",\n text=\"Toegang tot informatie en systeemfuncties van toepassingen behoort te worden beperkt in \"\n \"overeenstemming met het beleid voor toegangsbeveiliging.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.04.01/01\",\n title=\"\",\n text=\"Er zijn maatregelen genomen die het fysiek en/of logisch isoleren van informatie met \"\n \"specifiek belang waarborgen.\",\n bbn=2,\n ),\n Verifier(\n identifier=\"09.04.01/02\",\n title=\"\",\n text=\"Gebruikers kunnen alleen die informatie met specifiek belang inzien en verwerken die \"\n \"ze nodig hebben voor de uitoefening van hun taak.\",\n bbn=2,\n ),\n ],\n ),\n\n Norm(\n identifier=\"09.04.02\",\n title=\"Beveiligde inlogprocedures\",\n text=\"Indien het beleid voor toegangsbeveiliging dit vereist, behoort toegang tot systemen en \"\n \"toepassingen te worden beheerst door een beveiligde inlogprocedure.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.04.02/01\",\n title=\"\",\n text=\"Als vanuit een onvertrouwde zone toegang wordt verleend naar een vertrouwde zone, gebeurt \"\n \"dit alleen op basis van minimaal two-factor authenticatie.\",\n bbn=1,\n ),\n Verifier(\n identifier=\"09.04.02/02\",\n title=\"\",\n text=\"Voor het verlenen van toegang tot het netwerk door externe leveranciers wordt vooraf een \"\n \"risicoafweging gemaakt. De risicoafweging bepaalt onder welke voorwaarden de leveranciers \"\n \"toegang krijgen. 
Uit een registratie blijkt hoe de rechten zijn toegekend.\",\n bbn=2,\n ),\n ],\n ),\n\n Norm(\n identifier=\"09.04.03\",\n title=\"Systeem voor wachtwoordbeheer\",\n text=\"Systemen voor wachtwoordbeheer behoren interactief te zijn en sterke wachtwoorden te waarborgen.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.04.03/01\",\n title=\"\",\n text=\"Als er geen gebruik wordt gemaakt van two factor authentication is de wachtwoordlengte \"\n \"minimaal 8 posities en complex van samenstelling. Vanaf een wachtwoordlengte van \"\n \"20 posities vervalt de complexiteitseis. Het aantal inlogpogingen is maximaal 10. \"\n \"De tijdsduur dat een account wordt geblokkeerd na overschrijding van het aantal keer \"\n \"foutief inloggen is vastgelegd.\",\n bbn=1,\n ),\n Verifier(\n identifier=\"09.04.03/02\",\n title=\"\",\n text=\"In situaties waar geen two-factor authenticatie mogelijk is, wordt minimaal halfjaarlijks \"\n \"het wachtwoord vernieuwd (zie ook 09.04.02/01.).\",\n bbn=2,\n ),\n Verifier(\n identifier=\"09.04.03/03\",\n title=\"\",\n text=\"Het wachtwoordbeleid wordt geautomatiseerd afgedwongen.\",\n bbn=2,\n ),\n Verifier(\n identifier=\"09.04.03/04\",\n title=\"\",\n text=\"Initiële wachtwoorden en wachtwoorden die gereset zijn, hebben een maximale geldigheidsduur \"\n \"van een werkdag en moeten bij het eerste gebruik worden gewijzigd.\",\n bbn=2,\n ),\n Verifier(\n identifier=\"09.04.03/05\",\n title=\"\",\n text=\"Wachtwoorden die voldoen aan het wachtwoordbeleid hebben een maximale geldigheidsduur \"\n \"van een jaar. Daar waar het beleid niet toepasbaar is, geldt een maximale geldigheidsduur \"\n \"van 6 maanden.\",\n bbn=2,\n ),\n ],\n ),\n\n Norm(\n identifier=\"09.04.04\",\n title=\"Speciale systeemhulpmiddelen gebruiken\",\n text=\"Het gebruik van systeemhulpmiddelen die in staat zijn om beheersmaatregelen voor systemen en \"\n \"toepassingen te omzeilen behoort te worden beperkt en nauwkeurig te worden gecontroleerd.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.04.04/01\",\n title=\"\",\n text=\"Alleen bevoegd personeel heeft toegang tot systeemhulpmiddelen.\",\n bbn=1,\n ),\n Verifier(\n identifier=\"09.04.04/02\",\n title=\"\",\n text=\"Het gebruik van systeemhulpmiddelen wordt gelogd. 
\"\n \"De logging is een halfjaar beschikbaar voor onderzoek.\",\n bbn=2,\n ),\n ],\n ),\n\n Norm(\n identifier=\"09.04.05\",\n title=\"Toegangsbeveiliging op programmabroncode\",\n text=\"Toegang tot de programmabroncode behoort te worden beperkt.\",\n bbn=1,\n fragments=[\n Verifier(\n identifier=\"09.04.05/01\",\n title=\"\",\n text=\"- conform norm -\",\n bbn=1,\n ),\n ],\n ),\n\n ],\n)\n\n\nCH09 = Chapter(\n identifier=\"09\",\n title=\"Toegangsbeveiliging\",\n fragments=[\n S0901,\n S0902,\n S0903,\n S0904,\n ]\n)\n","sub_path":"python/domain/bir2017/content/ch09.py","file_name":"ch09.py","file_ext":"py","file_size_in_byte":14659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"190587823","text":"\"\"\" monkey patch some Python 3.7/3.8 stuff into earlier versions \"\"\"\n\nimport re\nimport sys\nimport asyncio\nimport warnings\nimport logging\n\n\ndef asyncio_run(task, debug=False):\n try:\n loop = asyncio.get_event_loop()\n except Exception:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n if debug:\n loop.set_debug(True)\n logging.getLogger('asyncio').setLevel(logging.DEBUG)\n warnings.filterwarnings('always')\n else:\n loop.set_debug(False)\n logging.getLogger('asyncio').setLevel(logging.WARNING)\n warnings.filterwarnings('default')\n\n response = loop.run_until_complete(task)\n\n loop.run_until_complete(loop.shutdown_asyncgens())\n\n return response\n\n\ndef task_get_name(self):\n \"\"\" asyncio.tasks.Task.get_name \"\"\"\n\n match = re.search(r\"coro=<(\\S+)\", repr(self))\n return match.group(1).replace('.', '')\n\n\nasync def wait_gracefully(tasks, timeout=None):\n \"\"\"\n wait for tasks to complete issuing cancels to any still pending until done\n to ensure exceptions and results are always consumed\n \"\"\"\n\n while True:\n done, pending = await asyncio.wait(tasks, timeout=timeout)\n\n for t in done:\n if t.exception():\n print(\"exception:\", task_get_name(t), t.exception())\n elif t.result():\n print(\"result:\", task_get_name(t), t.result())\n\n if not pending:\n break\n\n for t in pending:\n t.cancel()\n\n tasks = pending\n\n\ndef patch():\n \"\"\" monkey patch some Python 3.7/3.8 stuff into earlier versions \"\"\"\n\n version = sys.version_info.major * 10 + sys.version_info.minor\n\n if version < 37:\n asyncio.get_running_loop = asyncio.get_event_loop\n asyncio.create_task = asyncio.ensure_future\n asyncio.current_task = asyncio.Task.current_task\n asyncio.all_tasks = asyncio.Task.all_tasks\n asyncio.run = asyncio_run\n asyncio.tasks.Task.get_name = task_get_name\n","sub_path":"aiotools.py","file_name":"aiotools.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"263523013","text":"# -*- coding: utf-8 -*-\n\"\"\"\npublic_data_utils.py: Functions related specifically to the public datasets.\n\nThe goal of these functions is to create generic pandas dataframes that can be further processed\nusing functions in evaluate_dataset.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom scipy.io import loadmat\n\nfrom evaluate_dataset import format_anchors_df, format_data_df\nfrom evaluate_dataset import add_gt_raw, apply_distance_gt\nfrom trajectory_creator import get_trajectory\n\n# Need to give different systems a name.\ngt_system_id = \"GT\"\nrange_system_id = \"Range\"\ngt_anchor_id = \"GT\"\n\n# time intervals of zig zag trajectory in which movement is roughly 
linear.\nTIME_RANGES = [\n (325, 350), # backward\n (375, 393), # forward\n (412, 445),\n (464, 484),\n (505, 534),\n (557, 575),\n (597, 624),\n (640, 670),\n (840, 867),\n (885, 908),\n (928, 961),\n (981, 1003),\n (1027, 1057),\n (1075, 1095),\n (1120, 1140),\n (1160, 1180),\n (1200, 1230),\n (1250, 1270),\n (1290, 1322),\n (1342, 1358),\n]\n\n\ndef read_dataset(filename, verbose=False):\n traj = get_trajectory(filename)\n\n dataname = filename.split('/')[-1].split('.')[0]\n t_window = 1.0\n min_time = 0\n max_time = 10000\n if dataname == 'uah1':\n t_window = 1.0\n min_time = 0\n max_time = 1000\n elif dataname == 'Plaza1':\n t_window = 0.1\n min_time = 0 #20 straight lines\n max_time = 1400 # 20 straight lines\n #min_time = 325 # first line\n #max_time = 350 # first line\n #min_time = 374 # second line\n #max_time = 395 # second line\n elif dataname == 'Plaza2':\n t_window = 0.1\n min_time = 45.1\n period = 101 - 45\n num_loops = 2\n max_time = min_time + num_loops * period\n traj.period = period\n elif dataname == 'Gesling1':\n t_window = 2.0\n min_time = 36\n period = 140 - 36\n num_loops = 2\n max_time = min_time + num_loops * period\n traj.period = period\n elif dataname == 'Gesling2':\n t_window = 2.0\n min_time = 23\n period = 186 - 23\n num_loops = 1\n max_time = min_time + num_loops * period\n traj.period = period\n elif dataname == 'Gesling3':\n t_window = 1\n min_time = 23\n period = 50\n num_loops = 1\n max_time = min_time + num_loops * period\n if not traj.params['full_period']:\n traj.period = 2 * period\n\n try:\n result_dict = loadmat(filename)\n except FileNotFoundError:\n raise FileNotFoundError('Could not find {}. Did you run the script download_datasets?'.format(filename))\n except Exception as e:\n print('Unknown reading error with {}. Check if the file looks ok.'.format(filename))\n raise e\n print('Successfully read {}'.format(filename))\n\n full_df, anchors_df = prepare_dataset(result_dict,\n range_system_id,\n gt_system_id, [min_time, max_time],\n t_window,\n verbose=verbose)\n return full_df, anchors_df, traj\n\n\ndef get_plotting_params(filename):\n xlim = ylim = (None, None)\n dataname = filename.split('/')[-1].split('.')[0]\n if dataname == 'uah1':\n xlim = 0, 50\n ylim = -20, 20\n elif dataname == 'Plaza1':\n xlim = -50, 10\n ylim = -20, 75\n elif dataname == 'Plaza2':\n xlim = -80, 10\n ylim = -15, 75\n elif 'Gesling' in dataname:\n xlim = -2, 50\n ylim = -2, 120\n return xlim, ylim\n\n\ndef create_anchors_df(anchor_data):\n \"\"\" Create standard anchors dataframe. \n\n :param anchors_data: anchors data read from .mat file ('TL' field).\n \"\"\"\n anchors_df = pd.DataFrame(columns=['anchor_id', 'system_id', 'px', 'py', 'pz'])\n anchor_ids = np.unique(anchor_data[:, 0])\n for i, anchor_id in enumerate(anchor_ids):\n anchors_df.loc[i, 'anchor_id'] = anchor_id\n anchors_df.loc[i, 'system_id'] = range_system_id\n\n # it is weird that there is more than one value for each anchor, it looks\n # like this was a bug in the dataset. we make sure they are all\n # the same and pick the first.\n px_values = np.unique(anchor_data[anchor_data[:, 0] == anchor_id, 1])\n py_values = np.unique(anchor_data[anchor_data[:, 0] == anchor_id, 2])\n assert len(px_values) == 1\n assert len(py_values) == 1\n anchors_df.loc[i, 'px'] = px_values[0]\n anchors_df.loc[i, 'py'] = py_values[0]\n\n return anchors_df\n\n\ndef create_full_df(range_data, gt_data, time_range=None):\n \"\"\"\" Create full dataframe. 
\"\"\"\n mask = np.ones(len(range_data), dtype=bool)\n if time_range is not None:\n times = range_data[:, 0]\n times -= min(times)\n mask = (times > time_range[0]) & (times < time_range[1])\n if not any(mask):\n print('empty mask!')\n print(min(times), max(times), time_range)\n range_df = pd.DataFrame(columns=['timestamp', 'px', 'py', 'pz', 'distance', 'system_id', 'anchor_id'],\n index=range(np.sum(mask)))\n range_df.loc[:, 'distance'] = range_data[mask, 3]\n range_df.loc[:, 'timestamp'] = range_data[mask, 0]\n range_df.loc[:, 'anchor_id'] = range_data[mask, 2]\n range_df.loc[:, 'system_id'] = range_system_id\n\n mask = np.ones(len(gt_data), dtype=bool)\n if time_range is not None:\n times = gt_data[:, 0]\n times -= min(times)\n mask = (times > time_range[0]) & (times < time_range[1])\n gt_df = pd.DataFrame(columns=range_df.columns)\n gt_df.loc[:, 'px'] = gt_data[mask, 1]\n gt_df.loc[:, 'py'] = gt_data[mask, 2]\n gt_df.loc[:, 'timestamp'] = gt_data[mask, 0]\n gt_df.loc[:, 'anchor_id'] = gt_anchor_id\n gt_df.loc[:, 'system_id'] = gt_system_id\n\n full_df = pd.concat([range_df, gt_df], ignore_index=True)\n full_df.sort_values('timestamp', inplace=True)\n full_df.reset_index(drop=True, inplace=True)\n full_df.loc[:, 'timestamp'] = full_df.timestamp - full_df.timestamp.min()\n return full_df\n\n\ndef prepare_dataset(result_dict, range_system_id, gt_system_id, time_range, t_window, verbose=False):\n min_time, max_time = time_range\n try:\n key_anchor = [key for key in result_dict.keys() if 'TL' in key][0]\n anchor_data = result_dict[key_anchor]\n key_range = [key for key in result_dict.keys() if 'TD' in key][0]\n range_data = result_dict[key_range]\n key_gt = [key for key in result_dict.keys() if 'GT' in key][0]\n gt_data = result_dict[key_gt]\n except KeyError:\n print('Problem reading')\n print(result_dict.keys())\n return\n\n anchors_df = create_anchors_df(anchor_data)\n anchors_df = format_anchors_df(anchors_df, range_system_id=range_system_id, gt_system_id=gt_system_id)\n\n if verbose:\n print('creating full_df...')\n full_df = create_full_df(range_data, gt_data, time_range)\n if len(full_df) == 0:\n raise ValueError('empty data frame')\n full_df = format_data_df(full_df, anchors_df, gt_system_id=gt_system_id, range_system_id=range_system_id)\n if verbose:\n print('...done')\n\n if verbose:\n print('adding ground truth...')\n #full_df = add_gt_raw(full_df, t_window=t_window, gt_system_id=gt_system_id)\n full_df.loc[:, ['px', 'py', 'pz']] = full_df.loc[:, ['px', 'py', 'pz']].fillna(method='ffill', limit=2)\n full_df.loc[:, \"distance_gt\"] = full_df.apply(\n lambda row: apply_distance_gt(row, anchors_df, gt_system_id=gt_system_id), axis=1)\n\n if verbose:\n print('...done')\n return full_df, anchors_df\n\n\ndef get_ground_truth(full_df, times):\n \"\"\" Find one ground truth for each time when we have a distance measurement. 
\n \"\"\"\n ground_truth_pos = full_df.loc[full_df.timestamp.isin(times), ['timestamp', 'px', 'py', 'pz']]\n ground_truth_pos = ground_truth_pos.astype(np.float32)\n ground_truth_pos = ground_truth_pos.groupby('timestamp').agg(np.nanmean)\n ground_truth_pos.reset_index(inplace=True)\n return ground_truth_pos.loc[:, ['px', 'py']]\n\n\ndef plot_distance_errors(this_df, ax=None, **kwargs):\n indices = np.argsort(this_df.distance.values)\n distances = this_df.distance.values[indices]\n distances_gt = this_df.distance_gt.values[indices]\n errors = distances - distances_gt\n\n # a quick hack for calculating the variance.\n error_df = pd.DataFrame({'e': errors, 'd': distances_gt})\n error_df.sort_values('d', inplace=True)\n variances = error_df.e.rolling(10).std().values\n print('mean std', np.nanmean(variances))\n print('median std', np.nanmedian(variances))\n\n if ax is None:\n fig, ax = plt.subplots()\n fig.set_size_inches(5, 2)\n ax.scatter(distances_gt, errors, alpha=0.5, **kwargs)\n ax.set_xlabel('real distance [m]')\n ax.set_ylabel('distance error [m]')\n return ax, errors, distances_gt\n\n\ndef plot_distance_times(full_df):\n range_ids = full_df[full_df.system_id == range_system_id].anchor_id.unique()\n fig, axs = plt.subplots(len(range_ids), sharex=True)\n fig.set_size_inches(10, 10)\n for i, anchor_id in enumerate(sorted(range_ids)):\n this_df = full_df[full_df.anchor_id == anchor_id]\n axs[i].scatter(this_df.timestamp, this_df.distance, color='red', label='measured distance')\n axs[i].scatter(this_df.timestamp, this_df.distance_gt, color='green', label='real distance')\n axs[i].legend(loc='upper right')\n axs[i].set_title('anchor {}'.format(anchor_id))\n axs[i].set_ylabel('distance [m]')\n axs[i].set_xlabel('time [s]')\n return fig, axs\n","sub_path":"source/public_data_utils.py","file_name":"public_data_utils.py","file_ext":"py","file_size_in_byte":9731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"644690764","text":"from ursina import *\r\nfrom ursina.prefabs.platformer_controller_2d import PlatformerController2d\r\n\r\napp = Ursina()\r\n\r\nwindow.title = \"Platformer\"\r\nwindow.color = rgb(100, 170, 208)\r\nwindow.fullscreen = True\r\n\r\ncolor_grass = rgb(43, 130, 0)\r\ncolor_finish_1 = color.black\r\ncolor_finish_2 = color.white\r\ncolor_platforms_1 = rgb(181, 22, 22)\r\ncolor_platforms_2 = rgb(91, 22, 181)\r\n\r\nlast_x = 0\r\npre_finish = 7\r\ndeaths = 0\r\n\r\nplayer = PlatformerController2d(\r\n scale = (1, 1),\r\n position = (0, 5), \r\n collision = True,\r\n color = rgb(255, 139, 0),\r\n )\r\n\r\ncontrol_help = Entity(\r\n position = (-8, 2),\r\n model = \"quad\",\r\n scale = (8,6),\r\n texture = \"images/control\"\r\n )\r\n\r\ncontrol_help = Entity(\r\n position = (-7.3, -1),\r\n model = \"quad\",\r\n scale = (6, 2),\r\n texture = \"images/control2\"\r\n )\r\n\r\ngimp_easter_egg = Entity(\r\n position = (-10.5, -5.5),\r\n model = \"quad\",\r\n scale = (2, 2),\r\n texture = \"images/gimp\"\r\n )\r\n\r\ntest = Text(text = \"Deaths: 0\", x = -0.82, y = .4, scale = 1, origin = (0,0))\r\n\r\ncamera.add_script(SmoothFollow(target = player, offset = [0.5, -35], speed = 6))\r\n\r\nmusic_game = Audio(\"audio/audio_fon\", pitch = 1, loop = True, autoplay = True)\r\n\r\non_off_switch = ButtonGroup((\"music off\", \"music on\"), min_selection = 1, x = -0.8885, y = .5, default=\"music on\", selected_color = color.red)\r\n\r\n\r\ndef on_value_changed():\r\n print(\"turn:\", on_off_switch.value)\r\n if on_off_switch.value == 
[\"music off\"]:\r\n music_game.pause()\r\n elif on_off_switch.value == [\"music on\"]:\r\n music_game.resume()\r\non_off_switch.on_value_changed = on_value_changed\r\n\r\n\r\ndef input(key):\r\n if key == \"enter\":\r\n global deaths\r\n deaths += 1\r\n test.text = \"Deaths: \" + str(deaths)\r\n player.position = (0, 2)\r\n if key == \"escape\":\r\n sys.exit()\r\n\r\n\r\ndef world(x = 0, y = 0, columns = 0):\r\n def builder(amount_map_block, color_map_up, times):\r\n global last_x\r\n if last_x > 0:\r\n block_map_x = last_x\r\n else:\r\n block_map_x = x\r\n for i in range(times):\r\n if amount_map_block == pre_finish:\r\n amount_map_block = pre_finish\r\n else:\r\n amount_map_block = random.randint(4, 6)\r\n block_map_y = y\r\n for i in range(amount_map_block):\r\n block = Entity(\r\n position = (block_map_x, block_map_y),\r\n model = \"quad\",\r\n collider = \"box\",\r\n scale = (1,1),\r\n color = rgb(89, 87, 86),\r\n )\r\n block_map_y += 1\r\n for i in range(2):\r\n block = Entity(\r\n position = (block_map_x, block_map_y),\r\n model = \"quad\",\r\n collider = \"box\",\r\n scale = (1,1),\r\n color = rgb(89, 32, 3),\r\n )\r\n block_map_y += 1\r\n\r\n for i in range(1):\r\n block = Entity(\r\n position = (block_map_x, block_map_y),\r\n model = \"quad\",\r\n collider = \"box\",\r\n scale = (1,1),\r\n color = color_map_up,\r\n )\r\n block_map_y += 1\r\n block_map_x += 1\r\n last_x = block_map_x\r\n\r\n\r\n def finish():\r\n finish_flag = Entity(\r\n position = (last_x + 3.3 , 2.8),\r\n model = \"quad\",\r\n scale = (8, 4.8),\r\n texture = \"images/finish_flag_img\"\r\n )\r\n builder(pre_finish, color_finish_1, 1)\r\n builder(pre_finish, color_finish_2, 1)\r\n builder(pre_finish, color_finish_1, 1)\r\n builder(pre_finish, color_finish_2, 1)\r\n builder(pre_finish, color_finish_1, 1)\r\n builder(pre_finish, color_finish_2, 1)\r\n builder(pre_finish, color_finish_1, 1)\r\n\r\n\r\n def draw_platform(start_x_platform, start_y_platform, color_platform):\r\n platforms = columns // 20\r\n platform_block_x = x + start_x_platform\r\n platform_block_y = y + start_y_platform\r\n for i in range(platforms):\r\n amount_platform_block = random.randint(5, 8)\r\n for i in range(amount_platform_block):\r\n if last_x - platform_block_x > 10:\r\n block = Entity(\r\n position = (platform_block_x, platform_block_y),\r\n model = \"quad\",\r\n collider = \"box\",\r\n scale = (1,1),\r\n color = color_platform,\r\n )\r\n else:\r\n block = Entity(\r\n position = (platform_block_x, platform_block_y),\r\n model = \"quad\",\r\n collider = \"box\",\r\n collision = False,\r\n scale = (1,1),\r\n color = color.clear,\r\n )\r\n platform_block_x += 1\r\n platform_block_x += random.randint(20, 22)\r\n\r\n\r\n builder(0, color_grass, columns)\r\n builder(pre_finish, color_grass, 3)\r\n finish()\r\n draw_platform(8, 11, color_platforms_1)\r\n draw_platform(16, 15, color_platforms_2)\r\nworld(0, -9, 90)\r\n\r\napp.run()","sub_path":"URS_11_11/Ursina_platformer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"443042113","text":"import queue\n\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\n\nclass Event:\n def __init__(self, begin, end, description, v):\n self.b = begin\n self.e = end\n self.description = description\n self.v = v\n\n def update_description(self, description):\n self.description = description\n\n def get_minutes(self):\n return (self.e - self.b).days * 24 * 60 + (self.e - 
self.b).seconds / 60\n\n def print(self):\n print(\"时间:%s - %s;事件:%s 用时 %.2f 小时\" %\n (self.b, self.e, self.description, self.get_minutes() / 60))\n\n\ndef get_centers(sample, column):\n cluster = []\n\n for i in range(len(sample)):\n cluster.append(sample.iloc[i][column])\n\n cluster = np.array(cluster).reshape((-1, 1))\n\n estimator = KMeans(n_clusters=2) # 构造聚类器\n estimator.fit(cluster) # 聚类\n centroids = estimator.cluster_centers_ # 获取聚类中心\n centers = centroids.reshape(-1)\n centers.sort()\n return centers\n\n\ndef get_work_events(df, threshold, ref_column, ignore_time, break_time):\n # FIXME 降低复杂性到 O(n)\n tags = []\n events = []\n for i in range(len(df)):\n if df.iloc[i][ref_column] > threshold:\n tags.append(1)\n else:\n tags.append(0)\n\n e = Event(df.iloc[0][\"datetime\"], -1, -1, tags[0])\n i = 1\n while True:\n if i >= len(df):\n break\n if tags[i] != e.v or i == len(df) - 1:\n e.e = df.iloc[i - 1][\"datetime\"] + \\\n (df.iloc[i][\"datetime\"] - df.iloc[i - 1][\"datetime\"]) / 2\n if e.v == 1:\n e.update_description(\"开机\")\n else:\n if e.get_minutes() < break_time:\n e.update_description(\"休息\")\n else:\n e.update_description(\"停机\")\n events.append(e)\n e = Event(e.e, -1, -1, tags[i])\n i += 1\n\n find = True\n new_events = []\n while find:\n find = False\n new_events.append(events[0])\n for i in range(1, len(events) - 1):\n if events[i].get_minutes() < ignore_time:\n find = True\n new_events[-1].e = events[i + 1].e\n end = i + 1\n break\n else:\n new_events.append(events[i])\n if find:\n for i in range(end + 1, len(events)):\n new_events.append(events[i])\n else:\n new_events.append(events[-1])\n\n events, new_events = new_events, []\n\n return tags, events\n\n\nclass EnergyStatusCluster:\n def __init__(self, data_df, column, threshold, ignore_time, break_time):\n self.df = data_df\n self.column = column\n self.threshold = threshold\n self.ignore_time = ignore_time\n self.break_time = break_time\n self.event_queue = []\n self.begin_index, self.end_index = None, None\n # self.event_queue = queue.Queue()\n return\n\n def analyze(self):\n centers = get_centers(self.df, self.column)\n\n threshold = (1 - self.threshold) * centers[0] + self.threshold * centers[1]\n\n tags, es = get_work_events(self.df, threshold, self.column, self.ignore_time, self.break_time)\n self.df[\"tag\"] = tags\n self.event_queue = es\n\n def get_energy_time(self):\n # for e in self.event_queue:\n # e.print()\n ret = []\n q = self.event_queue\n begin_time, end_time = None, None\n for i in range(1, len(q)):\n if begin_time is None:\n if q[i - 1].description == \"停机\" and q[i].description == \"开机\":\n begin_time = q[i].b\n self.begin_index = i\n else:\n if end_time is not None:\n break\n if q[i - 1].description == \"开机\" and q[i].description == \"停机\":\n end_time = q[i].b\n self.end_index = i\n\n return [begin_time, end_time]\n\n def get_break_info(self):\n if self.begin_index is None:\n self.get_energy_time()\n nums, time = 0, 0\n if self.begin_index is None:\n return [0, 0]\n if self.end_index is None:\n self.end_index = len(self.event_queue)\n for i in range(self.begin_index, self.end_index):\n if self.event_queue[i].description == \"休息\":\n nums += 1\n time += self.event_queue[i].get_minutes()\n\n return [nums, time / 60]\n","sub_path":"algorithm/base/cluster/energy_status_cluster.py","file_name":"energy_status_cluster.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"216181872","text":"import execjs\nfrom 
Crypto.Cipher import AES\nfrom binascii import b2a_hex, a2b_hex\n\n\ndef js_aes(text):\n    jscode = \"\"\"\n    function encryptByAES(pwd) {\n        var cryptoJS = require(\"crypto-js\");\n\n        let i = cryptoJS.enc.Utf8.parse(\"12345678901234561234567890123456\");\n        let t = cryptoJS.enc.Utf8.parse(pwd);\n        let o = cryptoJS.enc.Utf8.parse(\"1234567890123456\");\n        return cryptoJS.AES.encrypt(t, i, {\n            iv: o,\n            mode: cryptoJS.mode.CBC,\n            padding: cryptoJS.pad.Pkcs7\n        }).ciphertext.toString()\n    }\n    \"\"\"\n    ctx = execjs.compile(jscode)\n    encrypto = ctx.call(\"encryptByAES\", text)\n\n    return encrypto\n\n\ndef py_aes(text):\n    key = b\"12345678901234561234567890123456\"  # key length must be 16, 24 or 32 bytes (32 here, i.e. AES-256)\n    text = text.encode(\"utf-8\")\n\n    cryptor = AES.new(key, AES.MODE_CBC, iv=b\"1234567890123456\")\n    pad = 16 - len(text) % 16\n    text = text + (chr(pad) * pad).encode(\"utf-8\")  # equivalent to padding: cryptoJS.pad.Pkcs7 on the JS side\n    ciphertext = cryptor.encrypt(text)\n\n    return b2a_hex(ciphertext).decode(\"utf-8\")\n\n\ntext = \"!abc123你好\"\njs_res = js_aes(text)\npy_res = py_aes(text)\n\nprint(js_res == py_res)\nprint(js_res)\nprint(py_res)\n","sub_path":"项目模块/网易云/网易云/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"203030214","text":"from uuid import uuid4\nfrom typing import Dict\n\nfrom flask import Flask, jsonify\nfrom flask import request, redirect\nfrom flask_socketio import SocketIO, emit\n\nfrom flask_cors import CORS\n\nfrom Game.Components.GameState import GameState\nfrom Game.Session.Words import generate_word_session\nfrom Game.Views.GameStateView import game_state_str\nfrom Game.Views.PlayerView import player_view_state, PlayerView\nfrom Game.Components.Player import Player\nfrom Game.Modules.EventEnum import GameEventState\nfrom Game.Systems.GameStateSystem import add_player, add_deck_to_game, deal_to_players, generate_player\nfrom Database.database import build_game_state_table, upsert_game_state_in_db, get_game_state_in_db\n\napp = Flask(__name__)\nCORS(app,resources={r\"/*\":{\"origins\":\"*\"}})\nsocketio = SocketIO(app,cors_allowed_origins=\"*\")\n# socketio = SocketIO(app)\n\nbuild_game_state_table()\n\nMOCK_REDIS_CACHE: Dict[str, GameState] = dict()\nSITE_URL = \"http://127.0.0.1:5000\"\n\n@app.route(\"/\")\ndef hello_world():\n    return '''Hello, backend is alive.'''\n\n\n@app.route(\"/create\")\ndef create_game():\n    gs = GameState()\n    gs.session = str(uuid4())\n    gs.game_code = generate_word_session(3).lower()\n    gs.game_event_state = GameEventState.WAITING_FOR_PLAYERS_TO_JOIN\n    update_redis_cache(game_state=gs)\n    join_link = f'/join/{gs.game_code}'\n    return jsonify({\n        'game_code': gs.game_code.lower(),\n        'join_link': join_link\n    })\n\n\n@app.route(\"/join\")\ndef join_game():\n    if len(request.args) == 0:\n        return f'''\n            
<form action=\"/join\">\n            <input type=\"text\" name=\"gamecode\"/>\n            <input type=\"submit\" value=\"Join\"/>\n            </form>
\n '''\n game_code: str = request.args.get('gamecode','')\n return redirect(f'/join/{game_code.lower()}', code=302)\n\n\n@app.route(\"/join/\")\ndef join_game_with_session_id(game_code):\n gs = get_redis_cache(game_code.lower())\n \n if gs.game_event_state != GameEventState.WAITING_FOR_PLAYERS_TO_JOIN:\n return f'
<h1>Game is not accepting new players</h1>
'\n \n if gs:\n if len(request.args) == 0:\n return f'''
<h1>No Arguments</h1>
'''\n nick_name = request.args.get('nick_name')\n new_player = generate_player(name=nick_name)\n new_gs = add_player(gs, new_player)\n update_redis_cache(new_gs)\n game_link = f'/game/{game_code.lower()}/player/{new_player.uuid}'\n return jsonify({\n 'game_link': game_link,\n 'new_player_uuid': new_player.uuid,\n 'nick_name': nick_name\n })\n return f'
<h1>Game Does Not Exist</h1>
'\n\n\n@app.route(\"/game//player/\")\ndef game_session(game_code: str, player_uuid: str):\n game_state = get_redis_cache(game_code)\n player_view = player_view_state(game_state, player_uuid)\n return player_view.json()\n\n@socketio.on(\"connect\")\ndef connected():\n \"\"\"event listener when client connects to the server\"\"\"\n print(request.sid)\n print(\"client has connected\")\n emit(\"connect\",{\"data\":f\"id: {request.sid} is connected\"})\n\n@socketio.on('data')\ndef handle_message(data):\n \"\"\"event listener when client types a message\"\"\"\n print(\"data from the front end: \",str(data))\n emit(\"data\",{'data':data,'id':request.sid},broadcast=True)\n\n@socketio.on(\"disconnect\")\ndef disconnected():\n \"\"\"event listener when client disconnects to the server\"\"\"\n print(\"user disconnected\")\n emit(\"disconnect\",f\"user {request.sid} disconnected\",broadcast=True)\n\n\ndef update_redis_cache(game_state: GameState):\n game_code = game_state.game_code.lower()\n upsert_game_state_in_db(game_code, game_state.dict(), True)\n\n\ndef get_redis_cache(game_code) -> GameState:\n output = get_game_state_in_db(game_code.lower())\n print(output)\n return GameState(**output)\n\n\nif __name__ == \"__main__\":\n socketio.run(app)\n","sub_path":"backend_code/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"211292557","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import models, fields, api\r\n\r\nclass AccountInvoice(models.Model):\r\n _inherit = 'account.move'\r\n\r\n def get_footer_values(self):\r\n invoice = self.sudo()\r\n response = {}\r\n promissory_note_one = \"\"\"POR ESTE PAGARE ME(NOS) OBLIGO(AMOS) A PAGAR INCONDICIONALMENTE, A LA ORDEN DE\"\"\"\r\n promissory_note_two = \"\"\"EL DÍA \"\"\"+str(self.invoice_date_due)+\"\"\", EN ESTA CIUDAD, O EN CUALQUIER OTRA QUE SEA(MOS) \r\n REQUERIDO(OS) A ELECCION DEL TENEDOR DE ESTE PAGARE EL DIA DEL VENCIMIENTO INDICADO, LA CANTIDAD DE, \r\n \"\"\"+str(self.amount_residual)+\"\"\" (\"\"\"+ invoice._l10n_mx_edi_cfdi_amount_to_text() +\"\"\"\"), VALOR RECIBIDO \r\n EN MERCANCIA A \"\"\"\r\n reiterate = \"\"\"(NUESTRA) ENTERA SATISFACCION, SI NO FUERE PUNTUALMENTE CUBIERTO A SU VENCIMIENTO, PAGARE \r\n INTERESES MORATORIOS HASTA SU LIQUIDACION TOTAL A RAZON DEL % MENSUAL, CULIACÁN SINALOA, A \"\"\"+str(self.invoice_date_due)\r\n\r\n response['promissory_note_one'] = promissory_note_one\r\n response['promissory_note_two'] = promissory_note_two\r\n response['company'] = \"\"\"INDUSTRIAS GUACAMAYA SA DE CV, \"\"\"\r\n response['reiterate'] = reiterate\r\n\r\n return response\r\n\r\n def is_invoice_client(self):\r\n invoice = self\r\n\r\n if 'in_invoice' in invoice.move_type:\r\n return False\r\n if 'out_invoice' in invoice.move_type:\r\n return True\r\n\r\n def calculate_lines_details(self):\r\n self.ensure_one()\r\n invoice = self.sudo()\r\n details_move_lines = {}\r\n\r\n code_iva = \"14020001\"\r\n subtotal_products = 0\r\n subtotal_iva = 0\r\n subtotal_credit = 0\r\n\r\n total_debit = 0\r\n total_credit = 0\r\n\r\n details_move_lines[\"details_product\"] = []\r\n details_move_lines[\"details_tax\"] = []\r\n details_move_lines[\"details_credit\"] = []\r\n details_move_lines[\"details_credit\"] = []\r\n for line in invoice.line_ids:\r\n #extraccion de detalles del producto\r\n if line.product_id:\r\n subtotal_products = subtotal_products + line.debit\r\n details_move_lines[\"details_product\"].append(line)\r\n 
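# the two elif branches below bucket the remaining journal items: lines carrying a tax_line_id accumulate into subtotal_iva / details_tax, and lines with neither product nor tax accumulate into subtotal_credit / details_credit\r\n            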
#Extracción para los detalles de iva\r\n elif not line.product_id and line.tax_line_id:\r\n subtotal_iva = subtotal_iva + line.debit\r\n details_move_lines[\"details_tax\"].append(line)\r\n #Exctracción de otros conceptos\r\n elif not line.product_id and not line.tax_line_id:\r\n subtotal_credit = subtotal_credit + line.credit\r\n details_move_lines[\"details_credit\"].append(line)\r\n\r\n total_debit = subtotal_products + subtotal_iva\r\n total_credit = subtotal_credit\r\n details_move_lines.update({\"subtotal_products\": subtotal_products})\r\n details_move_lines.update({\"subtotal_tax\" : subtotal_iva})\r\n details_move_lines.update({\"total_debit\" : total_debit})\r\n details_move_lines.update({\"total_credit\": total_credit})\r\n\r\n return details_move_lines\r\n\r\n def calculate_no_entrada(self):\r\n #se obtiene el ultimo insertado según las fechas\r\n # response = self.env[\"stock.picking\"].search([('origin','=',self.invoice_origin),('partner_id','=',self.partner_id.id),('date_done', '<=',self.invoice_date)], order= 'date_done desc', limit=1)\r\n if self.invoice_line_ids:\r\n purchase_orders = []\r\n stock_picking = []\r\n for line in self.invoice_line_ids:\r\n if line.purchase_order_id:\r\n if line.purchase_order_id not in purchase_orders:\r\n purchase_orders.append(line.purchase_order_id)\r\n response = self.env[\"stock.picking\"].search(\r\n [('origin', '=', line.purchase_order_id.name), ('partner_id', '=', self.partner_id.id),\r\n ('state', '=', 'done')], order='date_done desc', limit=1)\r\n if response:\r\n stock_picking.append(response.name)\r\n if stock_picking:\r\n stock_picking_values = \", \".join(stock_picking)\r\n return stock_picking_values\r\n return {}\r\n\r\n\r\n def action_post(self):\r\n super(AccountInvoice, self).action_post()\r\n if 'out_invoice' in self.move_type:\r\n self.action_process_edi_web_services()\r\n\r\nclass CustomAccountMoveLine(models.Model):\r\n _inherit = \"account.move.line\"\r\n\r\n discount_promotions = fields.Float(string='Descuento Promociones (%)', digits='Discount', default=0.0, readonly=True)","sub_path":"invoice_pdf_customization/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"317286095","text":"#!/usr/bin/env python\n#-*- mode: Python;-*-\n\nimport atexit\nimport json\nimport logging\nimport os\nimport sys\nimport tempfile\nimport traceback\n\nimport click\n\nfrom kubedrctl.cli import context\n\ncmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), 'commands'))\n\nclass MyCLI(click.MultiCommand):\n def list_commands(self, ctx):\n rv = []\n for filename in os.listdir(cmd_folder):\n if filename.endswith('.py') and filename.startswith('cmd_'):\n rv.append(filename[4:-3])\n rv.sort()\n return rv\n\n def get_command(self, ctx, name):\n try:\n if sys.version_info[0] == 2:\n name = name.encode('ascii', 'replace')\n\n mod = __import__('kubedrctl.cli.commands.cmd_' + name, None, None, ['cli'])\n except ImportError:\n logging.error(traceback.format_exc())\n return\n\n return mod.cli\n\n@click.command(cls=MyCLI)\n@click.version_option()\n@context.pass_context\ndef cli(ctx):\n \"\"\"KubeDR CLI.\n \"\"\"\n\n pass\n\ndef init_logging():\n fd, logfile = tempfile.mkstemp(suffix='.txt', prefix='kubedrctl')\n os.close(fd)\n logging.basicConfig(filename=logfile, level=logging.DEBUG, format='%(asctime)-15s: %(levelname)s: %(message)s')\n\n # Use \"CRITICAL\" for logging messages that should go to 
the console as well as to the\n    # log file. If we use \"INFO\" level, stack traces will end up on the console (because they\n    # will be logged at \"ERROR\" level).\n    logger = logging.getLogger()\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.CRITICAL)\n    ch.setFormatter(logging.Formatter(''))\n    logger.addHandler(ch)\n\n    return logfile\n\ndef main():\n    logfile = init_logging()\n    # logging.critical(\"logfile: {}\".format(logfile))\n\n    try:\n        cli()\n    except Exception as e:\n        logging.error(traceback.format_exc())\n\n        exctype, value = sys.exc_info()[:2]\n        click.secho(traceback.format_exception_only(exctype, value)[0], fg='red')\n\n        sys.exit(1)\n","sub_path":"kubedrctl/cli/kubedrctl.py","file_name":"kubedrctl.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"166696099","text":"# Search in a sorted array of unknown size\n# Time Complexity: O(log n)\nimport sys\n# \"\"\"\n# This is ArrayReader's API interface.\n# You should not implement it, or speculate about its implementation\n# \"\"\"\n#class ArrayReader:\n#    def get(self, index: int) -> int:\n\nclass Solution:\n    \n    def search(self, reader, target):\n        \"\"\"\n        :type reader: ArrayReader\n        :type target: int\n        :rtype: int\n        \"\"\"\n        # performs binary search\n        def binarySearch(reader, target, l, h):\n            while l <= h:\n                \n                m = l + (h-l) // 2\n\n                if reader.get(m) == target:\n                    return m\n                elif reader.get(m) < target:\n                    l = m + 1\n                else:\n                    h = m - 1\n            return -1\n        \n        h = 1\n        # check that the element is not the out-of-bounds sentinel (\"infinity\")\n        # and that the element at pos h is still less than or equal to target\n        while reader.get(h) != sys.maxsize and reader.get(h) <= target:\n            # double the h ptr\n            h *= 2\n        # perform binary search on it\n        return binarySearch(reader, target, 0, h)\n        \n    ","sub_path":"Search_unknownsize.py","file_name":"Search_unknownsize.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"132605123","text":"import pickle, os\r\nimport random\r\n\r\nN=200\r\n\r\nclass Record :\r\n    '''\r\n    DESCRIPTION : To create an object of type 'Record'\r\n    ATTRIBUTES : key, nonkey\r\n    '''\r\n\r\n    def __init__(self, key, nonkey):\r\n        '''\r\n        OBJECTIVE : To initialize a Record object\r\n        INPUT PARAMETERS :\r\n            self : (Implicit) Record object\r\n            key : key value of the object Record\r\n            nonkey : value corresponding to that key\r\n        OUTPUT :\r\n            None\r\n        '''\r\n\r\n        #Approach: key = key & nonkey = nonkey\r\n\r\n        self.key = key \r\n        self.nonkey = nonkey\r\n\r\n\r\n    def __str__(self):\r\n        '''\r\n        OBJECTIVE: To return a string of the values of the object Record\r\n        INPUT PARAMETERS :\r\n            self : (Implicit) Record object\r\n        OUTPUT : \r\n            a string representing the Record object\r\n        '''\r\n\r\n        return \"\\nKey: \"+str(self.key) + \"\\nnonkey: \" + str(self.nonkey)\r\n\r\n\r\ndef writeRecord():\r\n\r\n    '''\r\n    OBJECTIVE : To write records in file1\r\n    INPUT PARAMETERS :\r\n        None\r\n    OUTPUT :\r\n        None\r\n    '''\r\n\r\n    #Approach: Dump Record object in f1 and also dump [location] in f2 for each record\r\n\r\n    f1 = open(\"fileRecord.txt\", \"wb\")\r\n    f2 = open(\"fileRecord2.txt\", \"wb\")\r\n    \r\n    for i in range(1,N+2):\r\n        loc = f1.tell()\r\n        key = i + 50000\r\n        val = str(key) * random.randint(50,250)\r\n        \r\n        lst = [loc + 500000]\r\n        ob = Record(key, val)\r\n        pickle.dump(ob, f1)\r\n        pickle.dump(lst, f2)\r\n    \r\n    f1.close()\r\n    f2.close()\r\n\r\n    \r\n\r\ndef read_a_record( n ):\r\n    '''\r\n    OBJECTIVE : To read and print a 
record from the file1 using file2\r\n INPUT PARAMETERS :\r\n n : record number which is to be read\r\n OUTPUT :\r\n None\r\n '''\r\n\r\n #Approach: read location of nth record from f2 and read Record object from that location from f2\r\n\r\n f1 = open(\"fileRecord.txt\", \"rb\")\r\n f2 = open(\"fileRecord2.txt\", \"rb\")\r\n\r\n o2 = pickle.load(f2)\r\n\r\n size = f2.tell()\r\n\r\n f2.seek(0, os.SEEK_END)\r\n\r\n lenF2 = f2.tell()\r\n\r\n if n >= lenF2//size or n <= 0:\r\n print(\"Record not Found\")\r\n return\r\n \r\n f2.seek(size * (n-1))\r\n\r\n o2 = pickle.load(f2)\r\n\r\n f1.seek(o2[0] - 500000 )\r\n o1 = pickle.load(f1)\r\n print(o1)\r\n \r\n \r\n f1.close()\r\n f2.close()\r\n\r\n \r\n \r\ndef main() :\r\n '''\r\n OBJECTIVE : main function for file handling\r\n INPUT PARAMETERS :\r\n None\r\n OUTPUT :\r\n None\r\n '''\r\n\r\n writeRecord()\r\n\r\n\r\n while True:\r\n n = input(\"\\nEnter record number to be read else enter '#': \")\r\n if n == '#':\r\n print(\"\\tEnd\")\r\n break\r\n read_a_record(int(n))\r\n\r\n\r\nif __name__ == \"__main__\" :\r\n main()\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n","sub_path":"RecordProg.py","file_name":"RecordProg.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"417979447","text":"import mne \nimport numpy as np\nimport os\nimport scipy.stats as ss\n\n# Set folders and files (have to change these based on channel and condition)\nchannelName = 'MEG0221'\nsubjectDir = '/home/timb/camcan/subjects/'\n\n#Rest condition\nrest_dataDir = '/media/NAS/lpower/BetaSourceLocalization/restData/'+ channelName \nrest_stcPrefix = 'transdef_mf2pt2_rest_raw_rest_210s_cleaned-epo_restBetaEvents_sLORETA_fsaverage-lh.stc'\n\n#Post-stim condition\npost_dataDir = '/media/NAS/lpower/BetaSourceLocalization/postStimData/'+ channelName\npost_stcPrefix = 'transdef_transrest_mf2pt2_task_raw_buttonPress_duration=3.4s_cleaned-epo_postBetaEvents_sLORETA_fsaverage-lh.stc'\n\n#Find all subject folders that exist\nsubjects = os.listdir(post_dataDir)\n \n# Loop over all subject folders\ndiffs = []\nsub_count = 0;\nfor subjectID in subjects:\n \n # Set file path for stc file\n restStcFile = os.path.join(rest_dataDir, subjectID, rest_stcPrefix)\n postStcFile = os.path.join(post_dataDir, subjectID, post_stcPrefix)\n\n # If files exist read in premovement and postmove source estimates \n if os.path.exists(restStcFile) and os.path.exists(postStcFile):\n rest_stc = mne.read_source_estimate(restStcFile)\n post_stc = mne.read_source_estimate(postStcFile)\n \n #Take the difference between estimates (post - pre-move)\n diff_stc = post_stc.__sub__(rest_stc)\n #save source estimate data for this subject to a list\n diff_vertex_vals = diff_stc.data\n diffs.append(diff_vertex_vals)\n print(sub_count)\n sub_count = sub_count + 1\n\n#reformat difference arrays for computing t-tests for each vertex\ndiffs = np.asarray(diffs)\ndiffs = np.reshape(diffs, (sub_count,20484))\n\n#Perform t-test for each difference vertex across participants (null hypothesis: diff=0)\nttests = []\npvals = []\ntstats = []\nfor i in range(0,diffs.shape[1]):\n thisTest = ss.ttest_1samp(diffs[:,i],0)\n ttests.append(thisTest)\n\n thispval = thisTest.pvalue\n pvals.append(thispval)\n\n thisTstat = thisTest.statistic\n tstats.append(thisTstat)\n\n#Restructure pvals and create a source estimate object with the p-val data so it can be plotted \npvals = 
np.asarray(pvals)\npvals = np.reshape(pvals, (20484,1))\npval_stc = diff_stc.copy()\npval_stc.data = pvals\n\n#Do the same thing with the t-stat \ntstats = np.asarray(tstats)\ntstats = np.reshape(tstats, (20484,1))\ntstats_stc = diff_stc.copy()\ntstats_stc.data = tstats\n\n#Save and plot\noutFileName = '/media/NAS/lpower/BetaSourceLocalization/comparisonMaps/sLORETA_rest_post_pvals'\npval_stc.save(outFileName)\n\noutFileName = '/media/NAS/lpower/BetaSourceLocalization/comparisonMaps/sLORETA_rest_post_tstats'\ntstats_stc.save(outFileName)\n\n#pval_stc.plot(surface='pial', hemi='both', subjects_dir=subjectDir, subject='fsaverage',\n # backend='mayavi', time_viewer=True, clim=clim)\n\n","sub_path":"rest_post_diff_maps.py","file_name":"rest_post_diff_maps.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"243371716","text":"import matplotlib.pyplot as plt\nfrom matplotlib import cm\ncmap = cm.spectral\nfrom math import log\nimport logging\n\nroot = logging.getLogger()\n\nclass Plotter:\n color_dict = dict()\n thickness_dict = dict()\n zorder_dict = dict()\n\n def __init__(self, voltage_levels):\n if voltage_levels:\n for voltage in voltage_levels.split('|'):\n self.color_dict[voltage] = cmap(int(255 * ((int(voltage) - 110000) / 340000.0)))\n if int(voltage) / 300000 > 0:\n self.thickness_dict[voltage] = 3\n self.zorder_dict[voltage] = 1\n elif int(voltage) / 220000 > 0:\n self.thickness_dict[voltage] = 2\n self.zorder_dict[voltage] = 2\n else:\n self.thickness_dict[voltage] = 1\n self.zorder_dict[voltage] = 3\n\n def plot_topology(self, circuits, boundary, partition_by_station_dict, cities, destdir):\n fig = plt.figure(figsize=(10, 12), facecolor='white')\n ax = plt.subplot(111)\n ax.set_axis_off()\n fig.add_axes(ax)\n\n if boundary is not None:\n (xmin, ymin, xmax, ymax) = boundary.buffer(0.5).bounds\n plt.xlim([xmin, xmax])\n plt.ylim([ymin, ymax])\n if hasattr(boundary, 'geoms'):\n for polygon in boundary.geoms:\n Plotter.plot_polygon(polygon)\n else:\n Plotter.plot_polygon(boundary)\n\n for circuit in circuits:\n plt.plot(circuit.members[0].lon, circuit.members[0].lat, marker='o', markerfacecolor='black', linestyle=\"None\", markersize=5, zorder=10)\n plt.plot(circuit.members[-1].lon, circuit.members[-1].lat, marker='o', markerfacecolor='black',\n linestyle=\"None\", markersize=5, zorder=10)\n #ax.annotate(circuit.members[0].id, (circuit.members[0].lon, circuit.members[0].lat))\n #ax.annotate(circuit.members[-1].id, (circuit.members[-1].lon, circuit.members[-1].lat))\n\n for line in circuit.members[1:-1]:\n x,y = line.geom.xy\n plt.plot(x, y, color=self.color_dict[line.voltage.split(';')[0]], alpha=1,\n linewidth=self.thickness_dict[line.voltage.split(';')[0]], solid_capstyle='round', zorder=self.zorder_dict[line.voltage.split(';')[0]])\n\n if cities is not None:\n for city in cities:\n if city.geom.within(boundary):\n plt.plot(city.lon, city.lat, marker='o', markerfacecolor='#ff0000', linestyle=\"None\", markersize=log(city.population, 10), zorder=2)\n if city.population >= 200000 and 'DEUTSCHLAND' not in city.name:\n label = city.name\n ax.annotate(label, (city.lon, city.lat))\n\n plt.plot([], [], marker='o', markerfacecolor='black', linestyle=\"None\", markersize=5, zorder=5, label='station')\n for voltage in self.color_dict.keys():\n label = voltage + 'V'\n plt.plot([], [], color=self.color_dict[voltage], lw=1.3, zorder=5, label=label)\n l = plt.legend(numpoints=1, loc=2)\n 
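# NB: the empty plt.plot([], [], ...) calls above are legend proxy artists -- they draw nothing on the map, but give the legend one entry for the station marker and one line per voltage level\n        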
l.set_zorder(5)\n\n plt.savefig(destdir + '/topology.png', bbox_inches='tight', pad_inches=0, dpi=600)\n\n # Voronoi partitions\n if partition_by_station_dict is not None:\n for station in partition_by_station_dict.keys():\n partition_polygon = partition_by_station_dict[station]\n if hasattr(partition_polygon, 'geoms'):\n for polygon in partition_polygon:\n Plotter.plot_polygon(polygon, '#888888', zorder=2)\n else:\n Plotter.plot_polygon(partition_polygon, '#888888', zorder=2)\n plt.plot([], [], color='#888888', lw=2, zorder=5, label='Voronoi partitions')\n plt.savefig(destdir + '/topology_voronoi.png', bbox_inches='tight', pad_inches=0, dpi=600)\n\n\n @staticmethod\n def plot_polygon(polygon, color='#cccccc', zorder=1):\n x, y = polygon.exterior.xy\n plt.plot(x, y, color=color, alpha=1,\n linewidth=2, solid_capstyle='round', zorder=zorder)","sub_path":"code/Plotter.py","file_name":"Plotter.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"444324197","text":"#! /usr/bin/env python\nimport pathlib\nfrom math import cos, sin, radians\n\ndef get_result(data):\n x, y = 0, 0\n _translation = {\n 0: 'E',\n 90: 'S',\n 180: 'W',\n 270: 'N'\n }\n cur = 'E'\n deg = 0\n for move in data:\n d = move[0]\n l = int(move[1:])\n if d == 'F':\n d = cur\n\n if d == 'N':\n y += l\n elif d == 'E':\n x += l\n elif d == 'S':\n y -= l\n elif d == 'W':\n x -= l\n elif d == 'R':\n deg += l\n cur = _translation[abs(deg % 360)]\n elif d == 'L':\n deg -= l\n cur = _translation[abs(deg % 360)]\n\n return abs(x) + abs(y)\n\n\n\ndef get_result2(data):\n x, y = 0, 0\n def translate(way, deg):\n x,y = way\n deg = radians(deg)\n return [x*cos(deg) - y*sin(deg), x*sin(deg) + y*cos(deg)]\n way = [10,1]\n for move in data:\n d = move[0]\n l = int(move[1:])\n if d == 'F':\n x += l*way[0]\n y += l*way[1]\n elif d == 'N':\n way[1] += l\n elif d == 'E':\n way[0] += l\n elif d == 'S':\n way[1] -= l\n elif d == 'W':\n way[0] -= l\n elif d == 'R':\n way = translate(way, 360 - l % 360)\n elif d == 'L':\n way = translate(way, l % 360)\n\n return abs(x) + abs(y)\n\n\ndef main():\n data = [line for line in pathlib.Path(\"input.txt\").read_text().split(\"\\n\")]\n result = get_result2(data)\n print(\"done\")\n print(result)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"12/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"378402668","text":"# _*_ coding: utf-8 _*_\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.text import capfirst, get_text_list\nfrom django.dispatch import receiver\nfrom django.db.models import signals\nfrom unicodedata import normalize\nfrom django.core.exceptions import ValidationError\nfrom django.core.exceptions import NON_FIELD_ERRORS\nimport datetime\n# models\nfrom django.contrib.auth.models import AbstractUser # managers\nfrom .managers import UserManager\nfrom apps.person.models import Person\nfrom django.contrib.auth.models import Group, Permission\nfrom apps.space.models import Solution, Enterprise, Headquarter\n\n\nON = 'ON'\nOFF = 'OFF'\nUSER_STATUS_CHOICES = (\n (ON, _('Activate')),\n (OFF, _('Deactivate')),\n)\n\n\nclass User(AbstractUser):\n \"\"\"\n Table user\n \"\"\"\n class Meta:\n verbose_name = capfirst(_('User'))\n verbose_name_plural = capfirst(_('Users'))\n permissions = (\n ('user', 'Can ALL user'),\n 
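# 'Can ALL user' is a custom catch-all permission; the same 'Can ALL <model>' convention recurs on the Access, Module, Menu, Backup and Ticket models below\n            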
)\n db_table = 'auth_user'\n\n last_headquarter_id = models.CharField(\n max_length=50, null=True, blank=True)\n last_module_id = models.CharField(max_length=50, null=True, blank=True)\n person = models.OneToOneField(\n Person, verbose_name=_('Person'), null=True, blank=True)\n\n objects = UserManager()\n\n def __str__(self):\n return self.username\n\n\ndef user_pre_save(sender, instance, raw, **kwargs):\n instance.last_login = datetime.datetime.now()\n if instance.person:\n instance.first_name = instance.person.first_name\n instance.last_name = instance.person.last_name\n instance.email = instance.person.email\n\n\n@receiver(signals.post_save, sender=User)\ndef user_post_save(sender, instance, created, raw, **kwargs):\n if created: # solo despues de crear un nuevo usuario\n UserStatus.objects.create(description='Alta', user=instance)\n\nsignals.pre_save.connect(user_pre_save, sender=User)\n# signals.post_save.connect(user_post_save, sender=User)\n\n\nclass UserStatus(models.Model):\n \"\"\"\n Tabla para el historial de los estados de los usuarios\n \"\"\"\n status = models.CharField(\n _('status'), max_length=50, choices=USER_STATUS_CHOICES, default=ON\n )\n description = models.TextField(_('description'), null=True, blank=True)\n # related_name=userstatus_set\n user = models.ForeignKey(User, verbose_name=capfirst(_('User')))\n created_at = models.DateTimeField(_('created at'), auto_now_add=True)\n updated_at = models.DateTimeField(_('updated at'), auto_now=True)\n\n class Meta:\n verbose_name = _('User status')\n verbose_name_plural = _('User statuses')\n db_table = 'prime_user_status'\n\n def __str__(self):\n return '%s %s' % (self.user.username, self.status)\n\nINPUT = \"INPUT\"\nOUTPUT = \"OUTPUT\"\n\nACCESS_TYPE_CHOICES = (\n (INPUT, \"Input\"),\n (OUTPUT, \"Output\"),\n\n)\n\n\nclass Access(models.Model):\n \"\"\"\n Tabla que registra los accesos de los usuarios al sistema\n \"\"\"\n access_type = models.CharField(\n _('access type'),\n max_length=50, choices=ACCESS_TYPE_CHOICES, default=INPUT)\n ip = models.CharField(_('IP'), max_length=50, null=True, blank=True)\n session_key = models.TextField(_('session key'), null=True, blank=True)\n\n user = models.ForeignKey(User, verbose_name=capfirst(_('User')))\n created_at = models.DateTimeField(_('created at'), auto_now_add=True)\n\n class Meta:\n verbose_name = _('Access')\n verbose_name_plural = _('Accesses')\n permissions = (\n (\"access\", \"Can ALL access\"),\n )\n\n def __str__(self):\n return \"%s %s\" % (self.user.username, self.access_type)\n\n\nPRO = 'PRO'\nWEB = 'WEB'\nVENTAS = 'VENTAS'\nBACKEND = 'BACKEND'\nMODULE_CHOICES = (\n (PRO, 'Profesional'),\n (WEB, 'Web informativa'),\n (VENTAS, 'Ventas'),\n (BACKEND, 'Backend Manager'),\n)\n\n\nclass Module(models.Model):\n \"\"\"\n Modulos del sistema\n \"\"\"\n module = models.CharField(_('module'), max_length=50,\n choices=MODULE_CHOICES, default=BACKEND)\n name = models.CharField(\n capfirst(_('name')), max_length=50)\n is_active = models.BooleanField(\n capfirst(_('active')), default=True)\n icon = models.CharField(_('icon'),\n max_length=50, null=True, blank=True)\n description = models.TextField(_('description'),\n null=True, blank=True)\n\n created_at = models.DateTimeField(_('created at'), auto_now_add=True)\n updated_at = models.DateTimeField(_('updated at'), auto_now=True)\n\n solutions = models.ManyToManyField(\n # Solution, verbose_name=_('Solutions'), null=True, blank=True)\n # comment because to alert null has no effect on ManyToManyField.\n Solution, 
verbose_name=_('Solutions'), blank=True)\n # , through='ModuleSolution'\n\n groups = models.ManyToManyField(\n Group, related_name='module_set',\n verbose_name=capfirst(_('Groups')), blank=True)\n # null=True, blank=True) # , through='ModuleGroup'\n # comment because to alert null has no effect on ManyToManyField.\n # related_name cambia module_set x initial_groups_module_set\n initial_groups = models.ManyToManyField(\n Group, related_name='initial_groups_module_set',\n verbose_name=_('Initial groups'), blank=True)\n # null=True, blank=True) # , through='ModuleInitialGroup'\n # comment because to alert null has no effect on ManyToManyField.\n\n class Meta:\n\n ordering = ['-id', ]\n verbose_name = _('Module')\n verbose_name_plural = _('Modules')\n permissions = (\n ('module', 'Can ALL module'),\n )\n unique_together = ('module', 'name',)\n\n def __str__(self):\n return '%s (%s)' % (\n self.name,\n dict((x, y)\n for x, y in MODULE_CHOICES)[self.module])\n\n def validate_unique(self, exclude=None):\n if normalize(\n 'NFKD',\n self.name\n ).encode('ascii', 'ignore').lower() in list(\n normalize(\n 'NFKD',\n c['name']).encode(\n 'ascii',\n 'ignore').lower()\n for c in self.__class__.objects.values(\n 'name'\n ).exclude(pk=self.pk).filter(module=self.module)\n ):\n raise ValidationError({\n 'name':\n (_(u'%(model_name)s with this %(field_label)s already exists.')\n % {\n 'model_name': '%s \"%s\"' % (\n capfirst(_('Module')) + '',\n dict(MODULE_CHOICES).get(self.module)),\n 'field_label': capfirst(_('name')),\n }, ),\n })\n super(Module, self).validate_unique(exclude=exclude)\n\n\nclass Menu(models.Model):\n \"\"\"\n Menus del sistema\n \"\"\"\n module = models.CharField(\n _('module'), max_length=50, choices=MODULE_CHOICES, default=BACKEND)\n title = models.CharField(capfirst(_('title')), max_length=50)\n url = models.CharField(max_length=150, default='#')\n pos = models.IntegerField(_('position'), default=1)\n icon = models.CharField(\n _('icon'), max_length=50, null=True, blank=True, default='')\n is_active = models.BooleanField(capfirst(_('active')), default=True)\n description = models.TextField(_('description'), null=True, blank=True)\n\n created_at = models.DateTimeField(_('created at'), auto_now_add=True)\n updated_at = models.DateTimeField(_('updated at'), auto_now=True)\n\n permission = models.ForeignKey(\n Permission, verbose_name=_('Permission'), null=True, blank=True)\n # related_name='parent',\n parent = models.ForeignKey(\n 'self', verbose_name=_('Parent'), null=True, blank=True)\n\n class Meta:\n verbose_name = _('Menu')\n verbose_name_plural = _('Menus')\n permissions = (\n ('menu', 'Can ALL menu'),\n )\n\n def __str__(self):\n return '%s (%s)' % (\n self.title,\n dict((x, y)\n for x, y in MODULE_CHOICES)[self.module])\n\n\nclass UserEnterprise(models.Model):\n \"\"\"\n Permisos a nivel de empresa\n \"\"\"\n created_at = models.DateTimeField(_('created at'), auto_now_add=True)\n updated_at = models.DateTimeField(_('updated at'), auto_now=True)\n\n user = models.ForeignKey(User, verbose_name=_('User'))\n group = models.ForeignKey(Group, verbose_name=_('Group'))\n enterprise = models.ForeignKey(Enterprise, verbose_name=_('Enterprise'))\n\n class Meta:\n verbose_name = _('User enterprise')\n verbose_name_plural = _('User enterprises')\n db_table = 'prime_user_enterprise'\n\n def __str__(self):\n return '%s %s - %s' % (\n self.user.username,\n self.enterprise.name,\n self.group.name)\n\n\nclass UserHeadquarter(models.Model):\n \"\"\"\n Permisos a nivel de sede 'Headquarter'\n 
\"\"\"\n created_at = models.DateTimeField(_('created at'), auto_now_add=True)\n updated_at = models.DateTimeField(_('updated at'), auto_now=True)\n\n user = models.ForeignKey(User, verbose_name=_('User'))\n group = models.ForeignKey(Group, verbose_name=_('Group'))\n headquarter = models.ForeignKey(Headquarter, verbose_name=_('Headquarter'))\n\n class Meta:\n verbose_name = _('User Headquarter')\n verbose_name_plural = _('User Headquarters')\n db_table = 'prime_user_headquarter'\n\n def __str__(self):\n return '%s %s %s - %s' % (\n self.user.username,\n self.Headquarter.name,\n self.Headquarter.enterprise.name,\n self.group.name)\n\n\nclass Backup(models.Model):\n \"\"\"\n Tabla para registro de las copias de la db\n \"\"\"\n file_name = models.CharField(_('file name'), max_length=50)\n description = models.TextField(_('description'), null=True, blank=True)\n size = models.CharField(_('size'), max_length=50, null=True, blank=True)\n\n user = models.ForeignKey(User, verbose_name=_('User'))\n created_at = models.DateTimeField(_('created at'), auto_now_add=True)\n\n class Meta:\n verbose_name = _('Backup')\n verbose_name_plural = _('Backups')\n permissions = (\n ('backup', 'Can ALL backup'),\n )\n\n def __str__(self):\n return self.file_name\n\n\nclass Ticket(models.Model):\n \"\"\"\n Tabla para impresiones de tickets\n \"\"\"\n text = models.CharField(_('text'), max_length=150, null=True, blank=True)\n row = models.IntegerField(_('row'), default=1)\n\n user = models.ForeignKey(\n User, verbose_name=_('User'), null=True, blank=True)\n\n class Meta:\n verbose_name = _('Ticket')\n verbose_name_plural = _('Tickets')\n permissions = (\n ('ticket', 'Can ALL ticket'),\n )\n\n def __str__(self):\n return '%s %s' % (self.user.username, self.text)\n","sub_path":"apps/prime/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"357829807","text":"import alsaaudio\nimport wave\nimport numpy\nimport os\n\n\ndef record(length, sample_rate=44100, bit_depth=16, channels=2):\n temp_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'temp.wav')\n width = bit_depth / 8\n inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, device='sysdefault:CARD=CODEC')\n inp.setchannels(channels)\n inp.setrate(sample_rate)\n inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)\n inp.setperiodsize(1024)\n\n raw_data_string = str()\n\n w = wave.open(temp_file, 'w')\n w.setnchannels(channels)\n w.setsampwidth(width)\n w.setframerate(sample_rate)\n\n total = 0\n\n while total < length * sample_rate:\n l, data = inp.read()\n raw_data_string = raw_data_string + data\n w.writeframes(data)\n total += l\n return temp_file\n\n\ndef read_to_float32_buffer(length, sample_rate=44100, bit_depth=16, channels=2):\n width = bit_depth / 8\n inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, device='sysdefault:CARD=CODEC')\n inp.setchannels(channels)\n inp.setrate(sample_rate)\n inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)\n inp.setperiodsize(1024)\n total = 0\n raw_data_string = str()\n\n while total < length * sample_rate:\n l, data = inp.read()\n raw_data_string = raw_data_string + data\n total += l\n\n converted_string_array = numpy.fromstring(raw_data_string, dtype='int16')\n converted_float32_array = converted_string_array.astype(numpy.float32)\n return 
converted_float32_array\n","sub_path":"nesbit/linux_record.py","file_name":"linux_record.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"440943291","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\nPE_0143\n\nCreated on Thu Oct 26 17:34:47 2017\n@author: mbh\n\"\"\"\n\nimport time\nimport math\n\ndef p143(limit=120000):\n \n t=time.clock()\n \n pairs=pairsList(limit)\n index=indexList(pairs,limit)\n \n# Which sums have been reached?\n sums=set()\n \n for i in range(len(pairs)):\n \n a,b=pairs[i][0],pairs[i][1]\n va,vb=[],[] \n ia,ib=index[a],index[b]\n \n while ialimit:\n break\n k=1\n while (k*(a+b) vraiment random\na = random.random()*div\t\t # Choix du numéro dans la liste [0,1[ => 0, [1,2[ => 1, etc...\ne,f = Eligibles[int(a)]\t\t # Transformation en entier du nombre aléatoire (réduit à l'entier inférieur, tel que 4,6345625 passe à 4)\nState_Salles[f][e]=2\t\t # On définit la salle à chercher\nminimap[f][e]=3\nEligibles.pop(int(a))\t\t # On retire la salle des salles éligibles.\nc,b = Eligibles[int(random.random()*len(Eligibles))] # Same thing mais plus rapidement.\nState_Salles[b][c]=3\t\t # On définit la salle de départ, pas besoin de la supprimer car on n'utilise plus Eligibles.\nminiX = b\nminiY = c \t\t\t\t\t # Les coordonnées sur la minimap\n\n\ndef Generation_Salle(Room):\n\tcases = [[1,1,1,1,1,1,1,1,1,1],[1,0,0,0,0,0,0,0,0,1],[1,0,0,0,0,0,0,0,0,1],[1,0,0,0,0,0,0,0,0,1],[1,0,0,0,0,0,0,0,0,1],[1,0,0,0,0,0,0,0,0,1],[1,0,0,0,0,0,0,0,0,1],[1,0,0,0,0,0,0,0,0,1],[1,0,0,0,0,0,0,0,0,1],[1,1,1,1,1,1,1,1,1,1]]\n\t# Liste de liste : cases[x][y] = 1 si mur, 0 si terrain libre, 2 si objet à trouver présent,3 si chemin vers autre salle.\n\t\n\tif Room[0]==True :\t\t# Structure de base (sans les chemins rajoutés) :\n\t\tcases[4][0]=3\t\t# 1111111111\n\t\tcases[5][0]=3\t\t# 1000000001\n\tif Room[1]==True :\t\t# 1000000001\n\t\tcases[0][4]=3\t\t# 1000000001\tPuis on rajoute des passages à gauche, en haut, à droite, et finalement en bas.\n\t\tcases[0][5]=3\t\t# 1000000001\n\tif Room[2]==True :\t\t# 1000000001\n\t\tcases[4][9]=3\t\t# 1000000001\n\t\tcases[5][9]=3\t\t# 1000000001\n\tif Room[3]==True :\t\t# 1000000001\n\t\tcases[9][4]=3\t\t# 1111111111\n\t\tcases[9][5]=3\n\treturn cases\n\nposition_perso = (320,320) #Le personnage démarre au milieu de la salle.\ndirection = 'down'\n\ncases = Generation_Salle(Salles[miniX][miniY])\nfor i in range(len(cases)):\n\tfor j in range(len(cases[i])):\n\t\tif cases[i][j]==1:\n\t\t\tscreen.blit(wall, ((64*i)-10,(64*j)-10))\n\t\tif cases[i][j]==2:\n\t\t\tscreen.blit(treasure, (64*i,64*j))\nscreen.blit(stand_down,position_perso)\n\n#BOUCLE INFINIE\ncontinuer = 1\nmouvement_possible = 1\nitem_found = 0\t\t\t\t# Si on trouve l'objet, passe à 1.\nhas_changed = 0 \t\t\t# Passe à 1 pour marquer le changement de salle et la génération de carte.\nmenu_ouvert = 0 \t\t\t# 0 pour fermé, 1 pour ouvert.\nmenu_selection = 0 \t\t\t# Curseur du menu : 0 pour Reprendre, 1 pour Volume, 2 pour Quitter.\njouer_cinematique = 0 \t\t# 1 si on joue la cinématique à la fin du jeu (utilisé ligne 249 et 338)\nprecedente_animation = stand_down\nclock = pg.time.Clock()\nclock.tick()\ntimer_duree_init = 7 #Utilisée plus tard pour les animations\nTemps = 0\na = 1\n\nwhile (continuer == 1):\n\tminimap[miniX][miniY] = 2 #On fixe la salle actuelle comme celle présente\n\tif menu_ouvert == 1 or mouvement_possible == 0 :\n\t\tclock.tick()\n\telse :\n\t\tTemps = Temps + 
clock.tick()\n\t\tMinutes = Temps / 60000 # Il s'agit de milisecondes.\n\t\tSecondes = (Temps - 60000 * Minutes)/1000 \n\n\tdeplacement = 0 \t\t# Si touche de déplacement appuyée, passe à 1\n\tif has_changed !=0 :\t# S'il y a eu un changement de salle.\n\n\t\tcases = Generation_Salle(Salles[miniX][miniY])\t\t\t# On génère la liste des cases.\n\t\t\n\t\tif State_Salles[miniX][miniY]==2 : cases[4][4]=2\t\t# Vérification si la salle possède la Triforce => On la met dedans.\n\t\tif has_changed == 1 : position_perso = (320-32,70) \t\t# Le personnage apparait en haut\n\t\tif has_changed == 2 : position_perso = (320-32,640-70)\t#...en bas\n\t\tif has_changed == 3 : position_perso = (70,320-32)\t\t#...à gauche\n\t\tif has_changed == 4 : position_perso = (640-70,320-32) \t#...à droite\n\t\tif has_changed == 5 : position_perso = (320,320)\t\t# A servi pour du débuggage : PAS ACTIF EN JEU\n\t\thas_changed = 0 \t\t\t\t\t\t\t\t\t\t#Le personnage a changé de salle => Plus besoin de le redéplacer à la position de départ.\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tscreen.blit(background, (0,0))\t\t\t\t\t\t\t#On fait apparaitre la nouvelle salle.\n\t\tfor i in range(len(cases)):\n\t\t\tfor j in range(len(cases[i])):\n\t\t\t\tif cases[i][j]==1:\n\t\t\t\t\tscreen.blit(wall, ((64*i)-10,(64*j)-10))\n\t\tif cases[4][4]==2:\n\t\t\tscreen.blit(treasure, (64*4,64*4))\n\t\t\n\n\tfor event in pg.event.get(): \t\t\t\t#Attente des evenements\n\t\tif event.type == QUIT:\n\t\t\tcontinuer = 0\n\t\tif event.type == KEYDOWN:\n\t\t\tif event.key == K_ESCAPE: \t\t# Si on appuie sur la touche echap\n\t\t\t\tif menu_ouvert == 0 and mouvement_possible == 1 : # Si le menu est fermé,\n\t\t\t\t\tmenu_ouvert = 1 \t\t\t\t\t\t\t# On ouvre le menu,\n\t\t\t\t\tpg.key.set_repeat(50000,16) \t\t\t\t# Et on met un chiffre abominablement grand (50s) pour empêcher la répétition des inputs.\n\t\t\t\t\tmenu_selection = 0 \t\t\t\t\t\t\t# On veut mettre le curseur de base sur \"Reprendre\"\n\t\t\t\t\tson_menu_open.play()\t\t\t\t\t\t# On joue le son d'ouverture de menu.\n\n\t\t\t\telif menu_ouvert == 1 :\t\t\t# Si le menu est ouvert,\n\t\t\t\t\tmenu_ouvert = 0\t\t\t\t# On ferme le menu\n\t\t\t\t\tpg.key.set_repeat(1,16)\t\t# Et on remet la répétition des inputs comme avant.\n\t\t\t\t\tson_menu_close.play()\t\t# On joue le son de fermeture de menu.\n\n\t\t\t\telif mouvement_possible == 0 :\n\t\t\t\t\tcontinuer = 0\n\t\t\t\t\tjouer_cinematique = 0\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Mouvement du personnage:\n\t\t\tif event.key == K_DOWN: \t\t\t\t\t\t\t\t\t\t\t\t# Si \"fleche bas\"\n\t\t\t\tif menu_ouvert == 0 and mouvement_possible == 1 : \t\t\t\t\t# Menu fermé => Personnage libre de se déplacer\n\t\t\t\t\tif direction != 'down' : direction_change = True \t\t\t\t# Si on change de direction, on l'enregistre pour les animations.\n\t\t\t\t\tdirection = 'down'\t\t\t\t\t\t\t\t\t\t\t\t# On change de direction.\n\t\t\t\t\tif cases[((position_perso[0]+32)/64)][((position_perso[1]+69)/64)]!=1 : # Si il n'y a pas de mur en bas,\n\t\t\t\t\t\tposition_perso = (position_perso[0],position_perso[1]+5)\t\t\t# On peut faire descendre le personnage.\n\t\t\t\t\t\tdeplacement = 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Et on indique qu'il se déplace pour les animations.\n\t\t\t\t\t\tif position_perso[1]>640-70 : \t# Si il est sur une case sortie\n\t\t\t\t\t\t\thas_changed = 1 \t\t\t# On indique qu'on a changé de carte, et où il doit apparaitre\n\t\t\t\t\t\t\tminimap[miniX][miniY]=1\t\t# On rajoute cet endroit comme exploré sur la minimap\t\n\t\t\t\t\t\t\tminiY = miniY + 1 
\t\t\t# On change l'endroit dans la mini-map.\n\n\t\t\t\tif menu_ouvert == 1 :\n\t\t\t\t\tmenu_selection = (menu_selection + 1)%3 # Si le curseur est en bas, il revient en haut. Sinon il descend au menu suivant.\n\t\t\t\t\tson_menu_hover.play()\t\t\t\t\t# On joue le petit son qui indique qu'on change de sélection.\n\n\n\t\t\tif event.key == K_UP: # Si \"fleche haut\"\n\t\t\t\tif menu_ouvert == 0 and mouvement_possible == 1 :\n\t\t\t\t\tif direction != 'up' : direction_change = True\n\t\t\t\t\tdirection = \"up\"\n\t\t\t\t\tif cases[((position_perso[0]+32)/64)][((position_perso[1]-5)/64)]!=1 :\n\t\t\t\t\t\tposition_perso = (position_perso[0],position_perso[1]-5)\n\t\t\t\t\t\tdeplacement = 1\n\t\t\t\t\t\tif position_perso[1]<5 :\n\t\t\t\t\t\t\thas_changed = 2 \n\t\t\t\t\t\t\tminimap[miniX][miniY]=1\t\t# On rajoute cet endroit comme exploré sur la minimap\t\n\t\t\t\t\t\t\tminiY = miniY -1 \n\n\t\t\t\tif menu_ouvert == 1 :\n\t\t\t\t\tmenu_selection = (menu_selection - 1)%3 # Si le curseur est en haut, il va en bas. Sinon il remonte au menu précédent.\n\t\t\t\t\tson_menu_hover.play()\t\t\t\t\t# On joue le petit son qui indique qu'on change de sélection.\n\n\t\t\tif event.key == K_RIGHT: # Si \"fleche droite\"\n\n\t\t\t\tif menu_ouvert == 0 and mouvement_possible == 1 :\n\t\t\t\t\tif direction != 'right' : direction_change = True\n\t\t\t\t\tdirection = 'right'\n\t\t\t\t\tif cases[((position_perso[0]+69)/64)][((position_perso[1]+32)/64)]!=1 :\n\t\t\t\t\t\tdeplacement = 1\n\t\t\t\t\t\tposition_perso = (position_perso[0]+5,position_perso[1])\n\t\t\t\t\t\tif position_perso[0]>640-71 :\n\t\t\t\t\t\t\thas_changed = 3 \n\t\t\t\t\t\t\tminimap[miniX][miniY]=1\t\t# On rajoute cet endroit comme exploré sur la minimap\t\n\t\t\t\t\t\t\tminiX = miniX + 1\n\n\t\t\t\tif menu_ouvert == 1 :\n\t\t\t\t\tif menu_selection == 1 : \t\t\t\t\t\t\t\t\t# Si on est sur le bouton \"Volume : X0 %\"\n\t\t\t\t\t\tif volume < 10 :\n\t\t\t\t\t\t\tvolume = volume + 1\n\t\t\t\t\t\t\tpg.mixer.music.set_volume(float(volume)/10) \t\t\t\t\t# On augmente le volume de 10%\n\t\t\t\t\t\tson_menu_hover.play()\t\t\t\t\t\t\t\t\t# On joue le petit son pour avoir un feedback sonore.\n\n\t\t\tif event.key == K_LEFT: # Si \"fleche gauche\"\n\n\t\t\t\tif menu_ouvert == 0 and mouvement_possible == 1 :\n\t\t\t\t\tif direction != 'left' : direction_change = True\n\t\t\t\t\tdirection = 'left'\n\t\t\t\t\tif cases[((position_perso[0]-5)/64)][((position_perso[1]+32)/64)]!=1 :\n\t\t\t\t\t\tdeplacement = 1\n\t\t\t\t\t\tposition_perso = (position_perso[0]-5,position_perso[1])\n\t\t\t\t\t\tif position_perso[0]< 5 :\n\t\t\t\t\t\t\thas_changed = 4 \n\t\t\t\t\t\t\tminimap[miniX][miniY]=1\t\t# On rajoute cet endroit comme exploré sur la minimap\t\n\t\t\t\t\t\t\tminiX = miniX - 1\n\n\t\t\t\tif menu_ouvert == 1 :\n\t\t\t\t\tif menu_selection == 1 : \t\t\t\t\t\t\t\t\t\t\t# Si on est sur le bouton \"Volume : X0 %\"\n\t\t\t\t\t\tif volume > 0 :\t\t\t\t\t\t\t\t\t\t\t\t\t# Si le volume n'est pas à 0.\n\t\t\t\t\t\t\tvolume = volume - 1\t\n\t\t\t\t\t\t\tpg.mixer.music.set_volume(float(volume)/10) \t\t\t\t\t\t\t# On baisse le son de 10%\n\t\t\t\t\t\tson_menu_hover.play()\t\t\t\t\t\t\t\t\t\t\t# On joue le petit son pour avoir un feedback sonore.\n\n\t\t\tif event.key == K_RETURN :\n\t\t\t\tif menu_ouvert == 1 :\n\t\t\t\t\tif menu_selection == 0 :\t# Si Reprendre...\n\t\t\t\t\t\tmenu_ouvert = 0\t\t \t# On reprend le jeu.\n\t\t\t\t\t\tson_menu_close.play()\n\t\t\t\t\tif menu_selection == 1 :\t# Si Volume,\n\t\t\t\t\t\tson_menu_hover.play() \t# On donne juste un feedback 
sonore.\n\t\t\t\t\tif menu_selection == 2 :\t# Si Quitter,\n\t\t\t\t\t\tcontinuer = 0\t\t\t# On quitte le jeu.\n\n\t\t\t\telif mouvement_possible == 0 :\n\t\t\t\t\tcontinuer = 0\n\t\t\t\t\tjouer_cinematique = 1\n\n\n\t\t#Verification : Est-ce qu'on a trouvé l'objet ?\n\t\tif State_Salles[miniX][miniY] == 2 : # Grande ligne : Si le personnage est dans l'une des quatres cases du milieu.\n\t\t\tif cases[(position_perso[0]+32)/64][(position_perso[1]+32)/64]==2 or cases[(position_perso[0]-32)/64][(position_perso[1]+32)/64]==2 or cases[(position_perso[0]+32)/64][(position_perso[1]-32)/64]==2 or cases[(position_perso[0]-32)/64][(position_perso[1]-32)/64]==2:\n\t\t\t\tif item_found == 0 : \n\t\t\t\t\tson_item.play() # Youhou, on a trouvé l'objet. Fin du jeu.\n\t\t\t\t\titem_found = 1 # On évite de répéter le son en boucle.\n\t\t\t\t\tmouvement_possible = 0 # On modifie toutes les options.\n\n\t\t\t\t\n\t# Animations.\n\tif menu_ouvert == 0 : #Ne pas affecter les animations si le menu est ouvert (le menu étant transparant)\n\t\tif deplacement == 0 : \n\t\t\ttimer_duree = timer_duree_init # Bloque le timer sur la valeur de départ\n\t\t\tnumero_sprite = 0 \t# Bloque les sprites sur leur image de départ (on s'en moque un peu ici vu que on aura l'animation \n\t\t\t\t\t\t\t\t# stand_direction, mais sert pour réinitialiser les animations pour la mise en déplacement)\n\t\tif deplacement == 1 : # Si mouvement, condition vérifiée\n\t\t\ttimer_duree = timer_duree - 1 # On enlève 1 au timer\n\t\t\tif timer_duree == 0 : # Si le temps de l'image est écoulée,\n\t\t\t\ttimer_duree = timer_duree_init # on réinitialise le timer,\n\t\t\t\tnumero_sprite = numero_sprite + 1 # On passe à l'image suivante.\n\t\tif numero_sprite == 4 : numero_sprite = 0 # Si on dépasse le nombre d'images (4), on revient à l'image de départ.\n\n\n\tscreen.blit(background,(0,0))\n\t\n\tfor i in range(len(cases)):\n\t\tfor j in range(len(cases[i])):\n\t\t\tif cases[i][j]==1:\n\t\t\t\tscreen.blit(wall, ((64*i)-10,(64*j)-10))\n\t\t\tif cases[i][j]==2:\n\t\t\t\tscreen.blit(treasure, (64*i,64*j))\n\n\tif direction == 'down' :\n\t\tif menu_ouvert == 1 :\n\t\t\tscreen.blit(precedente_animation,position_perso)\n\t\telif deplacement == 0 :\n\t\t\tscreen.blit(stand_down,position_perso)\n\t\t\tprecedente_animation = stand_down\n\t\telif numero_sprite == 0 :\n\t\t\tscreen.blit(down_0,position_perso)\n\t\t\tprecedente_animation = down_0\n\t\telif numero_sprite == 1 :\n\t\t\tscreen.blit(down_1,position_perso)\n\t\t\tprecedente_animation = down_1\n\t\telif numero_sprite == 2 :\n\t\t\tscreen.blit(down_2,position_perso)\n\t\t\tprecedente_animation = down_2\n\t\telif numero_sprite == 3 :\n\t\t\tscreen.blit(down_3,position_perso)\n\t\t\tprecedente_animation = down_3\n\n\tif direction == 'up' :\n\t\tif menu_ouvert == 1 :\n\t\t\tscreen.blit(precedente_animation,position_perso)\n\t\telif deplacement == 0 :\n\t\t\tscreen.blit(stand_up,position_perso)\n\t\t\tprecedente_animation = stand_up\n\t\telif numero_sprite == 0 :\n\t\t\tscreen.blit(up_0,position_perso)\n\t\t\tprecedente_animation = up_0\n\t\telif numero_sprite == 1 :\n\t\t\tscreen.blit(up_1,position_perso)\n\t\t\tprecedente_animation = up_1\n\t\telif numero_sprite == 2 :\n\t\t\tscreen.blit(up_2,position_perso)\n\t\t\tprecedente_animation = up_2\n\t\telif numero_sprite == 3 :\n\t\t\tscreen.blit(up_3,position_perso)\n\t\t\tprecedente_animation = up_3\n\n\tif direction == 'right' :\n\t\tif menu_ouvert == 1 :\n\t\t\tscreen.blit(precedente_animation,position_perso)\n\t\telif deplacement == 0 
:\n\t\t\tscreen.blit(stand_right,position_perso)\n\t\t\tprecedente_animation = stand_right\n\t\telif numero_sprite == 0 :\n\t\t\tscreen.blit(right_0,position_perso)\n\t\t\tprecedente_animation = right_0\n\t\telif numero_sprite == 1 :\n\t\t\tscreen.blit(right_1,position_perso)\n\t\t\tprecedente_animation = right_1\n\t\telif numero_sprite == 2 :\n\t\t\tscreen.blit(right_2,position_perso)\n\t\t\tprecedente_animation = right_2\n\t\telif numero_sprite == 3 :\n\t\t\tscreen.blit(right_3,position_perso)\n\t\t\tprecedente_animation = right_3\n\n\tif direction == 'left' :\n\t\tif menu_ouvert == 1 :\n\t\t\tscreen.blit(precedente_animation,position_perso)\n\t\telif deplacement == 0 :\n\t\t\tscreen.blit(stand_left,position_perso)\n\t\t\tprecedente_animation = stand_left\n\t\telif numero_sprite == 0 :\n\t\t\tscreen.blit(left_0,position_perso)\n\t\t\tprecedente_animation = left_0\n\t\telif numero_sprite == 1 :\n\t\t\tscreen.blit(left_1,position_perso)\n\t\t\tprecedente_animation = left_1\n\t\telif numero_sprite == 2 :\n\t\t\tscreen.blit(left_2,position_perso)\n\t\t\tprecedente_animation = left_2\n\t\telif numero_sprite == 3 :\n\t\t\tscreen.blit(left_3,position_perso)\n\t\t\tprecedente_animation = left_3\n\n\tscreen.blit(Panneau,(640,0))\n\n\t# On affiche la minimap :\n\n\tfor x in range(len(minimap)):\n\t\tfor y in range(len(minimap[x])):\n\t\t\tif minimap[x][y]== 1 :\n\t\t\t\tpg.draw.rect(screen,(200,200,200),(650+20*x,42+20*y,20,20))\n\t\t\tif minimap[x][y]== 2 :\n\t\t\t\tpg.draw.rect(screen,(50,50,200),(650+20*x,42+20*y,20,20))\n\t\t\tif minimap[x][y]== 3 :\n\t\t\t\tpg.draw.rect(screen,(200,50,50),(650+20*x,42+20*y,20,20))\n\n\n\t# On affiche le timer :\n\tif Minutes < 10 :\n\t\tscreen.blit(mafont.render(str(0),1,couleur_panneau),(666,417))\n\t\tscreen.blit(mafont.render(str(Minutes),1,couleur_panneau),(684,417))\n\telse :\n\t\tscreen.blit(mafont.render(str(Minutes),1,couleur_panneau),(666,417))\n\tif Secondes < 10 :\n\t\tscreen.blit(mafont.render(str(0),1,couleur_panneau),(765,417))\n\t\tscreen.blit(mafont.render(str(Secondes),1,couleur_panneau),(783,417))\n\telse :\n\t\tscreen.blit(mafont.render(str(Secondes),1,couleur_panneau),(765,417))\n\tscreen.blit(mafont.render(\"[ \" + str(miniX) +\" ; \" + str(miniY) + \" ]\",1,couleur_panneau),(706,520))\n\tscreen.blit(mafont.render(\"[ X ; Y ]\",1,couleur_panneau),(704,560))\n\n\tif menu_ouvert == 1:\n\t\tscreen.blit(Menu,(0,0))\n\t\tscreen.blit(fontvolume.render(str(int(volume*10))+\"%\",1,(255,255,255)),(400,380)) # On affiche le % de volume.\n\t\tif menu_selection == 0 :\n\t\t\tscreen.blit(bar_0,(194,350))\n\t\tif menu_selection == 1 :\n\t\t\tscreen.blit(bar_1,(180,420))\n\t\tif menu_selection == 2 :\n\t\t\tscreen.blit(bar_2,(233,495))\n\n\tif item_found == 1 :\n\t\tscreen.blit(win,(0,0))\n\t\tpg.mixer.music.stop() # On coupe la musique pour la cinématique.\n\n\tpg.display.update()\n\n\t\t\npg.mixer.stop()\t\t\t\t# On coupe tous les sons pour la cinématique.\n\nif jouer_cinematique == 1 :\n\n\tpg.draw.rect(screen,(0,0,0),(0,0,860,640))\n\tpg.mixer.music.load(\"sounds/cinematic_music.ogg\")\n\n\tslide_1 = pg.image.load(\"cinematique/part_1.png\")\n\tslide_2 = pg.image.load(\"cinematique/part_2.png\")\n\tslide_3 = pg.image.load(\"cinematique/part_3.png\")\n\tslide_4 = pg.image.load(\"cinematique/part_4.png\")\n\tslide_5 = pg.image.load(\"cinematique/part_5.png\")\n\tslide_6 = pg.image.load(\"cinematique/part_6.png\")\n\n\ttexte_1 = pg.image.load(\"cinematique/t_1.png\")\n\ttexte_2 = pg.image.load(\"cinematique/t_2.png\")\n\ttexte_3 = 
pg.image.load(\"cinematique/t_3.png\")\n\ttexte_4 = pg.image.load(\"cinematique/t_4.png\")\n\ttexte_5 = pg.image.load(\"cinematique/t_5.png\")\n\ttexte_6 = pg.image.load(\"cinematique/t_6.png\")\n\ttexte_7 = pg.image.load(\"cinematique/t_7.png\")\n\ttexte_8 = pg.image.load(\"cinematique/t_8.png\")\n\ttexte_9 = pg.image.load(\"cinematique/t_9.png\")\n\ttexte_10 = pg.image.load(\"cinematique/t_10.png\")\n\ttexte_11 = pg.image.load(\"cinematique/t_11.png\")\n\ttexte_12 = pg.image.load(\"cinematique/t_12.png\")\n\ttexte_13 = pg.image.load(\"cinematique/t_13.png\")\n\ttexte_14 = pg.image.load(\"cinematique/t_14.png\")\n\ttexte_15 = pg.image.load(\"cinematique/t_15.png\")\n\ttexte_16 = pg.image.load(\"cinematique/t_16.png\")\n\ttexte_17 = pg.image.load(\"cinematique/t_17.png\")\n\ttexte_18 = pg.image.load(\"cinematique/t_18.png\")\n\ttexte_19 = pg.image.load(\"cinematique/t_19.png\")\n\ttexte_20 = pg.image.load(\"cinematique/t_20.png\")\n\ttexte_21 = pg.image.load(\"cinematique/t_21.png\")\n\ttexte_22 = pg.image.load(\"cinematique/t_22.png\")\n\n\tpg.display.update()\n\t\n\tTemps = 0\n\tMinutes = 0\n\tSecondes = 0\n\tpg.mixer.music.play()\n\ndef DrawCadre():\n\tpg.draw.rect(screen,(0,0,0),(0,0,100,640))\n\tpg.draw.rect(screen,(0,0,0),(760,0,100,640))\n\tpg.draw.rect(screen,(0,0,0),(0,0,860,100))\n\tpg.draw.rect(screen,(0,0,0),(0,406,860,254))\ntime.sleep(3)\nclock.tick(60)\n\nwhile jouer_cinematique == 1 :\n\n\tTemps = Temps + clock.tick()\n\t# Il s'agit de milisecondes.\n\tSecondes = float(Temps) / 1000\n\n\t# Liste des animations à faire selon le timer :\n\tpg.draw.rect(screen,(0,0,0),(0,0,860,640)) # Fond noir\n\tif 1==1 : #Juste pour rabattre la liste des animations sur Sublime Text\n\t\tif Secondes < 8 :\n\t\t\tscreen.blit(texte_1,(0,242))\n\t\tif Secondes < 27 :\n\t\t\tif Secondes > 10 :\n\t\t\t\tscreen.blit(slide_1,(100,280-(18*Secondes)))\n\t\t\t\tDrawCadre()\n\t\t\tif Secondes > 10 and Secondes < 18 :\n\t\t\t\tscreen.blit(texte_2,(0,436))\n\t\t\tif Secondes > 20 :\n\t\t\t\tscreen.blit(texte_3,(0,436))\n\t\tif Secondes < 53 :\n\t\t\tif Secondes > 27 :\n\t\t\t\tscreen.blit(slide_2,(100,100))\n\t\t\t\tDrawCadre()\n\t\t\tif Secondes > 28 and Secondes < 36 :\n\t\t\t\tscreen.blit(texte_4,(0,436))\n\t\t\tif Secondes > 37 and Secondes < 45 :\n\t\t\t\tscreen.blit(texte_5,(0,436))\n\t\t\tif Secondes > 46 :\n\t\t\t\tscreen.blit(texte_6,(0,436))\n\t\tif Secondes < 77 :\n\t\t\tif Secondes > 53 :\n\t\t\t\tscreen.blit(slide_3,(100,100))\n\t\t\t\tDrawCadre()\n\t\t\tif Secondes > 54 and Secondes < 66 :\n\t\t\t\tscreen.blit(texte_7,(0,436))\n\t\t\tif Secondes > 67 and Secondes < 76 :\n\t\t\t\tscreen.blit(texte_8,(0,436))\n\t\tif Secondes < 120 :\n\t\t\tif Secondes > 77 :\n\t\t\t\tscreen.blit(slide_4,(947-(11*Secondes),100))\n\t\t\t\tDrawCadre()\n\t\t\tif Secondes > 77 and Secondes < 85 :\n\t\t\t\tscreen.blit(texte_9,(0,436))\n\t\t\tif Secondes > 85 and Secondes < 93 :\n\t\t\t\tscreen.blit(texte_10,(0,436))\n\t\t\tif Secondes > 94 and Secondes < 102 :\n\t\t\t\tscreen.blit(texte_11,(0,436))\n\t\t\tif Secondes > 103 and Secondes < 111 :\n\t\t\t\tscreen.blit(texte_12,(0,436))\n\t\t\tif Secondes > 112 and Secondes < 120 :\n\t\t\t\tscreen.blit(texte_13,(0,436))\n\t\tif Secondes < 158 :\n\t\t\tif Secondes > 120 :\n\t\t\t\tscreen.blit(slide_5,(100,100))\n\t\t\t\tDrawCadre()\n\t\t\tif Secondes > 120 and Secondes < 129 :\n\t\t\t\tscreen.blit(texte_14,(0,436))\n\t\t\tif Secondes > 130 and Secondes < 138 :\n\t\t\t\tscreen.blit(texte_15,(0,436))\n\t\t\tif Secondes > 139 and Secondes < 147 
:\n\t\t\t\tscreen.blit(texte_16,(0,436))\n\t\t\tif Secondes > 148 and Secondes < 157 :\n\t\t\t\tscreen.blit(texte_17,(0,436))\n\n\t\tif Secondes > 162 and Secondes < 169 :\n\t\t\tscreen.blit(texte_18,(0,242))\n\n\t\tif Secondes < 207 :\n\t\t\tif Secondes > 171 :\n\t\t\t\tscreen.blit(slide_6,(100,100))\n\t\t\tif Secondes > 171 and Secondes < 179 :\n\t\t\t\tscreen.blit(texte_19,(0,436))\n\t\t\tif Secondes > 180 and Secondes < 188 :\n\t\t\t\tscreen.blit(texte_20,(0,436))\n\t\t\tif Secondes > 189 and Secondes < 197 :\n\t\t\t\tscreen.blit(texte_21,(0,436))\n\t\t\tif Secondes > 198 and Secondes < 206 :\n\t\t\t\tscreen.blit(texte_22,(0,436))\n\n\tif Secondes > 213 :\n\t\tjouer_cinematique = 0\n\n\n\tpg.display.update()\n","sub_path":"Projet/projet.py","file_name":"projet.py","file_ext":"py","file_size_in_byte":25970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"511550818","text":"import sys\nimport os\n\nsys.path.insert(0, '/home/eric/pliptool')\nfrom plip.modules.preparation import PDBComplex\n\nimport math\ndef calculate_distance(a, b):\n \"\"\"\n :param a: vector a\n :param b: vector b\n :return:\n \"\"\"\n dif1 = a[0] - b[0]\n dif2 = a[1] - b[1]\n dif3 = a[2] - b[2]\n\n return math.sqrt(dif1*dif1+dif2*dif2+dif3*dif3)\n\nclass PLIP_CSV_Filler(object):\n \"\"\"docstring for PLIP_CSV_Filler\"\"\"\n\n def __init__(self, id):\n self.id = id\n self.ligand_res_type = \"\"\n self.ligand_ID = \"\"\n self.protein_res_type = \"\"\n self.protein_ID = \"\"\n self.interaction_type = \"\"\n self.distance = \"\"\n self.angle = \"\"\n self.ligand_atom_type = \"\"\n self.protein_atom_type = \"\"\n self.hbond_type = \"\"\n\n def set_ligand_res_type(self, str):\n self.ligand_res_type = str\n\n def set_ligand_ID(self, str):\n self.ligand_ID = str\n\n def set_protein_ID(self, str):\n self.protein_ID = str\n\n def set_protein_res_type(self, str):\n self.protein_res_type = str\n\n def set_interaction_type(self, str):\n self.interaction_type = str\n\n def set_distance(self, str):\n self.distance = str\n\n def set_angle(self, str):\n self.angle = str\n\n def set_ligand_atom_type(self, str):\n self.ligand_atom_type = str\n\n def set_protein_atom_type(self, str):\n self.protein_atom_type = str\n\n def set_hbond_type(self, str):\n self.hbond_type = str\n\n def return_csv_string(self):\n s = \"\"\n for x in [self.id, self.ligand_res_type, self.ligand_ID, self.protein_res_type, self.protein_ID,\n self.interaction_type,\n self.distance, self.angle, self.ligand_atom_type, self.protein_atom_type, self.hbond_type]:\n s += str(x) + \",\"\n return s[:-1] + \"\\n\"\n\n\natomic_number = {1: \"H\", 7: \"N\", 6: \"C\", 8: \"O\", 16: \"S\"}\n\nnon_GAG_residues = [\"NI\", \"CH2\", \"SO4\", \"TDG\", 'CAC', 'ASG', 'CA', 'EPE', 'IPA', 'NA', 'PO4', 'GOL', 'A3P', 'CIT',\n 'MPD', 'ACY', '0G6', 'ZN', 'FMT', 'PEG', 'MG', 'E64', 'NO3', 'ACT', 'JHM', 'TLA', 'DTT', 'PCA',\n 'CO3', 'MN', 'PA5', 'FAD', 'THJ', 'SIA', 'PT', 'K', 'AMP', 'TL', 'MRD', 'DMJ', 'CS', 'ACP', 'CO',\n 'EDO', 'LDA', 'ACE', 'RET', 'PLM', 'C8E', 'BME', 'HG', 'FOR', 'BEK', 'ADE', 'NHE', 'BEZ', 'HEM',\n 'OS', 'NH2', 'CD', 'HEZ', 'ASO', 'VO4', 'FSM', 'PG4', 'BEN', '6PG', 'NPO', '2PO', 'ACH', 'PEP',\n 'NO2', 'UNX', '293', 'IFL', 'XX6', 'XX7', 'AGG', 'MPT', 'PGE', 'FE', '1PG', 'GBL', 'MES', 'A46',\n 'AZI', 'AVE', 'AVF', 'AVD', 'BCD', 'OH', 'CYS']\n\ninteractions_csv = open(\"interactions.csv\", \"w\")\ninteractions_csv.write(\"PDB_ID,ligand_res_type,ligand_ID,protein_res_type,protein_res_id,interaction_type,\"\n 
\"distance,angle,ligand_atom_type,protein_atom_type\\n\")\n# 0 1 2 3 4\n# PDB_ID ligand_res_type ligand_ID protein_res_type protein_res_id\n#\n# 5 6 7 8 9 hbond type\n# interaction_type distance angle ligand_atom_type protein_atom_type hbond type\nc = 0\n\n\n\n\npath = \"/home/eric/Projects/GlycoTorch/data/complexes/\"\ncount = 1\nprint(os.listdir(path))\n# loops through the files in the ../../pdb path\nfor ligand in os.listdir(path):\n if ligand.__contains__(\".pdb\"):\n my_mol = PDBComplex()\n\n print(\"{} {} out of {}\".format(ligand, count, len(os.listdir(path))))\n count += 1\n\n my_mol.load_pdb(path + ligand) # Load the PDB file into PLIP class\n my_mol.analyze()\n\n PDB_ID = my_mol.pymol_name\n\n residues = list(my_mol.interaction_sets.items())\n\n ligand_residue_names = set()\n for ligand_residue_name in residues:\n ligand_residue_names.add(ligand_residue_name[0].split(\":\")[0])\n\n ligand_residue_atoms = []\n\n\n # LYS: NZ, CE\n # HIS: CG, CD, NE2, ND1, CE\n # ARG: CZ, NH1, NH2, NE\n\n residue_atoms = {}\n pdb_file = open(path + ligand, \"r\")\n for line in pdb_file.readlines():\n\n if line.startswith(\"HETATM\"):\n l = line.split()\n if l[3] in ligand_residue_names:\n ligand_residue_atoms.append(line)\n\n if line.startswith(\"ATOM \"):\n l = line.split()\n if l[3] in ligand_residue_names:\n ligand_residue_atoms.append(line)\n\n if l[3] == \"LYS\":\n if l[2] == \"NZ\" or l[2] == \"CE\":\n if int(line[22:26].strip(\" \")) not in residue_atoms.keys():\n residue_atoms[int(line[22:26].strip(\" \"))] = []\n residue_atoms[int(line[22:26].strip(\" \"))].append(\"{} {} {} {}\\n\".format(l[-1], float(line[30:37].strip()),\n float(line[38:46].strip()), float(line[46:53].strip())))\n if l[3] == \"HIS\":\n if l[2] == \"CG\" or l[2] == \"CD\" or l[2] == \"NE2\" or l[2] == \"ND2\" or l[2] == \"ND1\" or l[2] == \"CE\":\n if int(line[22:26].strip(\" \")) not in residue_atoms.keys():\n residue_atoms[int(line[22:26].strip(\" \"))] = []\n residue_atoms[int(line[22:26].strip(\" \"))].append(\"{} {} {} {}\\n\".format(l[-1], float(line[30:37].strip()),\n float(line[38:46].strip()), float(line[46:53].strip())))\n if l[3] == \"ARG\":\n if l[2] == \"CZ\" or l[2] == \"NH1\" or l[2] == \"NH2\" or l[2] == \"NE\":\n if int(line[22:26].strip(\" \")) not in residue_atoms.keys():\n residue_atoms[int(line[22:26].strip(\" \"))] = []\n residue_atoms[int(line[22:26].strip(\" \"))].append(\"{} {} {} {}\\n\".format(l[-1], float(line[30:37].strip()),\n float(line[38:46].strip()), float(line[46:53].strip())))\n\n for res, interaction in residues:\n if not res.split(\":\")[0] in non_GAG_residues:\n ligand_salt_bridge = interaction.saltbridge_lneg\n for x in ligand_salt_bridge:\n\n filler = PLIP_CSV_Filler(PDB_ID)\n filler.set_interaction_type(\"saltbridge\")\n filler.set_ligand_ID(x.resnr_l)\n filler.set_ligand_res_type(x.restype_l)\n filler.set_protein_res_type(x.restype)\n filler.set_protein_ID(x.resnr)\n filler.set_ligand_atom_type(x.negative.fgroup)\n filler.set_distance(x.distance)\n interactions_csv.write(filler.return_csv_string())\n\n saltbridge_xyz = open(\"{0}_{1}/{0}_{1}_{2}.xyz\".format(x.negative.fgroup, x.restype, c), \"w\")\n c += 1\n\n neg = []\n\n for atom in x.negative.atoms:\n saltbridge_xyz.write(\"{} {} {} {}\\n\".format(atomic_number[atom.atomicnum], atom.coords[0],\n atom.coords[1], atom.coords[2]))\n neg.append((atom.coords[0], atom.coords[1], atom.coords[2]))\n\n if len(x.negative.atoms) == 2 and len(neg) == 2:\n for line in ligand_residue_atoms:\n l = line.split()\n l_atom_xyz = 
[float(line[30:37].strip()), float(line[38:46].strip()), float(line[46:53].strip())]\n if l[-1] == \"C\":\n if calculate_distance(l_atom_xyz, neg[0]) < 1.4 and calculate_distance(l_atom_xyz, neg[1]) < 1.4:\n saltbridge_xyz.write(\"{} {} {} {}\\n\".format(l[-1], float(line[30:37].strip()),\n float(line[38:46].strip()),\n float(line[46:53].strip())))\n\n\n\n\n\n xx = 0\n y = 0\n z = 0\n for x1, y1, z1 in neg:\n xx += x1\n y += y1\n z += z1\n neg_avg = [xx/len(neg), y/len(neg), z/len(neg)]\n\n if x.positive.resnr != \"HIS\" and x.positive.resnr in residue_atoms.keys():\n for atom in residue_atoms[x.positive.resnr]:\n if calculate_distance([float(x) for x in atom.split()[1:]], neg_avg) < 7:\n saltbridge_xyz.write(atom)\n\n elif x.positive.resnr == \"HIS\":\n for atom in x.positive.atoms:\n saltbridge_xyz.write(\"{} {} {} {}\\n\".format(atomic_number[atom.atomicnum], atom.coords[0],\n atom.coords[1], atom.coords[2]))\n\n\n ligand_hbond_donor = interaction.hbonds_ldon\n for x in ligand_hbond_donor:\n filler = PLIP_CSV_Filler(PDB_ID)\n filler.set_interaction_type(\"hbond_donor\")\n filler.set_ligand_ID(x.resnr_l)\n filler.set_ligand_res_type(x.restype_l)\n filler.set_protein_res_type(x.restype)\n filler.set_protein_ID(x.resnr)\n filler.set_ligand_atom_type(x.dtype)\n filler.set_protein_atom_type(x.atype)\n filler.set_distance(x.distance_ad)\n filler.set_angle(x.angle)\n filler.set_hbond_type(x.type)\n interactions_csv.write(filler.return_csv_string())\n\n ligand_hbond_acceptor = interaction.hbonds_pdon\n for x in ligand_hbond_acceptor:\n filler = PLIP_CSV_Filler(PDB_ID)\n filler.set_interaction_type(\"hbond_acceptor\")\n filler.set_ligand_ID(x.resnr_l)\n filler.set_ligand_res_type(x.restype_l)\n filler.set_protein_res_type(x.restype)\n filler.set_protein_ID(x.resnr)\n filler.set_ligand_atom_type(x.atype)\n filler.set_protein_atom_type(x.dtype)\n filler.set_distance(x.distance_ad)\n filler.set_angle(x.angle)\n filler.set_hbond_type(x.type)\n interactions_csv.write(filler.return_csv_string())\n\n ligand_hydrophobic = interaction.hydrophobic_contacts\n for x in ligand_hydrophobic:\n filler = PLIP_CSV_Filler(PDB_ID)\n filler.set_interaction_type(\"hydrophobic\")\n filler.set_ligand_ID(x.resnr_l)\n filler.set_ligand_res_type(x.restype_l)\n filler.set_protein_res_type(x.restype)\n filler.set_protein_ID(x.resnr)\n filler.set_ligand_atom_type(x.ligatom.type)\n filler.set_protein_atom_type(x.bsatom.type)\n filler.set_distance(x.distance)\n\n interactions_csv.write(filler.return_csv_string())\n\n water_bridges = interaction.water_bridges\n for x in water_bridges:\n filler = PLIP_CSV_Filler(PDB_ID)\n filler.set_interaction_type(\"water_bridges\")\n filler.set_ligand_ID(x.resnr_l)\n filler.set_ligand_res_type(x.restype_l)\n filler.set_protein_res_type(x.restype)\n filler.set_protein_ID(x.resnr)\n filler.set_ligand_atom_type(x.atype)\n filler.set_protein_atom_type(x.dtype)\n filler.set_distance((x.distance_aw + x.distance_dw) / 2)\n filler.set_angle(x.d_angle)\n interactions_csv.write(filler.return_csv_string())\n\n if len(interaction.pication_paro) > 1:\n print(\"\")\n\n if len(interaction.pistacking) > 1:\n print(\"\")\n\n if len(interaction.metal_complexes) > 1:\n print(\"\")\n\n # if len(interaction.water_bridges) > 1:\n # print(\"\")\n\n# metal complexes: 2HYU, 2HYV\n","sub_path":"PLIP.py","file_name":"PLIP.py","file_ext":"py","file_size_in_byte":12628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"487123510","text":"import numpy as np\nimport 
cv2\nimport math\n\nfrom processing_methods import *\nfrom hog_functions import get_polar_gradients\nfrom mbh import get_mbh_descriptor\nimport matplotlib.pyplot as plt\n\n# -----------------------------------METHODS-------------------------------------------------\n\n\ndef find_index_location(bgr, num_divisionsH, num_divisionsW, heightDivision, widthDivision, index):\n # calculate left corner point (x,y) of window\n startX = ((index - 1) % num_divisionsW) * widthDivision\n startY = np.floor((index - 1) / num_divisionsH).astype(np.int) * heightDivision\n\n return startX, startY\n\n\ndef non_display_window(bgr, num_divisionsH, num_divisionsW, non_displayed_region):\n # calculate height and width of window to not display\n heightDivision, widthDivision = np.floor(bgr.shape[0] / num_divisionsH).astype(np.int), np.floor(\n bgr.shape[1] / num_divisionsW).astype(np.int)\n\n startX, startY = find_index_location(bgr, num_divisionsH, num_divisionsW, heightDivision, widthDivision,\n non_displayed_region)\n\n # assign window to white in HSV\n bgr[startY:startY + heightDivision, startX:startX + widthDivision] = (255, 255, 255)\n\n\ndef get_prvs_windows(index, roi_list, prvs_window_list):\n prvs_window_list.append(roi_list[index - 1])\n return prvs_window_list\n\n\ndef get_next_window(index, roi_list, next_window_list):\n return next_window_list\n\n\n# returns a list with the pixels of the regions of interest\ndef get_roi(frame, num_divisionsW, num_divisionsH):\n gridDivisionW = np.floor(frame.shape[1] / num_divisionsW).astype(np.int)\n gridDivisionH = np.floor(frame.shape[0] / num_divisionsH).astype(np.int)\n roi_list = [(frame[y*gridDivisionH:(y+1)*gridDivisionH, x*gridDivisionW:(x+1)*gridDivisionW])\n for x in range(num_divisionsW) for y in range(num_divisionsH)]\n return roi_list\n\n\n# returns a histogram and hsv values for displaying\ndef get_optical_flow(prvs, next, hsv, erosionKernel, dilationKernel):\n # Get Gunnar-Farneback optical flow\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.1, 0)\n\n # Convert the cartesian flow vectors to polar vectors\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n\n # Convert angle to degrees for plotting\n hsv[..., 0] = ang * 180 / np.pi\n\n # Normalize the magnitude (intensity) to be within range 0-255\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n\n # Perform morphological operations\n hsv[..., 2] = cv2.morphologyEx(hsv[..., 2], cv2.MORPH_OPEN, erosionKernel)\n hsv[..., 2] = cv2.morphologyEx(hsv[..., 2], cv2.MORPH_CLOSE, dilationKernel)\n\n # Threshold image\n _, hsv[..., 2] = cv2.threshold(hsv[..., 2], 12, 255, cv2.THRESH_TOZERO)\n\n # Get the histogram of the area\n # myHist, _ = np.histogram(ang, bins=myRange, weights=hsv[..., 2], density=False)\n\n return hsv, ang\n\n\n# Draw a grid nxm grid onto the image\ndef draw_grid(num_divisionsH, num_divisionsW, bgr):\n # Get the top points and the left points\n heightDivision, widthDivision = np.floor(bgr.shape[0] / num_divisionsH).astype(np.int), np.floor(\n bgr.shape[1] / num_divisionsW).astype(np.int)\n topPoints = [(i * widthDivision, 0) for i in range(1, num_divisionsW)]\n bottomPoints = [(i * widthDivision, bgr.shape[0] - 1) for i in range(1, num_divisionsW)]\n leftPoints = [(0, i * heightDivision) for i in range(1, num_divisionsH)]\n rightPoints = [(bgr.shape[1] - 1, i * heightDivision) for i in range(1, num_divisionsH)]\n # draw a line\n for i in range(len(topPoints)):\n # print(topPoints[i])\n cv2.line(bgr, topPoints[i], bottomPoints[i], (0, 255, 
0), thickness=3, lineType=8, shift=0)\n for i in range(len(leftPoints)):\n cv2.line(bgr, leftPoints[i], rightPoints[i], (0, 255, 0), thickness=3, lineType=8, shift=0)\n\n\ndef plot_histogram(frameCount, savedPlotCount, frameHist, myRange, numBins, save, hist_layout, dirName,\n numberOfFrames, saveToFolder, videoTitle, ylim_max, hist_scaling_factor):\n if frameCount is numberOfFrames:\n # save histograms to file\n figPath = '/home/tabitha/Desktop/automatic-detection-of-fish-behaviour/savedHistograms/'\n if saveToFolder:\n figNameString = figPath + dirName + '/' + videoTitle + '/{0:08}'.format(savedPlotCount) + '.png'\n else:\n figNameString = figPath + '/{0:08}'.format(savedPlotCount) + '.png'\n\n plt.figure(\"main figure\")\n plt.subplot(hist_layout[0, 0:])\n plt.ylim(0, ylim_max)\n plt.bar(myRange[:-1], frameHist / hist_scaling_factor, align='edge', width=2 * math.pi / numBins)\n #plt.bar(myRange[:-1], frameHist, align='edge', width=2 * math.pi / numBins)\n\n # plot visualisation stuff\n plt.title('Histogram of Optical Flow')\n plt.ylabel('Scaled Vector Histogram')\n plt.xlabel('Degrees (rad)')\n plt.grid(True)\n plt.tight_layout()\n\n '''\n if saveToFolder is True:\n if not os.path.exists(''.join((figPath, dirName, '/', videoTitle))):\n os.mkdir(''.join((figPath, dirName, '/', videoTitle)))\n plt.savefig(figNameString)\n plt.clf()\n '''\n savedPlotCount += 1\n\n if saveToFolder is True:\n print('saved figure', savedPlotCount)\n\n frameCount = 0\n\n return frameCount, savedPlotCount\n\n\ndef get_first_derivative(array, fps):\n # get symmetric first derivative of array\n step_size = 3 * float(1 / fps)\n derivative = [((array[i+1] - array[i-1]) / (2 * step_size)) for i in range(1, len(array)-1)]\n return derivative","sub_path":"NewHogBased/processing_methods.py","file_name":"processing_methods.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"415193043","text":"# ************** STUDENTS EDIT THIS FILE **************\n\nfrom SteeringBehaviors import Wander\nimport SimulationEnvironment as sim\nimport pygame, sys\n\nimport numpy as np\nimport pandas as pd\ndef collect_training_data(total_actions):\n #set-up environment\n sim_env = sim.SimulationEnvironment()\n\n #robot control\n action_repeat = 100\n steering_behavior = Wander(action_repeat)\n\n num_params = 7\n #STUDENTS: network_params will be used to store your training data\n # a single sample will be comprised of: sensor_readings, action, collision\n x1,x2,x3,x4,x5,a,y = 0,0,0,0,0,0,0;\n sensor_readings = list\n action = int\n collision = int\n network_params = {sensor_readings, action, collision}\n \n df = pd.DataFrame(columns=['s1', 's2', 's3','s4','s5','action','collision'])\n index = 0\n collision_count = 0\n non_collision_count = 0\n\n for action_i in range(total_actions):\n for event in pygame.event.get():\n pass\n progress = 100*float(action_i)/total_actions\n #print(f'Collecting Training Data {progress}% ', end=\"\\r\", flush=True)\n \n\n #steering_force is used for robot control only\n action, steering_force = steering_behavior.get_action(action_i, sim_env.robot.body.angle)\n #print('action:' , action)\n for action_timestep in range(action_repeat):\n row=list()\n if action_timestep == 0:\n \n state, collision, sensor_readings = sim_env.step(steering_force)\n #State \n # print('act1: ')\n # print('state:', state)\n # print('collisiontop: ', collision)\n # print('sensor', sensor_readings)\n # print('sensor', type(sensor_readings))\n 
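# Hedged sketch (not part of any dataset record above). get_optical_flow() in
# NewHogBased/processing_methods.py computes polar flow but leaves its
# histogram commented out; this is the minimal magnitude-weighted orientation
# histogram it alludes to, assuming "ang" (radians, from cv2.cartToPolar) and
# the thresholded intensity plane hsv[..., 2] from that function.
import numpy as np

def flow_histogram(ang, weights, num_bins=16):
    # Bin edges cover the full 0..2*pi orientation range.
    bins = np.linspace(0.0, 2.0 * np.pi, num_bins + 1)
    hist, _ = np.histogram(ang.ravel(), bins=bins, weights=weights.ravel())
    return hist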
# print(sensor_readings[0])\n \n else:\n state, collision, sensor_readings = sim_env.step(steering_force)\n # print('act2: ')\n # print('state:', state)\n # print('collisiontop: ', collision)\n # print('sensor', sensor_readings)\n #print(a,b)\n \n \n \n \n if collision:\n #print(\"collision_warning: \",collision)\n steering_behavior.reset_action()\n #STUDENTS NOTE: this statement only EDITS collision of PREVIOUS action\n #if current action is very new.\n if action_timestep < action_repeat * .3: #in case prior action caused collision\n #print(\"collision_bottom: \",collision)\n #To do:\n df.iloc[-1, df.columns.get_loc('collision')] = collision\n #print(df.iloc[-1])\n #network_params[-1][-1] = collision #share collision result with prior action\n break\n row = list()\n row.extend(sensor_readings.tolist())\n row.append(action)\n row.append(collision)\n # print('row: ', row)\n row = pd.Series(row, index = df.columns)\n if(len(row)!=0):\n if( non_collision_count < collision_count):\n df = df.append(row,ignore_index=True)\n index = index+1\n non_collision_count = non_collision_count + 1\n elif(row['collision']==1):\n df = df.append(row,ignore_index=True)\n index = index+1\n collision_count = collision_count + 1\n # if((len(df)%2000) == 0):\n # df.to_csv('submission2.csv',header=False,mode='a',index=False,line_terminator='\\n')\n # lst = [df]\n # del lst\n # df = pd.DataFrame(columns=['s1', 's2', 's3','s4','s5','action','collision'])\n # index = 0\n # if(index>800000):\n # break\n \n #print(df.head(100))\n df.to_csv('submission5.csv',header=False,index=False,line_terminator='\\n')\n\n\n #STUDENTS: Update network_params.\n\n\n #STUDENTS: Save .csv here. Remember rows are individual samples, the first 5\n #columns are sensor values, the 6th is the action, and the 7th is collision.\n #Do not title the columns. 
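# Hedged sketch (not part of any dataset record above; "keep_row" is a
# hypothetical helper). The student loop above gates df.append() on running
# class counts; its intent appears to be: always keep a collision sample, and
# keep a non-collision sample only while negatives still trail positives.
def keep_row(is_collision, counts):
    # counts is a dict like {"pos": 0, "neg": 0}, mutated in place.
    if is_collision:
        counts["pos"] += 1
        return True
    if counts["neg"] < counts["pos"]:
        counts["neg"] += 1
        return True
    return False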
Your .csv should look like the provided sample.\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n total_actions = 8000000000\n collect_training_data(total_actions)\n","sub_path":"collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"197541069","text":"# Uses python2\nfrom __future__ import print_function\nC = []\nC.append(0)\nC.append(0)\nD = []\ndef calc(N):\n\t# C[0]=0\n\tfor i in range(2,N+1):\n\t\tif i%6==0:\n\t\t\tC.append(min(C[i/2],C[i/3],C[i-1])+1)\n\t\tif i%2==0 and i%3!=0:\n\t\t\tC.append(min(C[i/2],C[i-1])+1)\n\t\tif i%3==0 and i%2!=0:\n\t\t\tC.append(min(C[i/3],C[i-1])+1)\n\t\tif i%3!=0 and i%2!=0:\n\t\t\tC.append(C[i-1]+1)\n\treturn C[N]\ndef trace(i):\n\tif i==1:\n\t\treturn 1\n\tif i%6==0:\n\t\tif C[i/3] == min(C[i/2],C[i/3],C[i-1]):\n\t\t\tD.append(i/3)\n\t\t\treturn trace(i/3)\n\t\telif C[i/2] == min(C[i/2],C[i/3],C[i-1]):\n\t\t\tD.append(i/2)\n\t\t\treturn trace(i/2)\n\t\telse:\n\t\t\tD.append(i-1)\n\t\t\treturn trace(i-1)\n\telif i%3==0 and i%2!=0:\n\t\tif C[i/3] == min(C[i/3],C[i-1]):\n\t\t\tD.append(i/3)\n\t\t\treturn trace(i/3)\n\t\telse:\n\t\t\tD.append(i-1)\n\t\t\treturn trace(i-1)\n\telif i%2==0 and i%3!=0:\n\t\tif C[i/2] == min(C[i/2],C[i-1]):\n\t\t\tD.append(i/2)\n\t\t\treturn trace(i/2)\n\t\telse:\n\t\t\tD.append(i-1)\n\t\t\treturn trace(i-1)\n\telse:\n\t\tD.append(i-1)\n\t\treturn trace(i-1)\n\t\t\nN = int(raw_input())\nprint (calc(N))\n# print C\nD.append(N)\ntrace(N)\nD.reverse()\nfor i in D:\n\tprint (i,end=\" \")","sub_path":"Coursera/DSAlgo/Course1/4.1Calc.py","file_name":"4.1Calc.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"18812131","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.impute import SimpleImputer, MissingIndicator\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, PolynomialFeatures\nfrom sklearn.pipeline import Pipeline, make_pipeline, make_union\n\n\ndef split_ml_dataset(learn_df, split_date, y_col, X_cols):\n \n # Split the train/test set sequentially\n months = sorted(learn_df.index.get_level_values('date').unique().tolist())\n n = months.index(pd.to_datetime(split_date)) + 1\n \n train_months = months[:n]\n test_months = months[n:]\n \n print (\"Total months:\", len(months))\n print (\"Training months:\\n\", [str(i)[:7] for i in train_months], \"\\n\")\n print (\"Testing months:\\n\", [str(i)[:7] for i in test_months])\n \n # Divide features and target\n y = learn_df[y_col] \n X = learn_df[X_cols].astype(float) \n \n \n # Now, drop missing values for the purposes of learning\n learn_df.dropna(subset=[y_col], inplace=True)\n \n y_nomissing = learn_df[y_col] \n X_nomissing = learn_df[X_cols].astype(float) \n \n y_train, y_test = y_nomissing.loc[months[:n]], y_nomissing.loc[months[n:]]\n X_train, X_test = X_nomissing.loc[months[:n]], X_nomissing.loc[months[n:]]\n \n\n y_train = np.array(y_train)\n y_test = np.array(y_test)\n\n pp_pipeline = make_pipeline(\n #make_union(\n # SimpleImputer(missing_values=np.nan, strategy='mean'), #, add_indicator=True),\n #MissingIndicator(missing_values=np.nan)\n #),\n #PolynomialFeatures(2),\n MinMaxScaler() # https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html\n )\n\n X_train_scaled = pp_pipeline.fit_transform(X_train)\n X_scaled = pp_pipeline.transform(X)\n\n return X_train_scaled, y_train, X_scaled, X,y, 
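# Hedged sketch (not part of any dataset record above). 4.1Calc.py above
# solves the classic "primitive calculator" DP in Python 2 (raw_input,
# integer "/") and rebuilds the path with a recursive trace(); an equivalent
# Python 3 version can store one parent pointer per value instead.
def primitive_calculator(n):
    ops, parent = [0] * (n + 1), [0] * (n + 1)
    for i in range(2, n + 1):
        candidates = [i - 1]
        if i % 2 == 0:
            candidates.append(i // 2)
        if i % 3 == 0:
            candidates.append(i // 3)
        # Best predecessor is the reachable value with the fewest operations.
        parent[i] = min(candidates, key=lambda j: ops[j])
        ops[i] = ops[parent[i]] + 1
    path = [n]
    while path[-1] != 1:
        path.append(parent[path[-1]])
    return ops[n], path[::-1]

# primitive_calculator(10) -> (3, [1, 3, 9, 10])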
train_months, test_months","sub_path":"experiment_2/src/ml_helpers/split_ml_dataset.py","file_name":"split_ml_dataset.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"144712028","text":"from dataset import LmdbDataset\nimport torchvision.transforms.functional as F\nimport torch.utils.data as data\nimport torch.optim as optim\nimport torch\nimport sys\nimport torch.nn as nn\nfrom img_show import wrong_img_save\nfrom dice_loss import dice_coeff\nfrom models import res3dmine, resnet3d, resnew\nfrom utils.logger import log\nfrom tqdm import tqdm\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.metrics import precision_score, recall_score, f1_score, roc_curve, roc_auc_score, auc, confusion_matrix\n\n\ndef channel_max(tensor):\n max_i = torch.zeros(tensor.size(0))\n for i in range(0, tensor.size(0)):\n max_i[i] = torch.argmax(tensor[i, :, :])\n return max_i\n\n\ndef adjust_learning_rate(epoch):\n lr = optimizer.param_groups[0]['lr']\n # print(lr)\n if (epoch % 30) == 0 and epoch != 0:\n lr = lr / 10\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef save_models(epoch):\n torch.save(model.state_dict(), 'model_save/resnet34_{}.model'.format(epoch))\n print('checkpoint saves')\n\n\ndef mixup_data(x, y, alpha=1.0, use_cuda=True):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\n\ndef mixup_criterion(criterion, pred, y_a, y_b, lam):\n return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\n\n\ndef rand_bbox(size, lam):\n D = size[2]\n W = size[3]\n H = size[4]\n cut_rat = np.sqrt(1. 
- lam)\n cut_d = np.int(D * cut_rat)\n cut_w = np.int(W * cut_rat)\n cut_h = np.int(H * cut_rat)\n\n # uniform\n cz = np.random.randint(D)\n cx = np.random.randint(W)\n cy = np.random.randint(H)\n\n bbz1 = np.clip(cz - cut_d // 2, 0, D)\n bbz2 = np.clip(cz + cut_d // 2, 0, D)\n bbx1 = np.clip(cx - cut_w // 2, 0, W)\n bby1 = np.clip(cy - cut_h // 2, 0, H)\n bbx2 = np.clip(cx + cut_w // 2, 0, W)\n bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n return bbz1, bbz2, bbx1, bby1, bbx2, bby2\n \n\ndef test(epoch):\n model.eval()\n test_acc = 0.0\n sample_sum = 0.0\n batch_sum = 0.0\n value_list = []\n for i, (images, labels_cls) in enumerate(test_loader):\n\n images = images.to(device)\n labels_cls = labels_cls.to(device)\n outputs_cls = model(images.float())\n # test_out = out_layer1[1, :, 0, :, :]\n # test_out = torch.norm(test_out, dim=0)\n for j in range(len(outputs_cls)):\n value_list.append([m(outputs_cls)[j].cpu().detach().numpy(), labels_cls[j].cpu().detach().numpy()])\n prediction = m(outputs_cls).ge(0.5).long().view(-1)\n # test_acc += torch.sum(prediction == labels_cls)\n # sample_sum += len(labels_cls)\n # batch_sum += 1\n\n with open('value.txt', 'w') as f:\n for value in value_list:\n f.write(str(value[0]))\n f.write('\\t')\n f.write(str(value[1]))\n f.write('\\n')\n # print(sample_sum, test_acc)\n # test_acc = test_acc.float() / sample_sum\n\n true_list = labels_cls.tolist()\n pre_list = prediction.tolist()\n\n c = confusion_matrix(true_list, pre_list)\n TP = c[1, 1]\n TN = c[0, 0]\n FP = c[0, 1]\n FN = c[1, 0]\n specificity = TN / float(TN + FP)\n sensitivity = TP / float(TP + FN)\n accuracy = (TP+TN) / float(TP+TN+FN+FP)\n print(accuracy, sensitivity, specificity)\n return accuracy, sensitivity, specificity \n\n\ndef train(num_epochs):\n for epoch in range(num_epochs):\n adjust_learning_rate(epoch)\n model.train()\n log.info('{} epochs in total, {} epoch'.format(num_epochs, epoch))\n train_loss = 0.0\n batch_sum = 0.0\n sample_sum = 0.0\n train_acc = 0.0\n total = 0\n for i, (images, labels_cls) in tqdm(enumerate(train_loader), total=len(train_loader)):\n\n images = images.to(device)\n labels_cls = labels_cls.to(device)\n\n #mix up\n r = np.random.rand(1)\n args_beta = 1.0\n args_cutmix_prob = 0.5\n if args_beta > 0 and r < args_cutmix_prob:\n # generate mixed sample\n lam = np.random.beta(args_beta, args_beta)\n rand_index = torch.randperm(images.size()[0]).cuda()\n target_a = labels_cls\n target_b = labels_cls[rand_index]\n bbz1, bbz2, bbx1, bby1, bbx2, bby2 = rand_bbox(images.size(), lam)\n images[:, :, bbz1:bbz2, bbx1:bbx2, bby1:bby2] = images[rand_index, :, bbz1:bbz2, bbx1:bbx2, bby1:bby2]\n # adjust lambda to exactly match pixel ratio\n lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) * (bbz2 - bbz1) / (images.size()[-1] * images.size()[-2] * images.size()[-3]))\n # compute output\n output = model(images)\n loss = loss_bce(output.view(-1), target_a.float()) * lam + loss_bce(output.view(-1), target_b.float()) * (1. 
- lam)\n else:\n # compute output\n output = model(images)\n loss = loss_bce(output.view(-1), labels_cls.float())\n\n train_loss += loss.data\n optimizer.zero_grad()\n # outputs_cls = model(images)\n # loss = loss_bce(outputs_cls.view(-1), labels_cls.float())\n\n \n loss.backward()\n optimizer.step()\n\n # prediction = m(outputs_cls).ge(0.5).long().view(-1)\n # train_acc += torch.sum(prediction == labels_cls)\n batch_sum += 1\n # sample_sum += len(labels_cls)\n\n train_loss = train_loss / batch_sum\n\n test_acc, sensitivity, specificity = test(epoch)\n if test_acc > 0.66 and sensitivity > 0.66 and specificity > 0.66:\n torch.save(model.state_dict(), './yw_save/cut-model_{}_acc_{:.3f}_{:.3f}_{:.3f}.pth'.format(epoch, test_acc, sensitivity, specificity))\n best_acc = test_acc\n print('checkpoint saves')\n\n print('Epoch {}, TrainLoss: {:.3f}, Test Acc: {:.3f}, Sensi: {:.3f}, Speci: {:.3f}'.format(epoch, train_loss, test_acc, sensitivity, specificity))\n # writer.add_scalar('Loss/train', train_loss.item(), epoch)\n log.info('Epoch {}, TrainLoss: {:.3f}, Test Acc: {:.3f}, Sensi: {:.3f}, Speci: {:.3f}'.format(epoch, train_loss, test_acc, sensitivity, specificity))\n # writer.flush()\n \n return test_acc\n\nif __name__ == '__main__':\n cuda_avail = torch.cuda.is_available()\n device = \"cuda:0\" if cuda_avail else \"cpu\"\n m = nn.Sigmoid().to(device)\n loss_bce = nn.BCEWithLogitsLoss().to(device)\n # loss_ce = nn.CrossEntropyLoss().to(device)\n loss_l1 = nn.SmoothL1Loss().to(device)\n\n train_dataset = LmdbDataset('../cb_folder/yw/train')\n test_dataset = LmdbDataset('../cb_folder/yw/test')\n train_loader = data.DataLoader(train_dataset, batch_size= 2048, num_workers=8, shuffle=True)\n test_loader = data.DataLoader(test_dataset, batch_size= 2048, num_workers=8)\n\n print('hello1')\n lamda_log = list()\n # for lamda in range(50, 10100, 20):\n # model = resnet3d.resnet18()\n model = resnew.resnet10()\n model = model.to(device)\n\n test_flag = True\n if test_flag:\n path = './yw_save/cut-model_28_acc_0.694_0.714_0.687.pth'\n model_dict = model.state_dict()\n pre_dict = torch.load(path)\n pre_dict = {k: v for k, v in pre_dict.items() if k in model_dict}\n model_dict.update(pre_dict)\n model.load_state_dict(model_dict)\n\n optimizer = optim.SGD(model.parameters(), lr=0.02, momentum=0.9, weight_decay=0.00001)\n state = {'net':model, 'optimizer':optimizer}\n log.info(state)\n\n\n # total_test_acc = train(100)\n test(0)\n # lamda_log.append([lamda, total_test_acc])","sub_path":"3d_new_test/main_multi.py","file_name":"main_multi.py","file_ext":"py","file_size_in_byte":7972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"302428482","text":"import click\nimport os\nimport shutil\nimport sys\nimport time\nfrom adb import adb_commands, usb_exceptions\nfrom androguard.core.bytecodes.apk import APK\n\nimport builtins as __builtin__\ndef print(*args, **kwargs):\n sys.stdout.write('[apk-launcher] ')\n return __builtin__.print(*args, **kwargs)\n\n@click.command()\n@click.argument('apk_path')\n@click.option('-n', '--nolog', required=False, default=False, is_flag=True)\ndef run(apk_path, nolog):\n try:\n from adb import sign_m2crypto\n\n rsa_signer = sign_m2crypto.M2CryptoSigner\n except ImportError:\n try:\n from adb import sign_pythonrsa\n\n rsa_signer = sign_pythonrsa.PythonRSASigner.FromRSAKeyPath\n except ImportError:\n try:\n from adb import sign_pycryptodome\n\n rsa_signer = sign_pycryptodome.PycryptodomeAuthSigner\n except ImportError:\n rsa_signer = 
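# Hedged sketch (not part of any dataset record above; "cutmix_batch" is a
# hypothetical helper). The train() loop in 3d_new_test/main_multi.py
# interleaves CutMix with the forward pass; isolated for one 5-D batch
# (N, C, D, H, W), the augmentation is:
import numpy as np
import torch

def cutmix_batch(images, labels, beta=1.0):
    lam = np.random.beta(beta, beta)
    index = torch.randperm(images.size(0))
    d, h, w = images.shape[2:]
    cut = np.sqrt(1.0 - lam)
    cd, ch, cw = int(d * cut), int(h * cut), int(w * cut)
    cz, cy, cx = np.random.randint(d), np.random.randint(h), np.random.randint(w)
    z1, z2 = np.clip([cz - cd // 2, cz + cd // 2], 0, d)
    y1, y2 = np.clip([cy - ch // 2, cy + ch // 2], 0, h)
    x1, x2 = np.clip([cx - cw // 2, cx + cw // 2], 0, w)
    # Paste the box from a shuffled copy of the batch, then rescale lambda to
    # the exact surviving-volume ratio, as the training loop does.
    images[:, :, z1:z2, y1:y2, x1:x2] = images[index, :, z1:z2, y1:y2, x1:x2]
    lam = 1.0 - (z2 - z1) * (y2 - y1) * (x2 - x1) / float(d * h * w)
    return images, labels, labels[index], lam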
None\n\n default = os.path.expanduser('~/.android/adbkey')\n if os.path.isfile(default):\n rsa_key_path = [default]\n\n adb = adb_commands.AdbCommands()\n devices = list(adb.Devices())\n if len(devices) == 0:\n print(\"No device\")\n return\n elif len(devices) == 1:\n device = devices[0]\n else:\n for idx, device in enumerate(devices):\n print('%d: %s\\tdevice' % (idx, device.serial_number))\n device = devices[int(input(\"Select device: \"))-1]\n\n try:\n adb.ConnectDevice(port_path=device.port_path, rsa_keys=[rsa_signer(path) for path in rsa_key_path])\n except usb_exceptions.ReadFailedError:\n print(f\"is your device offline? reconnect your device to pc.\")\n sys.exit(-1)\n except Exception as e:\n print(f\"kill the current adb session(error msg: {str(e)})\")\n os.system(\"adb kill-server\")\n try:\n adb.ConnectDevice(port_path=device.port_path, rsa_keys=[rsa_signer(path) for path in rsa_key_path]) # try again\n except:\n print(f\"something wrong.. try reconnect your device to pc and run again.\")\n sys.exit(-1)\n\n apk = APK(apk_path)\n print(f\"uninstall {apk.package} package\")\n adb.Uninstall(apk.package)\n\n print(f\"reinstall {apk_path} file\")\n adb.Install(apk_path, timeout_ms=1000000)\n print(f\"start {apk.package} on {apk.get_main_activity()}\")\n adb.Shell(\"am start -n %s/%s\" % (apk.package, apk.get_main_activity()))\n if not nolog:\n adb.Logcat('clean')\n time.sleep(1) # wait for activity\n pid = adb.Shell(\"ps -ef | grep %s | tr -s [:space:] ' ' | cut -d' ' -f2\" % apk.package)\n pid = pid.strip()\n\n print(f\"\\n\\n[ Logcat - {apk.package}({pid}) ]\\n\") # recommand to use pidcat\n for i in adb.Logcat('| grep -F \"%s\"' % (pid), 1000000):\n __builtin__.print(i)\n\nif __name__ == '__main__':\n if not shutil.which('adb'):\n print('You need to install adb. 
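# Hedged sketch (not part of any dataset record above; "launch" is a
# hypothetical helper). scripts/main.py drives python-adb, but its
# install-and-launch core reduces to three adb invocations; a plain-subprocess
# fallback (assuming the adb binary is on PATH, which the script itself
# checks) would be:
import subprocess
from androguard.core.bytecodes.apk import APK

def launch(apk_path):
    apk = APK(apk_path)
    subprocess.run(["adb", "uninstall", apk.package], check=False)
    subprocess.run(["adb", "install", apk_path], check=True)
    subprocess.run(["adb", "shell", "am", "start", "-n",
                    "%s/%s" % (apk.package, apk.get_main_activity())],
                   check=True)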
(reference, https://stackoverflow.com/a/32314718)')\n sys.exit(-1)\n \n run()\n","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"337106277","text":"import pytest\n\nimport numpy as np\nimport os\n\nfrom sklearn.metrics import roc_auc_score\n\nfrom lightfm.lightfm import LightFM, model_weights\nfrom lightfm.datasets import fetch_movielens\n\n\ndef _binarize(dataset):\n positives = dataset.data >= 4.0\n dataset.data[positives] = 1.0\n dataset.data[np.logical_not(positives)] = -1.0\n\n return dataset\n\n\nTEST_FILE_PATH = \"./tests/test.npz\"\nmovielens = fetch_movielens()\ntrain, test = _binarize(movielens[\"train\"]), _binarize(movielens[\"test\"])\n\n\nclass TestPersist:\n @pytest.fixture\n def model(self):\n # Train and persist a model\n model = LightFM(random_state=10)\n model.fit(movielens[\"train\"], epochs=5, num_threads=4)\n model.save(TEST_FILE_PATH)\n return model\n\n @classmethod\n def teardown_class(cls):\n os.remove(TEST_FILE_PATH)\n\n def test_all_params_persisted(self, model):\n # Load and confirm all model params are present.\n saved_model_params = list(np.load(TEST_FILE_PATH).keys())\n for x in dir(model):\n ob = getattr(model, x)\n # We don't need to persist model functions, or magic variables of the model.\n if not callable(ob) and not x.startswith(\"__\"):\n assert x in saved_model_params\n\n def test_all_loaded_weights_numpy_arrays(self, model):\n # Load a model onto an uninstanciated object\n loaded_model = LightFM.load(TEST_FILE_PATH)\n\n for weight_name in model_weights:\n assert callable(getattr(loaded_model, weight_name).any)\n\n def test_model_performance(self, model):\n train_predictions = model.predict(train.row, train.col)\n test_predictions = model.predict(test.row, test.col)\n\n trn_pred = roc_auc_score(train.data, train_predictions)\n tst_pred = roc_auc_score(test.data, test_predictions)\n\n # Performance is same as before when loaded from disk\n loaded_model = LightFM.load(TEST_FILE_PATH)\n\n train_predictions = loaded_model.predict(train.row, train.col)\n test_predictions = loaded_model.predict(test.row, test.col)\n\n # Use approximately equal because floating point math may make our summation slightly different.\n assert roc_auc_score(train.data, train_predictions) == pytest.approx(\n trn_pred, 0.0001\n )\n assert roc_auc_score(test.data, test_predictions) == pytest.approx(\n tst_pred, 0.0001\n )\n","sub_path":"tests/test_persist.py","file_name":"test_persist.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"426028671","text":"import re\n\nimport pytest\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015 by Kevin Landreth, David Michael Pennington and\ncontributors. 
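# Hedged sketch (not part of any dataset record above; "assert_round_trip" is
# a hypothetical helper). TestPersist above ultimately asserts one property:
# a save/load round trip leaves predictions (nearly) unchanged. Note that
# .save()/.load() are the fork-specific helpers imported in that test, not
# the stock lightfm API.
import numpy as np

def assert_round_trip(model, interactions, path="/tmp/model.npz", tol=1e-4):
    before = model.predict(interactions.row, interactions.col)
    model.save(path)
    after = type(model).load(path).predict(interactions.row, interactions.col)
    assert np.abs(before - after).max() < tol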
See AUTHORS for more details.\n\nSome rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n###\n### Required config lines\n###\n@pytest.mark.parametrize(\"required_line\", [\n r'service timestamps debug datetime msec localtime show-timezone',\n r'service timestamps log datetime msec localtime show-timezone',\n r'clock timezone MST -7',\n r'service tcp-keepalives-in',\n r'service tcp-keepalives-out',\n r'ip tcp selective-ack',\n r'ip tcp timestamp',\n r'ip tcp synwait-time 10',\n r'ip tcp path-mtu-discovery',\n r'memory reserve critical 4096',])\ndef test_basics_exact(device, required_line):\n \"\"\"Required global configurations\"\"\"\n assert bool(device.find_lines(r'^'+required_line, exactmatch=True))\n\n###\n### Required partial config lines\n###\n@pytest.mark.parametrize(\"required_line\", [\n r'clock summer-time MDT recurring',\n r'enable secret',\n r'hostname',])\ndef test_basics_partial(device, required_line):\n \"\"\"Required global configurations\"\"\"\n assert bool(device.find_lines(r'^'+required_line, exactmatch=False))\n\n\n###\n### SNMP checks\n###\n@pytest.mark.parametrize(\"required_line\", [\n r'snmp-server community {0} [rR][oO] 99'.format(re.escape('g1v3mE$t@t$')),\n r'snmp-server community {0} [rR][wW] 99'.format(re.escape('SoMeThaNGwIErd')),\n ])\ndef test_snmp(device, required_line):\n \"\"\"Required global configurations\"\"\"\n assert bool(device.find_lines(required_line, exactmatch=True))\n\n@pytest.mark.parametrize(\"rejected_line\", [\n r'snmp-server\\scommunity\\s\\S+\\s+[rR][wW]',\n r'snmp-server\\scommunity\\s\\S+\\s+[rR][oO]',\n ])\ndef test_snmp_acl_required(device, rejected_line):\n \"\"\"Reject all SNMP communities with no ACLs\"\"\"\n assert not bool(device.find_lines(rejected_line, exactmatch=True))\n\n\n###\n### Required logging configurations\n###\n@pytest.mark.parametrize(\"required_line\", [\n r'logging 172.16.15.2',\n r'logging buffered 65535 debugging', ])\ndef test_logging(device, required_line):\n \"\"\"Required logging configs\"\"\"\n assert bool(device.find_lines(r'^'+required_line, exactmatch=True))\n\n###\n### Disable these services\n###\n@pytest.mark.parametrize(\"required_line\", [\n r'no service pad',\n r'no ip domain-lookup',\n r'ip ospf name-lookup',\n r'no ip source-route',\n r'no ip gratuitous-arps', # WARNING: HA clustering may require Grat ARP\n ])\ndef test_services_disabled(device, required_line):\n \"\"\"disable services\"\"\"\n assert bool(device.find_lines(r'^' + required_line, exactmatch=True))\n\n###\n### Reject these 
services\n###\n@pytest.mark.parametrize(\"rejected_line\", [\n r'service internal', # You should know what you're doing if you turn it on\n r'enable password', # Reject insecure enable passwords\n r'ip http server',\n r'ip http secure-server', \n r'ntp master',\n ])\ndef test_services_rejected(device, rejected_line):\n \"\"\"reject services\"\"\"\n assert not bool(device.find_lines(r'^' + rejected_line, exactmatch=True))\n\n@pytest.mark.parametrize(\"required_line\", [\n r' logging synchronous',\n r' exec-timeout 5 0',\n r' transport preferred none',\n ])\ndef test_vty(device, required_line):\n \"\"\"Required vty configs\"\"\"\n VTY_REGEX = r'line\\svty\\s(\\d.+)\\s*$'\n\n try:\n ttys = device.find_objects(VTY_REGEX)\n assert len(ttys) > 0\n except AssertionError:\n pytest.fail('No vty lines found')\n\n for tty in ttys:\n test_val = tty.re_match_iter_typed(required_line, group=0, \n default=\"__FAIL__\")\n assert test_val!=\"__FAIL__\"\n","sub_path":"src/test_ios_baseline.py","file_name":"test_ios_baseline.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"302356450","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nGathering select stock data.\r\n\r\nCreated on Sat Oct 31 11:26:11 2020\r\n\r\n@author: ryanar\r\n\"\"\"\r\n\r\nimport datetime\r\nimport re\r\n\r\nimport pandas as pd\r\n#import pandas_datareader.data as web\r\nimport requests\r\nimport json\r\nimport codecs\r\nimport yfinance as yf\r\n\r\n\r\nfrom stock_analysis.utils import label_sanitizer\r\n\r\nclass StockReader:\r\n \"\"\"Class for reading financial data from websites. \"\"\"\r\n \r\n RAPIDAPI_KEY = \"6c15347475msh62c656093ec2372p138b2fjsn39929a4ab815\" \r\n RAPIDAPI_HOST = \"apidojo-yahoo-finance-v1.p.rapidapi.com\"\r\n \r\n _index_tickers = {\r\n 'SP500' : '^GSPC',\r\n 'DOW' : '^DJI',\r\n 'NASDAQ' : '^IXIC'\r\n }\r\n \r\n def __init__(self, start, end=None):\r\n \"\"\"\r\n Create a StockReader object for readin across a given date range.\r\n\r\n Parameters\r\n ----------\r\n start : datetime object or string in format 'YYYYMMDD'\r\n The first day of the stock market data.\r\n end : datetime object or string in format 'YYYYMMDD', optional\r\n The last day of the stock market data. 
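# Hedged sketch (not part of any dataset record above). The "device" fixture
# the baseline tests rely on is never shown; judging by its
# find_lines()/find_objects()/re_match_iter_typed() methods it is presumably
# a CiscoConfParse-style parsed config, e.g.:
from ciscoconfparse import CiscoConfParse

device = CiscoConfParse(["hostname lab-rtr1",
                         "line vty 0 4",
                         " exec-timeout 5 0"])
assert device.find_lines(r"^hostname")               # -> ['hostname lab-rtr1']
assert device.find_objects(r"line\svty\s(\d.+)\s*$")  # -> [IOSCfgLine objects]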
The default is today.\r\n\r\n Returns\r\n -------\r\n A StockReader object.\r\n\r\n \"\"\"\r\n self.start, self.end = map(\r\n lambda x:x.strftime('%Y%m%d') if isinstance(\r\n x, datetime.datetime\r\n ) else re.sub(r'\\D','',x),\r\n [start, end or datetime.date.today()]\r\n )\r\n if self.start >= self.end:\r\n raise ValueError('START date must be before END date')\r\n \r\n @property\r\n def available_tickers(self):\r\n \"\"\"\r\n Access the names of the indices whose tickers are supported.\r\n\r\n Returns\r\n -------\r\n list data type\r\n\r\n \"\"\"\r\n return list(self._index_tickers.keys())\r\n \r\n @classmethod\r\n def get_index_ticker(cls, index):\r\n \"\"\"\r\n Class method for getting the ticker of the specified inex, if know.\r\n\r\n Parameters\r\n ----------\r\n cls : TYPE\r\n DESCRIPTION.\r\n index : String\r\n The name of the index.\r\n\r\n Returns\r\n -------\r\n String or None.\r\n\r\n \"\"\"\r\n try:\r\n index = index.upper()\r\n except AttributeError:\r\n raise ValueError('Index must be a string')\r\n return cls._index_tickers.get(index, None)\r\n \r\n @label_sanitizer\r\n def get_ticker_data(self, ticker):\r\n \r\n \r\n \"\"\"\r\n Get historical OHLC data for a given data range and ticker.\r\n Tries to get from Investor Exchange (IEX), but falls back to Yahoo! Finance if IEX doesn't have it.'\r\n\r\n Parameters\r\n ----------\r\n ticker : String\r\n The stock symbol to lookup.\r\n\r\n Returns\r\n -------\r\n Pandas Dataframe containing stock data.\r\n\r\n \"\"\"\r\n \r\n# try:\r\n tickerData = yf.Ticker(ticker)\r\n \r\n return tickerData.history(period='1d', start='2010-1-1', end='2021-1-25')\r\n \r\n \r\n # df = pd.read_csv('../stock_analysis/data/'+ ticker +'.csv')\r\n # df['Date'] = pd.to_datetime(df['Date']) \r\n \r\n # df = df[df['Date'] > self.start]\r\n # df = df[df['Date'] < self.end]\r\n \r\n # df.set_index('Date', drop=True, inplace=True)\r\n \"\"\" \r\n except:\r\n if(False):\r\n url = \"https://apidojo-yahoo-finance-v1.p.rapidapi.com/stock/v3/get-historical-data\"\r\n \r\n querystring = {\"symbol\":ticker,\"region\":\"US\"}\r\n \r\n headers = {\r\n 'x-rapidapi-key': \"6c15347475msh62c656093ec2372p138b2fjsn39929a4ab815\",\r\n 'x-rapidapi-host': \"apidojo-yahoo-finance-v1.p.rapidapi.com\"\r\n }\r\n\r\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\r\n print(response.text)\r\n \r\n with open(ticker +\".json\", \"w\") as outfile:\r\n json.dump(response.json,outfile)\r\n # else:\r\n print(\"Hello\") \r\n\r\n df = pd.DataFrame()\r\n \r\n with open('../../stock_analysis/data/'+ ticker +'.json') as json_file: \r\n \r\n data = json.load(json_file) \r\n \r\n for price in data['prices']:\r\n # Ignore the dividend\r\n if (len(price) > 4):\r\n priceDict = {\r\n \"date\" : datetime.datetime.utcfromtimestamp(price[\"date\"]),\r\n \"open\" : price[\"open\"],\r\n \"close\" : price[\"close\"],\r\n \"low\" : price[\"low\"],\r\n \"high\" : price[\"high\"],\r\n \"volume\" : price[\"volume\"]\r\n } \r\n \r\n df = df.append(priceDict, ignore_index=True) \r\n \r\n df.set_index('date', drop=True, inplace=True) \r\n \r\n return df\r\n \"\"\" \r\n \r\n @label_sanitizer \r\n def get_bitcoin_data(self, url=False):\r\n \r\n \"\"\"\r\n Get bitcoin historical open-high-low-close (OHLC) data from coinmarketcap.com for a given date ranger.\r\n\r\n Returns\r\n -------\r\n A pandas dataframe with the bitcoin data.\r\n\r\n \"\"\"\r\n # try:\r\n # data = pd.read_html('https://coinmarketcap.com/currencies/bitcoin/historical-data/?'\r\n # 
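# Hedged sketch (not part of any dataset record above; "fetch" is a
# hypothetical helper). get_ticker_data() above calls yfinance with
# hard-coded dates; honouring the reader's own YYYYMMDD start/end strings
# only needs a reformat before Ticker.history():
import pandas as pd
import yfinance as yf

def fetch(ticker, start="20200101", end="20201231"):
    fmt = lambda s: pd.to_datetime(s).strftime("%Y-%m-%d")
    return yf.Ticker(ticker).history(start=fmt(start), end=fmt(end))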
'start={}&end{}'.format(self.start,self.end), parse_dates=[0], index_col=[0]\r\n # )[0].sort_index()\r\n # except:\r\n \r\n if url:\r\n df = pd.read_csv(url)\r\n else:\r\n df = pd.read_csv('../stock_analysis/data/BTC-USD.csv')\r\n \r\n df['Date'] = pd.to_datetime(df['Date']) \r\n \r\n # df = df[df['Date'] > self.start]\r\n # df = df[df['Date'] < self.end]\r\n \r\n df.set_index('Date', drop=True, inplace=True)\r\n \r\n return df\r\n \r\n @label_sanitizer\r\n def get_index_data(self, index='SP500'):\r\n \"\"\"\r\n Get historical OHLC data from Yahoo! Finance for the chosen index for a given date range.\r\n\r\n Parameters\r\n ----------\r\n index : String, optional\r\n A string representing the Stock index to retrive. The default is 'SP500'.\r\n Currently support the indexes below:\r\n 'SP500' for S&P 500,\r\n 'DOW' for Dow Jones Industrial Average\r\n 'NASDAQ' for NASDAQ Composite Index\r\n\r\n Returns\r\n -------\r\n A pandas dataframe with the index data.\r\n\r\n \"\"\"\r\n if index not in self.available_tickers:\r\n raise ValueError(\r\n 'Index not supported. '\r\n f\"Available tickers are: {', '.join(self.available_tickers)}\"\r\n )\r\n \r\n try:\r\n \r\n df = pd.read_csv('../../stock_analysis/data/'+ self.get_index_ticker(index) +'.csv')\r\n df['Date'] = pd.to_datetime(df['Date']) \r\n \r\n df = df[df['Date'] > self.start]\r\n df = df[df['Date'] < self.end]\r\n \r\n df.set_index('Date', drop=True, inplace=True)\r\n \r\n except:\r\n df = pd.DataFrame()\r\n \r\n raise ValueError(\"Couldn't get the data for the index.\") \r\n \r\n return df","sub_path":"stock_analysis/stock_reader.py","file_name":"stock_reader.py","file_ext":"py","file_size_in_byte":7800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"224444612","text":"import os\nimport re\nfrom textract import process\n\n# Convert file given in filePath variable\n# If a file with with 'txt' extension already exists with the same name,\n# the function ignores and returns\ndef convert_file(file_path, save_path):\n print(\"converting file\", file_path)\n if (file_path[-3:] == 'pdf' and not(os.path.isfile(save_path))):\n document_byte_array = process(file_path);\n document_text = document_byte_array.decode('utf-8')\n document_text = clean_document_text(document_text)\n with open(save_path, 'w') as txt_file:\n txt_file.write(document_text)\n return document_text\n return None\n\n# Clean text from the document\ndef clean_document_text(text):\n lines = text.split('\\n')\n document_text = []\n for line in lines:\n line = re.sub('^[0-9]+\\. ', '', line)\n line = re.sub('[-a-zA-Z]+[0-9]+', '', line)\n line = re.sub('^\\. ', '', line)\n line = re.sub('([.,\\/#!$%\\^&\\*;:{}=\\-_`~()])', ' ', line)\n line = re.sub('\\([0-9.-:,]+\\)', '', line)\n line = re.sub('\\(Fig\\. [0-9]+\\)', '', line)\n line = re.sub('Fig\\. 
[0-9]+', '', line)\n line = re.sub('\\(Figure [0-9]+\\)', '', line)\n line = re.sub('Figure [0-9]+', '', line)\n line = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', line)\n line = re.sub('[^a-zA-Z0-9\\.:,!?; ]', '', line)\n line = re.sub('[0-9]', '', line)\n line = re.sub('\\\\b[a-z]{1,2}\\\\b', '', line)\n line = re.sub('\\ +', ' ', line)\n linesWithoutChars = re.match('^[^a-zA-Z]*$', line)\n if (linesWithoutChars == None):\n document_text.append(line)\n document_text = '\\n'.join(document_text)\n return document_text\n\n# Start at the root path and search all files to convert\ndef convert(path):\n for subdir, dirs, files in os.walk(path):\n for file in files:\n file_path = subdir + os.path.sep + file\n save_path = subdir[:-3] + 'txt' + os.path.sep + file[:-3] + 'txt'\n document_text = convert_file(file_path, save_path)\n\ndef main():\n path = 'corpus_test/pdf'\n convert(path)\n\nif __name__ == '__main__': main()\n","sub_path":"src/text/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"215147935","text":"\"\"\"Database session and the base class for model classes\"\"\"\nfrom logging import getLogger\nimport threading\n\nimport flask\nimport _mysql\nfrom MySQLdb.converters import conversions\nfrom sqlalchemy import engine_from_config, MetaData\nfrom sqlalchemy.engine import url\nfrom sqlalchemy.orm import create_session, scoped_session\nfrom sqlalchemy.exc import ResourceClosedError\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.exc import ResourceClosedError\ntry:\n from sqlalchemy.ext.declarative import _declarative_constructor\nexcept ImportError:\n from sqlalchemy.ext.declarative.api import _declarative_constructor\nfrom savalidation import ValidationMixin\n\nfrom affine import config\n\n__all__ = [\n 'metadata', 'recreate_engines', 'flush_and_close', 'Base',\n 'session', 'migrate_session',\n 'execute', 'migrate_execute',\n]\n\nlogger = getLogger(__name__)\nCONFIG_ERROR_STR = 'Your database settings are not correctly configured. 
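# Hedged observation (not part of any dataset record above). In
# clean_document_text() above, the punctuation pass replaces "(" and ")" with
# spaces before the figure-reference patterns run, so '\(Fig\. [0-9]+\)' and
# '\(Figure [0-9]+\)' can no longer match anything; running the parenthesised
# patterns first avoids that:
import re

def strip_figure_refs(line):
    line = re.sub(r'\((?:Fig\.|Figure) [0-9]+\)', '', line)      # first
    line = re.sub(r'([.,\/#!$%\^&\*;:{}=\-_`~()])', ' ', line)   # then strip
    return line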
Do you have a config file selected?'\n\n\ndef scopefunc():\n thread_id = id(threading.current_thread())\n app_name = flask.current_app.name if flask.current_app else None\n return (thread_id, app_name)\n\n\ndef create_primary_session():\n engine = metadata.bind\n assert engine is not None, CONFIG_ERROR_STR\n\n session = create_session(bind=engine, autoflush=False)\n app = flask.current_app\n if app:\n try:\n configure_session = app.configure_session\n except AttributeError:\n pass\n else:\n configure_session(session)\n return session\n\n\ndef create_migrate_session():\n assert migrate_engine is not None, CONFIG_ERROR_STR\n return create_session(bind=migrate_engine, autoflush=False)\n\n\ndef recreate_engines():\n global migrate_engine\n\n # Destroy old engines\n for _engine in [metadata.bind, migrate_engine]:\n if _engine is not None:\n # Nuke connection state\n _engine.dispose()\n\n # Get config for engines\n cfg = config._config.copy()\n migrate_url = cfg.pop('sqlalchemy.master.migrate_url', None)\n # Create primary engine\n if cfg.get('sqlalchemy.master.url'):\n metadata.bind = engine_from_config(cfg, prefix='sqlalchemy.master.')\n # Create new migrate engine\n if migrate_url:\n cfg['sqlalchemy.master.url'] = migrate_url\n migrate_engine = engine_from_config(cfg, prefix='sqlalchemy.master.')\n\n\nsession = scoped_session(create_primary_session, scopefunc=scopefunc)\nmetadata = MetaData() # metadata.bind is the primary engine\nmigrate_engine = None\nmigrate_session = scoped_session(create_migrate_session)\nrecreate_engines()\n\n\ndef flush_and_close():\n try:\n session.flush()\n except Exception:\n logger.exception('error while flushing')\n session.close()\n\n\ndef execute(query, *args, **kwargs):\n _session = kwargs.pop('session', session)\n result = _session.execute(query, *args, **kwargs)\n if result.closed:\n return\n try:\n return result.fetchall()\n except ResourceClosedError:\n return\n\n\ndef migrate_execute(query, *args, **kwargs):\n kwargs['session'] = migrate_session\n return execute(query, *args, **kwargs)\n\n\nclass Base(ValidationMixin):\n \"\"\"Base class for ORM objects\"\"\"\n query = session.query_property()\n auto_add = True\n\n @classmethod\n def get(cls, key):\n return cls.query.get(key)\n\n def __unicode__(self):\n namestr = ''\n if hasattr(self, 'name') and self.name:\n namestr = ' ' + self.name\n elif hasattr(self, 'text') and self.text:\n namestr = ' ' + self.text[:100]\n return u'<%s(%s)%s>' % (self.__class__.__name__, self.id, namestr)\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n\n def __repr__(self):\n return str(self)\n\n @classmethod\n def like(cls, name):\n return cls.query.filter(cls.name.like(\"%%%s%%\"%name)).first()\n\n @classmethod\n def all_like(cls, name):\n return cls.query.filter(cls.name.like(\"%%%s%%\"%name)).all()\n\n @classmethod\n def by_name(cls, name):\n return cls.query.filter_by(name = name).first()\n\n @classmethod\n def get_or_create(cls, name):\n return (cls.query.filter_by(name=name).first() or\n cls.create(name=name))\n\n @classmethod\n def _load_from_file(cls, path, cols, on_duplicate, lines_per_chunk=None, line_delimiter=None, post=None, **retry_args):\n from affine.model.load_data_infile import load_data_infile\n load_data_infile(cls.__tablename__, path, cols, on_duplicate, lines_per_chunk, line_delimiter=line_delimiter, post=post, **retry_args)\n\n @classmethod\n def create(cls, **kw):\n obj = cls(**kw)\n session.add(obj)\n session.flush()\n return obj\n\n def __init__(self, **kw):\n 
_declarative_constructor(self, **kw)\n if Base.auto_add:\n session.add(self)\n\n @property\n def errors(self):\n self._sav_validate(self, 'before_flush')\n return self.validation_errors\n\n\nBase = declarative_base(cls=Base, constructor=Base.__init__, metadata=metadata)\n","sub_path":"affine/model/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"162605432","text":"import logging\nimport sys\nimport json\n\nfrom datetime import datetime, timedelta\nfrom dateutil import parser\n\nimport CloverToXML\n\nfrom RitaStore import RitaStore\n\n\n#Set up the logger\nlogFileName = \"manager.log\"\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n\nfileHandler = logging.FileHandler(logFileName)\nfileHandler.setLevel(logging.DEBUG)\nfileHandler.setFormatter(formatter)\n\nstreamHandler = logging.StreamHandler()\nstreamHandler.setLevel(logging.ERROR)\nstreamHandler.setFormatter(formatter)\n\nlogger = logging.getLogger(logFileName)\nlogger.setLevel(logging.DEBUG)\nlogging.FileHandler(logFileName).setLevel(logging.DEBUG)\nlogging.basicConfig(fileName=logFileName, level=logging.DEBUG)\nlogger.addHandler(fileHandler)\nlogger.addHandler(streamHandler)\n\n\n#Our default stores\nstore1078 = RitaStore(1078, merchantID=\"CNH7H10A6ACV4\", authToken=\"f4551875dcfbae15de157fb6b55bb685\")\nstore1097 = RitaStore(1097, merchantID=\"15mf5fehcgfjc\", authToken=\"b0627954458edaf69bbaca7f67c6c306\")\nstore1044 = RitaStore(1044, merchantID=\"QZ9B6KSFXBXTY\", authToken=\"c007da1b-0c5c-b018-91c8-73311562a11b\")\n\ndefaultStores = []\ndefaultStores.append(store1078)\ndefaultStores.append(store1097)\ndefaultStores.append(store1044)\n\n\n#If we have a dictionary parameter, parse it\nif len(sys.argv) > 1:\n logger.info(\"Got commandline parameter: \" + sys.argv[1])\n\n try: \n parameters = json.loads(sys.argv[1])\n except:\n logger.error(\"Unable to parse commandline parameter: \" + sys.argv[1])\n raise ValueError(\"Unable to parse commandline parameter\")\n\n#No dictionary parameter, create our default values\nelse:\n logger.info(\"No JSON Dictionary parameter found in commandline\")\n\n parameters = {}\n stores = []\n for store in defaultStores:\n storeJSON = {\n \"api_token\": store.authToken,\n \"merchant_id\": store.merchantID,\n \"store_id\": store.outletID\n }\n stores.append(storeJSON)\n\n parameters[\"stores\"] = stores\n parameters[\"yesterday\"] = True\n\n\n#Check to see if we have stores\ntry:\n stores = parameters[\"stores\"]\nexcept:\n logger.error(\"No stores found in commandline parameter: \" + sys.argv[1])\n raise ValueError(\"No stores found in commandline parameter: \" + sys.argv[1])\n\n\n#Check our default save location\nsaveLocation = \"\"\nif \"save_location\" in parameters:\n saveLocation = parameters[\"save_location\"] + \"/\"\n\n\n#For holding the day increments to look at \ndays = []\n\n#If yesterday, just add 1; yesterday = today - 1\nif \"yesterday\" in parameters and bool(parameters[\"yesterday\"]):\n days.append(1)\n logger.info(\"Getting from yesterday\")\n\n\n#If we're given start/end days\nelif \"startday\" in parameters and \"endday\" in parameters:\n logger.info(\"Found start and end day:\" + parameters[\"startday\"] + \"\\t\" + parameters[\"endday\"])\n\n try:\n startday = parser.parse(parameters[\"startday\"])\n endday = parser.parse(parameters[\"endday\"])\n today = datetime.now().replace(hour=0, minute=0, 
second=0)\n\n daysBetween = (endday - startday).days\n daysSinceStart = (today - startday).days\n logger.info(\"Days between: \" + str(daysBetween))\n\n for i in range(0, abs(daysBetween) + 1):\n days.append(daysSinceStart - i)\n\n except:\n logger.error(\"Error parsing start and end day: \" + parameters)\n raise ValueError(\"Error parsing start and end day: \" + parameters)\n\n\n#No days given, so it's today\nelse:\n days.append(0)\n logger.info(\"Getting from today\")\n\n\n#For each store\nfor store in stores:\n\n #Get our authentication information\n try:\n api_token = store[\"api_token\"]\n except:\n logger.exception(\"No API Token found\" + str(store))\n sys.exit(1)\n\n try:\n merchant_id = store[\"merchant_id\"]\n except:\n logger.exception(\"No merchant ID found\" + str(store))\n sys.exit(1)\n\n try:\n store_id = store[\"store_id\"]\n except:\n logger.exception(\"No store ID found\" + str(store))\n sys.exit(1) \n\n\n #Create our store object\n ritaStore = RitaStore(store_id, merchantID=merchant_id, authToken=api_token)\n logger.info(\"Store: \" + json.dumps(store, indent=4))\n\n\n #For each day (in our range)\n for day in days:\n\n #Get the orders for that store on that day \n xmlRoot, fileName = CloverToXML.convertToXML(ritaStore, logger=logger, day=day)\n\n #Really long string with XML data formatted - can do anything with this\n xmlString = CloverToXML.prettify(xmlRoot)\n\n saveToFile = True\n if saveToFile:\n fileLocation = saveLocation + fileName\n CloverToXML.saveToFile(xmlRoot, fileName)\n logger.info(\"Successfully saved to \" + fileName)\n else:\n print(xmlString)\n","sub_path":"CloverApi/Manager.py","file_name":"Manager.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"205937274","text":"import itertools as _itertools\nimport functools as _functools\nfrom collections import namedtuple as _namedtuple\nimport numpy as _np\n\ndef _new_cost_matrix(ratio):\n levels = [\"a\", \"c\", \"g\", \"t\"]\n transversion_cost = ratio\n ans = {key:transversion_cost for key in _itertools.product(levels, levels)}\n for level in levels:\n ans[(level, level)] = 0\n transitions = [(\"a\", \"g\"), (\"g\", \"a\"), (\"c\", \"t\"), (\"t\", \"c\")]\n for each in transitions:\n ans[each] = 1\n return ans\n\nCOST_MATRIX = _new_cost_matrix(1.2)\n\nIUPAC_MAP = {\n \"a\": [\"a\"],\n \"c\": [\"c\"],\n \"g\": [\"g\"],\n \"t\": [\"t\"],\n \"m\": [\"a\", \"c\"],\n \"r\": [\"a\", \"g\"],\n \"w\": [\"a\", \"t\"],\n \"s\": [\"c\", \"g\"],\n \"y\": [\"c\", \"t\"],\n \"k\": [\"g\", \"t\"],\n \"v\": [\"a\", \"c\", \"g\"],\n \"h\": [\"a\", \"c\", \"t\"],\n \"d\": [\"a\", \"g\", \"t\"],\n \"b\": [\"c\", \"g\", \"t\"],\n \"n\": [\"a\", \"c\", \"g\", \"t\"], # \"n\" is the rare case in the database.\n \"-\": [\"a\", \"c\", \"g\", \"t\"],\n}\n\nEDGE_LIST = [\n (\"ancestry1\", \"hg\" ),\n (\"ancestry1\", \"panTro\" ),\n (\"ancestry2\", \"ancestry1\" ),\n (\"ancestry2\", \"gorGor\" ),\n (\"ancestry3\", \"ancestry2\" ),\n (\"ancestry3\", \"ponAbe\" ),\n (\"ancestry4\", \"ancestry3\" ),\n (\"ancestry4\", \"rheMac\" ),\n]\n\nEdgeTuple = _namedtuple(\"EdgeTuple\", [ \"_\".join(edge_a_b) for edge_a_b in EDGE_LIST])\nNodeTuple = _namedtuple(\"NodeTuple\", ['hg', 'panTro', 'gorGor', 'ponAbe', 'rheMac',\n 'ancestry1', 'ancestry2', 'ancestry3', 'ancestry4'])\n\ndef mkNodeTuple(res):\n return NodeTuple._make((res[k] for k in NodeTuple._fields))\n\ndef specialize_ambiguous_changes(nodes):\n \"\"\"\n Sample nodes argument could be\n 
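# Hedged sketch (not part of any dataset record above; "day_offsets" is a
# hypothetical helper). Manager.py above turns a start/end window into
# "days before today" offsets; the arithmetic in isolation, without the
# logging and error handling, is:
from datetime import datetime
from dateutil import parser

def day_offsets(startday, endday):
    today = datetime.now().replace(hour=0, minute=0, second=0)
    start, end = parser.parse(startday), parser.parse(endday)
    since_start = (today - start).days
    return [since_start - i for i in range(abs((end - start).days) + 1)]

# e.g. day_offsets("2020-10-01", "2020-10-03") run on 2020-10-05 -> [4, 3, 2]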
res = {\n 'chromosome': 'chr8',\n 'position': 60099,\n 'ref': 'A',\n 'sample_number': 62784,\n 'allele_number': 125568,\n 'hg': 'w', 'panTro': 'a', 'gorGor': 'a', 'ponAbe': 'a', 'rheMac': 'a',\n 'ancestry1': 'a', 'ancestry2': 'a', 'ancestry3': 'a', 'ancestry4': 'a'\n }\n nodes = NodeTuple._make((res[k] for k in NodeTuple._fields))\n \"\"\"\n assert isinstance(nodes, NodeTuple)\n ambiguous_tree = {key: IUPAC_MAP[getattr(nodes, key)] for key in NodeTuple._fields}\n specialized_values = _itertools.product(*ambiguous_tree.values())\n specialized_keys = ambiguous_tree.keys()\n\n min_score = None\n maximum_parsimony_trees = None\n for values in specialized_values:\n specialized_tree = dict(zip(specialized_keys, values))\n parsimony_score = _calculate_parsimony_score(specialized_tree)\n if min_score is None or parsimony_score < min_score:\n min_score = parsimony_score\n maximum_parsimony_trees = [specialized_tree]\n elif parsimony_score == min_score:\n maximum_parsimony_trees.append(specialized_tree)\n return maximum_parsimony_trees\n\n# Argument is a specialized tree\ndef _calculate_parsimony_score(tree):\n final_score = 0\n for a, b in EDGE_LIST:\n code_a, code_b = tree[a], tree[b]\n change_score = COST_MATRIX[(code_a, code_b)]\n final_score += change_score\n return final_score\n\n# Argument is a specialized tree\ndef _calculate_num_changes(tree):\n s = (int(tree[a]!=tree[b]) for a, b in EDGE_LIST)\n return EdgeTuple._make(s)\n\n@_functools.lru_cache(maxsize=None)\ndef stat_edge_changes(nodes):\n trees = specialize_ambiguous_changes(nodes)\n change_tuples = [ _calculate_num_changes(tree) for tree in trees]\n return EdgeTuple._make(_np.mean(change_tuples, 0))\n","sub_path":"lib/ParsimonyInfer.py","file_name":"ParsimonyInfer.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"561510580","text":"import random\nfrom abc import ABC, abstractmethod\nfrom typing import TypeVar, List\n\nfrom custom.instance import Instance, PspInstance\nfrom custom.interval import Interval\nfrom jmetal.core.problem import Problem\nfrom jmetal.core.solution import FloatSolution, BinarySolution\n\nS = TypeVar('S')\n\n\nclass GDProblem(Problem[S], ABC):\n def __init__(self, instance_: Instance):\n super(GDProblem, self).__init__()\n self.instance_ = instance_\n self.number_of_variables = instance_.n_var\n self.number_of_objectives = instance_.n_obj\n self.number_of_constraints = instance_.n_constraints\n self.models = self.instance_.attributes['models']\n self.objectives_type = self.number_of_objectives * [False] # Minimization\n\n def get_preference_model(self, dm: int):\n return self.models[dm]\n\n @abstractmethod\n def generate_existing_solution(self, variables) -> [S]:\n pass\n\n\nclass BinaryProblemGD(GDProblem[BinarySolution], ABC):\n \"\"\" Class representing binary problems. 
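Candidate solutions are bit strings: a single variable holding number_of_bits boolean values (see create_solution below). 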
\"\"\"\n\n def __init__(self, instance_):\n super(BinaryProblemGD, self).__init__(instance_)\n self.number_of_bits = instance_.n_var\n self.number_of_variables = 1\n\n def create_solution(self) -> BinarySolution:\n new_solution = BinarySolution(number_of_variables=self.number_of_variables,\n number_of_objectives=self.number_of_objectives)\n\n new_solution.variables[0] = \\\n [True if random.randint(0, 1) == 0 else False for _ in range(\n self.number_of_bits)]\n\n return new_solution\n\n def generate_existing_solution(self, variables: str) -> BinarySolution:\n new_solution = BinarySolution(number_of_variables=self.number_of_variables,\n number_of_objectives=self.number_of_objectives)\n new_solution.variables[0] = \\\n [True if variables[_] == '1' else False for _ in range(\n self.number_of_bits)]\n self.evaluate(new_solution)\n return new_solution\n\n\nclass FloatProblemGD(GDProblem[FloatSolution], ABC):\n \"\"\" Class representing float problems. \"\"\"\n\n def __init__(self, instance_):\n super(FloatProblemGD, self).__init__(instance_)\n self.lower_bound = []\n self.upper_bound = []\n\n def create_solution(self) -> FloatSolution:\n new_solution = FloatSolution(\n self.lower_bound,\n self.upper_bound,\n self.number_of_objectives,\n self.number_of_constraints)\n new_solution.variables = \\\n [random.uniform(self.lower_bound[index_var] * 1.0, self.upper_bound[index_var] * 1.0) for index_var in\n range(self.number_of_variables)]\n\n return new_solution\n\n def generate_existing_solution(self, variables: List[float], is_objectives: bool = False) -> FloatSolution:\n new_solution = FloatSolution(\n self.lower_bound,\n self.upper_bound,\n self.number_of_objectives,\n self.number_of_constraints)\n if not is_objectives:\n new_solution.variables = [variables[index_var] for index_var in range(self.number_of_variables)]\n self.evaluate(new_solution)\n else:\n new_solution.objectives = [variables[index_var] for index_var in range(self.number_of_objectives)]\n return new_solution\n\n\nclass PortfolioSocialProblem(BinaryProblemGD):\n def __init__(self, instance_: PspInstance):\n super(PortfolioSocialProblem, self).__init__(instance_)\n self.budget = instance_.budget\n\n def evaluate(self, solution: BinarySolution) -> BinarySolution:\n budget = 0\n objectives = self.number_of_objectives * [0.0]\n\n for index, bits in enumerate(solution.variables[0]):\n if bits:\n budget += self.instance_.projects[index][0]\n for obj in range(0, self.number_of_objectives):\n objectives[obj] += self.instance_.projects[index][obj + 3]\n solution.objectives = [-obj for obj in objectives]\n\n solution.constraints = [self.budget - budget]\n return solution\n\n def get_name(self) -> str:\n return 'PortfolioSocialProblem'\n\n\nclass PortfolioSocialProblemGD(BinaryProblemGD):\n def __init__(self, instance_: PspInstance):\n super(PortfolioSocialProblemGD, self).__init__(instance_)\n self.budget = instance_.budget\n self.positions = [idx for idx in range(self.number_of_bits)]\n self.objectives_type = self.number_of_objectives * [True]\n\n def create_solution(self) -> BinarySolution:\n new_solution = BinarySolution(number_of_variables=self.number_of_variables,\n number_of_objectives=self.number_of_objectives)\n\n new_solution.variables[0] = []\n budget = Interval(0)\n random.shuffle(self.positions)\n new_solution.variables[0] = self.number_of_bits * [False]\n for v in self.positions:\n tmp = budget + self.instance_.projects[v][0]\n poss = self.budget.poss_greater_than_or_eq(tmp)\n if poss >= self.get_preference_model(0).chi:\n 
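# keep project v only while the interval budget still covers the accumulated cost with possibility >= chi\n                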
new_solution.variables[0][v] = True\n                budget = tmp\n        return new_solution\n\n    def create_from_string(self, variables: str) -> BinarySolution:\n        new_solution = BinarySolution(number_of_variables=self.number_of_variables,\n                                      number_of_objectives=self.number_of_objectives)\n\n        new_solution.variables[0] = \\\n            [True if variables[_] == '1' else False for _ in range(\n                self.number_of_bits)]\n        return new_solution\n\n    def evaluate(self, solution: BinarySolution) -> BinarySolution:\n        current_budget = Interval(0)\n        objectives = self.number_of_objectives * [Interval(0)]\n        for index, bits in enumerate(solution.variables[0]):\n            if bits:\n                current_budget += self.instance_.projects[index][0]\n                for obj in range(0, self.number_of_objectives):\n                    objectives[obj] += self.instance_.projects[index][obj + 1]\n        poss = self.budget.poss_greater_than_or_eq(current_budget)\n        if poss < self.get_preference_model(0).chi:\n            solution.constraints = [self.budget - current_budget]\n        else:\n            solution.constraints = [0]\n        solution.budget = current_budget\n        solution.objectives = objectives\n        return solution\n\n    def get_name(self) -> str:\n        return 'PortfolioSocialProblemGD'\n","sub_path":"custom/gd_problems.py","file_name":"gd_problems.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"8919402","text":"'''\nProblem:\nMoving the first several elements of an array to its end is called a rotation of the array.\nGiven one rotation of a non-decreasingly sorted array, return the minimum element of the rotated array.\nFor example, {3,4,5,1,2} is a rotation of {1,2,3,4,5}, and the minimum of that array is 1.\n'''\n\n\nclass Solution:\n    \"\"\"\n    @param nums: a rotated sorted array\n    @return: the minimum number in the array\n    \"\"\"\n\n    def findMin(self, nums):\n        # write your code here\n        if not isinstance(nums, list):\n            raise TypeError(\"Numbers is not a list!\")\n        if len(nums) == 0:\n            raise ValueError(\"Numbers is empty!\")\n        index1 = 0\n        index2 = len(nums) - 1\n        index_mid = index1\n        while nums[index1] >= nums[index2]:\n            if index2 - index1 == 1:\n                index_mid = index2\n                break\n            index_mid = (index1 + index2) // 2\n            if nums[index1] == nums[index2] and nums[index1] == nums[index_mid]:\n                result = nums[index1]\n                for i in range(index1 + 1, len(nums)):\n                    if result >= nums[i]:\n                        result = nums[i]\n                return result\n            if nums[index_mid] >= nums[index1]:\n                index1 = index_mid\n            elif nums[index_mid] <= nums[index2]:\n                index2 = index_mid\n        return nums[index_mid]\n","sub_path":"剑指Offer/11-旋转数组的最小数字.py","file_name":"11-旋转数组的最小数字.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"92596899","text":"def read_file():\n    with open('input.txt', 'r') as reader:\n        instructions = [[line[0],int(line[1:])] for line in reader.read().split('\\n')[:-1]]\n    return instructions\n\ndef get_manhattan_distance(instructions):\n    x = 0\n    y = 0\n    facing = 90\n    for command, value in instructions:\n        if command == 'N': y -= value\n        elif command == 'S': y += value\n        elif command == 'E': x += value\n        elif command == 'W': x -= value\n        elif command == 'L': facing = (360 + facing - value) % 360\n        elif command == 'R': facing = (360 + facing + value) % 360\n        elif command == 'F':\n            if facing == 90: x += value\n            elif facing == 180: y += value\n            elif facing == 270: x -= value\n            elif facing == 0: y -= value\n    return abs(x) + abs(y)\n\ninstructions = read_file()\ndistance = 
get_manhattan_distance(instructions)\nprint(distance)\n","sub_path":"day12/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"43235092","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.11-x86_64/egg/tests/unit/test_client.py\n# Compiled at: 2015-12-08 14:33:04\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom pypermedia.client import HypermediaClient, ConnectError\nimport mock, requests, unittest2\n\nclass TestClient(unittest2.TestCase):\n    \"\"\"\n    This is kinda shit since it really\n    needs to be integration tested.\n    \"\"\"\n\n    def test_connect(self):\n        builder = mock.MagicMock()\n        request_factory = mock.MagicMock()\n        session = mock.MagicMock()\n        resp = HypermediaClient.connect(b'blah', session=session, request_factory=request_factory, builder=builder)\n        self.assertEqual(builder.return_value.from_api_response.return_value.as_python_object.return_value, resp)\n\n    def test_send_and_construct(self):\n        builder = mock.MagicMock()\n        request_factory = mock.MagicMock()\n        session = mock.MagicMock()\n        request = mock.Mock(url=b'url')\n        resp = HypermediaClient.send_and_construct(request, session=session, request_factory=request_factory, builder=builder)\n        self.assertEqual(builder.return_value.from_api_response.return_value.as_python_object.return_value, resp)\n\n    def test_send_and_construct_error(self):\n        request = mock.Mock(url=b'url')\n        session = mock.Mock(send=mock.Mock(side_effect=requests.exceptions.ConnectionError))\n        self.assertRaises(ConnectError, HypermediaClient.send_and_construct, request, session=session)","sub_path":"pycfiles/pypermedia-0.4.2-py2.7/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"648230855","text":"import sqlite3\nfrom bottle import route, run\nfrom ipwhois import IPWhois\n\n\nconn = sqlite3.connect('ips.db')\n\n\ndef get_whois_data(ip):\n    \"\"\"Extract company and ISP from the whois response body\"\"\"\n    whois_nets_data = IPWhois(ip).lookup().get('nets')[0]\n    c_name = whois_nets_data.get('name')\n    isp = whois_nets_data.get('description')\n    return ip, c_name, isp\n\n\ndef cache_ip_up(ip, c_name, isp):\n    \"\"\"Caches whois data into sqlite3\"\"\"\n    c = conn.cursor()\n    c.execute('INSERT INTO ip_cache VALUES (?,?,?)', (ip, c_name, isp))\n    conn.commit()\n    return True\n\n\ndef ip_cache_check_up(ip):\n    \"\"\"Detects whether this ip was cached already\"\"\"\n    c = conn.cursor()\n    c.execute('SELECT company, isp FROM ip_cache WHERE ip=?', (ip,))\n    ip_cached = c.fetchone()\n    if ip_cached:\n        return ip, ip_cached[0], ip_cached[1]\n    else:\n        return False\n\n\n@route('/<ip>')\ndef ipwhois(ip):\n    ip_cached_data = ip_cache_check_up(ip)\n    if ip_cached_data:\n        ip, c_name, isp = ip_cached_data\n    else:\n        ip, c_name, isp = get_whois_data(ip.strip())\n        cache_ip_up(ip, c_name, isp)\n    return {\"ip\": ip, \"Company\": c_name, \"ISP\": isp}\n\nrun(host='localhost', port=8080, reloader=True, debug=True)\n","sub_path":"ip2org_api.py","file_name":"ip2org_api.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"77265729","text":"def hasThe(userInput) -> bool:\r\n\r\n    userInput = userInput.lower()\r\n    userList = userInput.split()\r\n    for item in userList:\r\n        if item == 'the':\r\n            return True\r\n    return False\r\n\r\n#-----------------------------\r\n\r\ndef main():\r\n\r\n    userInput = input(\"Please enter your string: \")\r\n    if hasThe(userInput):\r\n        print(\"This statement has the in it\")\r\n    else:\r\n        print(\"This statement does not have the in it\")\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"Chapter7/day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"208594698","text":"from keras.datasets import mnist #import the MNIST dataset\nfrom keras.utils import to_categorical #one-hot encoding for class labels\nfrom keras.models import Sequential #import the model class\nfrom keras.layers import Conv2D, MaxPool2D, Flatten, Dropout, Dense #import convolution and pooling layers\nfrom keras.losses import categorical_crossentropy #import the loss function\nfrom keras.optimizers import Adadelta #import the optimizer\n\n#data preprocessing\ntrain_X, train_y = mnist.load_data()[0] #load train_X, train_y\ntrain_X = train_X.reshape(-1, 28, 28, 1) #reshape/normalize train_X\ntrain_X = train_X.astype('float32')\ntrain_X /= 255\ntrain_y = to_categorical(train_y, 10) #one-hot encode the digit labels\n\n#model definition and training\nmodel = Sequential() #instantiate the model object with keras Sequential\nmodel.add(Conv2D(32, (5,5), activation='relu', input_shape=[28, 28, 1])) #add a conv layer: 32 kernels of size 5*5, relu activation, 28*28 input with 1 channel\nmodel.add(Conv2D(64, (5,5), activation='relu')) #add another conv layer\nmodel.add(MaxPool2D(pool_size=(2,2))) #add a max pooling layer for downsampling\nmodel.add(Flatten()) #flatten the feature maps\nmodel.add(Dropout(0.5)) #dropout randomly discards some neurons to prevent overfitting\nmodel.add(Dense(128, activation='relu')) #add a fully connected layer\nmodel.add(Dropout(0.5)) #randomly discard neurons\nmodel.add(Dense(10, activation='softmax')) #add the output layer, using softmax as the activation\n\n#set the loss function, optimizer and metrics for the model\nmodel.compile(loss=categorical_crossentropy,optimizer=Adadelta(),metrics=['accuracy'])\n\n#train the model\n#batch_size of 100 for mini-batch gradient descent\n#8 epochs over all samples\nmodel.fit(train_X,train_y,batch_size=100,epochs=8)\n\n#model evaluation\ntest_X, test_y = mnist.load_data()[1] #load the test set\ntest_X = test_X.reshape(-1, 28, 28, 1) #preprocess test_X\ntest_X = test_X.astype('float32')\ntest_X /= 255\ntest_y = to_categorical(test_y, 10) #one-hot encode test_y\nloss, accuracy = model.evaluate(test_X, test_y, verbose=1) #evaluate on the test set and print the loss and accuracy\nprint('loss:%.4f accuracy:%.4f' %(loss, accuracy))\n","sub_path":"mnist/MNIST_data/cnn_mnist01.py","file_name":"cnn_mnist01.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"325171803","text":"from __future__ import absolute_import\nfrom typing import List\n\nfrom collections import OrderedDict\nimport importlib.util\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom . import Network as BaseNetwork\n\n\n## FIXME[todo]: check docstrings\n## FIXME[todo]: not tested yet!\n## FIXME[todo]: need to clean up\n\n## FIXME[todo]: cuda activation (if available)\n\n\nclass Network(BaseNetwork):\n    \"\"\"\n    A class implementing the network interface (BaseNetwork)\n    to access feed forward networks implemented in (py)Torch.\n\n    Some general remarks on (py)torch:\n\n    * Torch convolution follows the channel first scheme, that is\n      the shape of a 2D convolution is (batch, channel, height, width).\n\n    * Torch does not store activation values. However, one can\n      register hooks to be executed before or after the forward or\n      backward propagation. 
These hooks can be used to\n      store input and output values.\n\n    * The nn.Module (layers) does not have a name. I store the\n      key by which they have been registered in the parent Module\n      under the property _name. These names will also function\n      as layer_id to identify individual layers from the outside.\n    \"\"\"\n\n    _model : nn.Module\n    _input_shapes : dict = None\n    _output_shapes : dict = None\n    _hooks : dict = {}\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        Load Torch model.\n\n        Parameters\n        ----------\n        model_file\n            Path to the .h5 model file.\n        \"\"\"\n\n        i = 0\n        data_loaded = False\n\n        ##\n        ## Try to get a model:\n        ##\n\n        if len(args) < 1:\n            raise TypeError(\"TorchNetwork requires at least one argument\")\n\n        if isinstance(args[i], nn.Module):\n            self._model = args[i]\n            i += 1\n        elif isinstance(args[i], str) and args[i].endswith(\".pth.tar\"):\n            self._model = torch.load(args[i])\n            data_loaded = True\n            i += 1\n        elif isinstance(args[i], str) and args[i].endswith(\".py\"):\n            spec = importlib.util.spec_from_file_location('torchnet', args[i])\n            net_module = importlib.util.module_from_spec(spec)\n            spec.loader.exec_module(net_module)\n            net_class_name = kwargs.get('network_class', 'Net')\n            net_class = getattr(net_module, net_class_name)\n            self._model = net_class()\n            i += 1\n        else:\n            raise ValueError(\"Invalid arguments for constructing a TorchNetwork\")\n\n        ##\n        ## Load model parameter if specified:\n        ##\n\n        if i < len(args) and not data_loaded:\n            if isinstance(args[i], str) and args[i].endswith(\".pth\"):\n                self._model.load_state_dict(torch.load(args[i]))\n\n        ## FIXME[todo]: set training/test state\n        #self._model.train()\n        #self._model.eval()\n\n        ## FIXME[todo]: activate gpu support if available\n        self._use_cuda = torch.cuda.is_available() and kwargs.get('use_cuda',\n                                                                   True)\n        # if self._use_cuda:\n        #     self._model.cuda()\n\n        if 'input_shape' in kwargs:\n            self._compute_layer_shapes(kwargs['input_shape'])\n\n\n    def _compute_layer_shapes(self, input_shape : tuple) -> None:\n        \"\"\"Compute the input and output shapes of all layers.\n        The shapes are determined by propagating some dummy input through\n        the network.\n\n        input_shape:\n            The shape of an input sample. May or may not include batch (B)\n            or channel (C) dimension. If so, channel should be last, i.e.\n            (N,H,W,C)\n        \"\"\"\n\n        input_shape = self._canonical_input_shape(input_shape)\n        ## Torch convolution follows the channel first scheme, that is\n        ## the shape of a 2D convolution is (batch, channel, height, width).\n        torch_input_shape = tuple(input_shape[_] for _ in [0,3,1,2])\n\n        self._input_shapes = {}\n        self._output_shapes = {}\n        self._prepare_hooks(self._shape_hook)\n        self._model(Variable(torch.zeros(*torch_input_shape), volatile=True))\n        self._remove_hooks()\n\n\n    def _get_number_of_input_channels(self) -> int:\n        \"\"\"Get the number of input channels for this network.\n        This is the number of channels each input given to the network\n        should have. 
Usually this coincides with the number of\n        channels in the first layer of the network.\n\n        Returns\n        -------\n        int\n            The number of input channels or 0 if the network does not\n            have input channels.\n        \"\"\"\n        first = self._get_first_layer()\n        return first.in_channels if self.layer_is_convolutional(first) else 0\n\n\n    def _prepare_hooks(self, hook, layer_ids = None) -> None:\n\n        if layer_ids is None:\n            layer_ids = self.layer_ids\n\n        for id in layer_ids:\n            module = self._model._modules[id]\n            module._name = id # FIXME[hack]: how to access the module name?\n            self._hooks[id] = module.register_forward_hook(hook)\n\n    def _remove_hooks(self, layer_ids = None) -> None:\n        if layer_ids is None:\n            layer_ids = list(self._hooks.keys())\n        for id in layer_ids:\n            self._hooks[id].remove()\n            del self._hooks[id]\n\n\n    def _shape_hook(self, module, input, output):\n        name = module._name # FIXME[hack]: how to access the module name?\n        # input[0].size() will be (N, C, H, W) -> store (H ,W, C)\n        input_shape = input[0].size()\n        self._input_shapes[name] = (*input_shape[2:],input_shape[1])\n        # output.size() will be (N, C, H, W) -> store (C, H ,W)\n        output_shape = output.size()\n        self._output_shapes[name] = (*output_shape[2:],output_shape[1])\n\n    def _activation_hook(self, module, input, output):\n        name = module._name # FIXME[hack]: how to access the module name?\n        # output is a torch.autograd.variable.Variable,\n        # output.data holds the actual data\n        # FIXME[question]: it is said in the documentation, that\n        # the TorchTensor and the numpy array share the same memory.\n        # However, the TorchTensor may be removed after usage (is this true?)\n        # what will then happen with the numpy array? do we need to\n        # copy or is that a waste of resources?\n        self._activations[name] = output.data.numpy().copy()\n        if len(output.data.size()) == 4: # convolution, (N,C,H,W)\n            self._activations[name] = self._activations[name].transpose(0,2,3,1)\n\n\n    def _get_layer(self, layer_id) -> nn.Module:\n        \"\"\"Get a torch Module representing the layer for the given identifier.\n\n        Parameters\n        ----------\n        layer_id:\n            Identifier of a layer in this network.\n\n        Returns\n        -------\n        The layer for the given identifier.\n        \"\"\"\n        return (layer_id if isinstance(layer_id,torch.nn.Module)\n                else self._model._modules[layer_id])\n\n\n    def _get_first_layer(self) -> nn.Module:\n        \"\"\"Get a torch Module representing the first layer of this network.\n\n        Returns\n        -------\n        nn.Module\n            The first layer of this network.\n        \"\"\"\n        return self._get_layer(self.layer_ids[0])\n\n\n    @property\n    def layer_ids(self) -> list:\n        \"\"\"Get list of layer ids. These ids can be used to access layers via\n        this Network API.\n\n        Returns\n        -------\n        The list of layer identifiers.\n        \"\"\"\n        return list(self._model._modules.keys())\n\n\n    def layer_is_convolutional(self, layer_id) -> bool:\n        \"\"\"Check if the given layer is a convolutional layer. If so,\n        additional information can be obtained by the methods\n        get_layer_kernel_size, get_layer_input_channels,\n        get_layer_output_channels, get_layer_stride,\n        get_layer_padding, get_layer_output_padding, and\n        get_layer_dilation.\n\n        Parameters\n        ----------\n        layer:\n            Identifier of a layer in this network.\n\n        Returns\n        -------\n        bool\n            True for convolutional layers, else False.\n\n        \"\"\"\n        return isinstance(self._get_layer(layer_id),nn.modules.conv.Conv2d)\n\n\n    def get_layer_kernel_size(self, layer_id) -> int:\n        \"\"\"The size of the kernel in a cross-correlation/convolution\n        operation. 
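For a Conv2d layer with 5x5 kernels this would be the tuple (5, 5). 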
This is just the spatial extension and does not\n include the number of channels.\n\n Parameters\n ----------\n layer_id:\n Identifier of a convolutional layer in this network.\n\n Raises\n ------\n ValueError:\n If the layer_id fails to identify a convolutional layer.\n \"\"\"\n layer = self._get_layer(layer_id)\n self._check_layer_is_convolutional(layer)\n return layer.kernel_size\n\n\n def get_layer_input_channels(self, layer_id) -> int:\n \"\"\"The number of input channels for a cross-correlation/convolution\n operation.\n\n Parameters\n ----------\n layer_id:\n Identifier of a convolutional layer in this network.\n\n Raises\n ------\n ValueError:\n If the layer_id fails to identify a convolutional layer.\n \"\"\"\n layer = self._get_layer(layer_id)\n self._check_layer_is_convolutional(layer)\n return layer.in_channels\n\n\n def get_layer_output_channels(self, layer_id) -> int:\n \"\"\"The number of output channels for a cross-correlation/convolution\n operation.\n\n Parameters\n ----------\n layer_id:\n Identifier of a convolutional layer in this network.\n\n Raises\n ------\n ValueError:\n If the layer_id fails to identify a convolutional layer.\n \"\"\"\n layer = self._get_layer(layer_id)\n self._check_layer_is_convolutional(layer)\n return layer.out_channels\n\n\n def get_layer_stride(self, layer_id) -> (int, int):\n \"\"\"The stride for the cross-correlation/convolution operation.\n\n Parameters\n ----------\n layer_id:\n Identifier of a convolutional layer in this network.\n\n Raises\n ------\n ValueError:\n If the layer_id fails to identify a convolutional layer.\n\n \"\"\"\n layer = self._get_layer(layer_id)\n self._check_layer_is_convolutional(layer)\n return layer.stride\n\n\n def get_layer_padding(self, layer_id) -> (int,int):\n \"\"\"The padding for the cross-correlation/convolution operation, i.e,\n the number of rows/columns (on both sides) by which the input\n is extended (padded with zeros) before the operation is\n applied.\n\n Parameters\n ----------\n layer_id:\n Identifier of a convolutional layer in this network.\n\n Raises\n ------\n ValueError:\n If the layer_id fails to identify a convolutional layer.\n \"\"\"\n layer = self._get_layer(layer_id)\n self._check_layer_is_convolutional(layer)\n return layer.padding\n\n\n def get_layer_output_padding(self, layer_id) -> (int,int):\n \"\"\"The output padding for the cross-correlation/convolution operation.\n\n Parameters\n ----------\n layer_id:\n Identifier of a convolutional layer in this network.\n\n Raises\n ------\n ValueError:\n If the layer_id fails to identify a convolutional layer.\n \"\"\"\n layer = self._get_layer(layer_id)\n self._check_layer_is_convolutional(layer)\n return layer.output_padding\n\n\n def get_layer_dilation(self, layer_id) -> (int, int):\n \"\"\"The dilation for the cross-correlation/convolution operation, i.e,\n the horizontal/vertical offset between adjacent filter\n rows/columns.\n\n Parameters\n ----------\n layer_id:\n Identifier of a convolutional layer in this network.\n\n Raises\n ------\n ValueError:\n If the layer_id fails to identify a convolutional layer.\n \"\"\"\n layer = self._get_layer(layer_id)\n self._check_layer_is_convolutional(layer)\n return layer.dilation\n\n\n def get_layer_input_shape(self, layer_id) -> tuple:\n \"\"\"\n Give the shape of the input of the given layer.\n\n Parameters\n ----------\n layer_id\n\n Returns\n -------\n (units) for dense layers\n (height, width, channels) for convolutional layers\n\n Raises\n ------\n RuntimeError\n If the network shape is 
not determined yet.\n        \"\"\"\n        if self._input_shapes is None:\n            raise RuntimeError(\"Network shapes have not been fixed yet.\")\n        return self._input_shapes[layer_id]\n\n\n    def get_layer_output_shape(self, layer_id) -> tuple:\n        \"\"\"\n        Give the shape of the output of the given layer.\n\n        Parameters\n        ----------\n        layer_id\n\n        Returns\n        -------\n        (units) for dense layers\n        (height, width, channels) for convolutional layers\n\n        Raises\n        ------\n        RuntimeError\n            If the network shape is not determined yet.\n        \"\"\"\n        if self._output_shapes is None:\n            raise RuntimeError(\"Network shapes have not been fixed yet.\")\n        return self._output_shapes[layer_id]\n\n\n    def get_layer_weights(self, layer_id) -> np.ndarray:\n        \"\"\"\n        Returns weights INCOMING to the\n        layer of the model\n        shape of the weights variable should be\n        coherent with the get_layer_output_shape function.\n\n        Parameters\n        ----------\n        layer_id :\n             An identifier for a layer.\n\n        Returns\n        -------\n        ndarray\n            Weights of the layer. For convolutional layers this will\n            be (H,W,C_in,C_out)\n\n        \"\"\"\n        layer = self._get_layer(layer_id)\n        weights = layer.weight.data.numpy()\n        if self.layer_is_convolutional(layer):\n            weights = weights.transpose(2,3,0,1)\n        return weights\n\n\n    def get_layer_biases(self, layer_id) -> np.ndarray:\n        \"\"\"\n        Returns weights INCOMING to the\n        layer of the model\n        shape of the weights variable should be\n        coherent with the get_layer_output_shape function.\n\n        Parameters\n        ----------\n        layer_id :\n             An identifier for a layer.\n\n        Returns\n        -------\n        ndarray\n            Bias values for the layer. For convolutional layers this will\n            be one bias value per (output) channel.\n        \"\"\"\n        layer = self._get_layer(layer_id)\n        biases = layer.bias.data.numpy()\n        return biases\n\n\n\n    def get_activations(self, layer_ids, input_samples: np.ndarray) -> list:\n        \"\"\"Gives activations values of the network/model\n        for a given layername and an input (inputsample).\n\n        Parameters\n        ----------\n        layer_ids: The layer(s) the activations should be fetched for,\n            either an atomic id or a list of ids.\n        input_samples:\n            Array of samples the activations should be computed for.\n            Default format is 4D: (batch,width,height,channel)\n            If batch or channel is 1, it can be omitted.\n\n        Returns\n        -------\n        np.ndarray\n\n        \"\"\"\n        ## We need to know the network input shape to get a canonical\n        ## representation of the input_samples.\n        if self._input_shapes is None or self._output_shapes is None:\n            self._compute_layer_shapes(input_samples.shape)\n\n        _layer_ids, _input_samples = \\\n            super().get_activations(layer_ids, input_samples)\n\n        ## pytorch expects channel first (N,C,H,W)\n        # FIXME[concept]: we may want to directly get the preferred order\n        # from the _canonical_input_data method!\n        _input_samples = _input_samples.transpose((0,3,1,2))\n\n        torch_samples = torch.from_numpy(_input_samples)\n        torch_input = Variable(torch_samples, volatile=True)\n\n        ## FIXME[todo]: use GPU\n        # if self._use_cuda:\n        #     torch_input = torch_input.cuda()\n\n        ## prepare to record the activations\n        self._activations = {}\n        self._prepare_hooks(self._activation_hook, _layer_ids)\n\n        torch_output = self._model(torch_input)\n\n        self._remove_hooks(_layer_ids)\n\n        ## if no batch data was provided, remove batch dimension from\n        ## activations\n        if (_input_samples.shape[0] == 1\n            and len(input_samples.shape) < 4\n            and input_samples.shape[0] != 1):\n            for id in self._activations.keys():\n                self._activations[id] = self._activations[id].squeeze(0)\n\n        ## if a single layer_id was given (not a 
list), then just return\n        ## a single activation array\n        return ([self._activations[_] for _ in layer_ids]\n                if isinstance(layer_ids, list)\n                else self._activations[layer_ids])\n\n\n\n\n\n\n\n\n","sub_path":"network/torch.py","file_name":"torch.py","file_ext":"py","file_size_in_byte":17062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"389318263","text":"# print fields\ndef print_field(field):\n    print('+---+---+---+---+')\n    print('|   | 1 | 2 | 3 |')\n    print('+---+---+---+---+')\n    print('| 1 | %s |' % ' | '.join(field[0]))\n    print('+---+---+---+---+')\n    print('| 2 | %s |' % ' | '.join(field[1]))\n    print('+---+---+---+---+')\n    print('| 3 | %s |' % ' | '.join(field[2]))\n    print('+---+---+---+---+')\n\n\n# check input values or row and column of user\ndef check_range(x, y):\n    try:\n        x = int(x)\n        y = int(y)\n        if 1 <= x <= 3 and 1 <= y <= 3: # simpler to write 1 <= x <= 3 and 1 <= y <= 3\n            return True\n        else:\n            return False\n    except:\n        return False\n\n\n# check, if corresponding cell is available\ndef make_turn(field, row_val, col_val, val):\n    if field[row_val - 1][col_val - 1] != ' ':\n        return False\n    else:\n        field[row_val - 1][col_val - 1] = val\n        return True\n\n\ndef check_field(field, symbol):\n    if check_row(field, symbol):\n        return True\n    elif check_cols(field, symbol):\n        return True\n    else:\n        return check_diagonals(field, symbol)\n\n\ndef check_row(field, symbol):\n    result = False\n    for i in range(len(field)):\n        for j in range(len(field[i])):\n            if not result:\n                if field[i][j] != symbol:\n                    break\n                else:\n                    if j == 2:\n                        result = True\n                        print(\"Row is filled with %s!\" % symbol)\n            else:\n                break\n    return result\n\n\ndef check_cols(field, symbol):\n    result = False\n    for i in range(len(field)):\n        for j in range(len(field[i])):\n            if not result:\n                if field[j][i] != symbol:\n                    break\n                else:\n                    if j == 2:\n                        result = True\n                        print(\"Column is filled with %s!\" % symbol)\n            else:\n                break\n\n    return result\n\n\ndef check_diagonals(field, symbol):\n    result = False\n\n    # check first diagonal\n    for i in range(3):\n        if field[i][i] != symbol:\n            break\n        else:\n            if i == 2:\n                result = True\n                print(\"Diagonal is filled with %s!\" % symbol)\n                break\n\n    if not result:\n        # check second diagonal\n        k = 2\n        for i in range(3):\n            if field[k][i] != symbol:\n                break\n            else:\n                k -= 1\n                if i == 2:\n                    result = True\n                    print(\"Diagonal is filled with %s!\" % symbol)\n\n    return result\n","sub_path":"hw2/game/game_krestiki_noliki/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"472858544","text":"from listaExercicio.uteis.util import Util\n\ndef main():\n    Util().enunciadoEmDuasPartes('READ A TEMPERATURE IN DEGREES KELVIN AND DISPLAY IT CONVERTED TO DEGREES CELSIUS. ',\n                                 'THE CONVERSION FORMULA IS: C = K − 273.15, WHERE C IS THE TEMPERATURE IN CELSIUS AND K THE TEMPERATURE IN KELVIN.', 180)\n    kelvin = round(float(input(f'Enter the temperature in Kelvin: ').replace(',', '.')), 2)\n    celsius = round(kelvin-273.15, 2)\n    print('\\nTemperature entered: ', kelvin)\n    print('Temperature in Celsius: ', celsius)\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"exercicio07/exercicio07.py","file_name":"exercicio07.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"374069296","text":"######## \n### plot_radvQ2max_radcorr.py\n### Plot radius vs Q2max using different schemes for radiative corrections.\n########\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nfrom matplotlib.ticker import FixedLocator, MultipleLocator, FormatStrFormatter\n\n# Use LaTeX font.\nplt.rc('text', usetex=True)\nplt.rc('font',**{'family':'serif','serif':['Computer Modern Roman'],'size':20})\n\n# Set parameters and folder locations.\nkmax = '12'\nbnd = '5'\nfitdata = 'mainz' # dataset used\nfolder = '../'+fitdata+'_leastsq/z'+kmax+'/gb'+bnd+'/' # folder containing central fits\nfolder_radcorr = '../'+fitdata+'_leastsq/z'+kmax+'/radcorr_gb'+bnd+'/' # folder containing fits with other radiative corrections\nstart = 1 # index of Q2max array to start plotting (Mainz, world): (1,0) for 0.05 GeV^2, (5,3) for 0.2 GeV^2\n\n# Define arrays for data.\nQ2max = []\nerad_fit = []\nerad_non = []\nerad_dip = []\nerad_oth = [] # Fesh for world, world+pol, Blun for Mainz\nmrad_fit = []\nmrad_non = []\nmrad_dip = []\nmrad_oth = [] # Fesh for world, world+pol, Blun for Mainz\nchi2_fit = []\nchi2_non = []\nchi2_dip = []\nchi2_oth = [] # Fesh for world, world+pol, Blun for Mainz\n\n# Extract results from text files.\nfor l in open(folder+'results_'+fitdata+'_leastsq_z'+kmax+'_gb'+bnd+'.txt'):\n    values = l.split()\n    Q2max.append(float(values[0]))\n    erad_fit.append(float(values[1]))\n    mrad_fit.append(float(values[2]))\n    chi2_fit.append(float(values[3]))\nfor l in open(folder_radcorr+'results_'+fitdata+'_leastsq_z'+kmax+'_gb'+bnd+'_tpenon.txt'):\n    values = l.split()\n    erad_non.append(float(values[1]))\n    mrad_non.append(float(values[2]))\n    chi2_non.append(float(values[3]))\nfor l in open(folder_radcorr+'results_'+fitdata+'_leastsq_z'+kmax+'_gb'+bnd+'_tpedip.txt'): \n    values = l.split()\n    erad_dip.append(float(values[1]))\n    mrad_dip.append(float(values[2]))\n    chi2_dip.append(float(values[3]))\nfor l in open(folder_radcorr+'results_'+fitdata+'_leastsq_z'+kmax+'_gb'+bnd+'_tpeblu.txt'): # Fesh for world, world+pol, Blun for Mainz\n    values = l.split()\n    erad_oth.append(float(values[1]))\n    mrad_oth.append(float(values[2]))\n    chi2_oth.append(float(values[3]))\n\n# Create figure, subplots, title.\nf, (axre, axrm) = plt.subplots(2, 1, sharex=True, figsize=(8,8))\nf.subplots_adjust(bottom=0.10, top=0.96, hspace=0.1)\n\n# Conventions:\n# Feshbach: ko-, black, solid with points\n# SIFF Blunden/sum of monopoles: bo--, blue, dashed with points\n# SIFF dipole: go-., green, dot-dashed with points\n# No TPE: ro:, red, dotted with points\n\n# Subplot for rE.\n#axre.title('Mainz $r_E, r_M$ vs $Q^2_{\\\mathrm{max}}$, z-exp, $|a_k|_{\\\mathrm{max}} $ = '+bnd+', $k_{\\\mathrm{max}}$ = '+kmax) # title\n#axre.plot(Q2max[start:], erad_fit[start:], 'ko-', label='Fesh') # Mainz\naxre.plot(Q2max[start:], erad_fit[start:], 'bo--', label='Blun') # world, world+pol\naxre.plot(Q2max[start:], erad_non[start:], 'ro:', label='none')\naxre.plot(Q2max[start:], 
erad_dip[start:], 'go-.', label='dip')\n#axre.plot(Q2max[start:], erad_oth[start:], 'bo--', label='Blun') # Mainz\naxre.plot(Q2max[start:], erad_oth[start:], 'ko-', label='Fesh') # world, world+pol\nmuHx=[0,1]\nmuHy=[0.84087,0.84087]\naxre.plot(muHx, muHy, 'r-.', label='muH')\n# y-axis labelling, etc.\naxre.set_ylabel('$r_E$ [fm]') # label, y-axis\naxre.set_ylim([0.82,0.94])\naxre.yaxis.set_major_locator(MultipleLocator(0.02))\naxre.yaxis.set_minor_locator(MultipleLocator(0.005))\n#axre.yaxis.set_major_locator(MultipleLocator(0.01)) # world, start = 3\n#axre.yaxis.set_minor_locator(MultipleLocator(0.005)) \n\n# Subplot for rM.\n#axrm.plot(Q2max[start:], mrad_fit[start:], 'ko-', label='Fesh') # Mainz\naxrm.plot(Q2max[start:], mrad_fit[start:], 'bo--', label='Blun') # world, world+pol\naxrm.plot(Q2max[start:], mrad_non[start:], 'ro:', label='none')\naxrm.plot(Q2max[start:], mrad_dip[start:], 'go-.', label='dip')\n#axrm.plot(Q2max[start:], mrad_oth[start:], 'bo--', label='Blun') # Mainz\naxrm.plot(Q2max[start:], mrad_oth[start:], 'ko-', label='Fesh') # world, world+pol\n# y-axis labelling, etc.\naxrm.set_ylabel('$r_M$ [fm]') # label, y-axis\n# x-axis labelling, etc.\naxrm.set_xlabel('$Q^2_{\\mathrm{max}}$ [GeV${}^2$]') # label, x-axis\nif start == 0 or start == 1:\n axrm.set_xlim(0, max(Q2max)) # specify x-axis limits\n axrm.yaxis.set_major_locator(MultipleLocator(0.1)) # Mainz, specify major tick interval on y-axis.\n axrm.yaxis.set_minor_locator(MultipleLocator(0.05)) # Mainz, specify minor tick interval on y-axis.\n #axrm.yaxis.set_major_locator(MultipleLocator(0.05)) # world\n #axrm.yaxis.set_minor_locator(MultipleLocator(0.025)) # world\nelse:\n axrm.set_xlim(Q2max[start], max(Q2max)) # specify x-axis limits\n axrm.yaxis.set_major_locator(MultipleLocator(0.04)) # specify major tick interval on y-axis.\n axrm.yaxis.set_minor_locator(MultipleLocator(0.01)) # specify minor tick interval on y-axis.\naxrm.xaxis.set_major_locator(MultipleLocator(0.1)) # specify x-axis ticks\naxrm.xaxis.set_minor_locator(MultipleLocator(0.05))\n#plt.locator_params(nbins=8) # specify number of ticks on x-axis\n\n#plt.xticks(np.arange(0, max(Q2max), 0.1)) # specify ticks and limits\n#plt.locator_params(nbins=10) # specify number of ticks on x-axis.\n#start, end = pltrm.get_ylim() # matplotlib auto limits for y-axis\n#pltrm.yaxis.set_ticks(np.arange(start, end, 0.02)) # reset ticks for y-axis\n#pltrm.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))\n\n# Shrink subplot below which legend will be placed.\n#box = pltrm.get_position()\n#pltrm.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])\n# Put a legend below current axis.\n#pltrm.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=4)\n#plt.legend(loc='upper right', ncol=2)\n\n#plt.savefig('../plots/fig_rErMvQ2max_radcorr_'+fitdata+'_kmax'+kmax+'_gb'+bnd+'.pdf')\nplt.savefig('../plots/fig_rErMvQ2max0-2-1-0_radcorr_'+fitdata+'_kmax'+kmax+'_gb'+bnd+'.pdf')\nplt.show()\n","sub_path":"Charge_radius/plots_scripts/plot_radvQ2max_radcorr.py","file_name":"plot_radvQ2max_radcorr.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"526000339","text":"# package : pybitcointools (https://pypi.python.org/pypi/bitcoin written by vitalik\n# private key generation by wkim, 2018. 11. 
03\n\nimport bitcoin.main as btc\n\nimport os\nimport random\nimport time\nimport hashlib\n\n# secp256k1 domain parameter (order of G)\n\nN = 115792089237316195423570985008687907852837564279074904382605163141518161494337\n\n# CSPRNG : os.urandom(), random() \ndef random_key():\n    r = str(os.urandom(32)) \\\n        + str(random.randrange(2**256)) \\\n        + str(int(time.time() * 1000000))\n    r = bytes(r, 'utf-8')\n    h = hashlib.sha256(r).digest()\n    key = ''.join('{:02x}'.format(y) for y in h)\n    return key\n\n\nprivkey = random_key()\nprint(\"\\n === privKey Generation ====\\n\", privkey) \n\ninput(\"\\n\\n\\t\\t if you wanna stop it, pls enter\")\n","sub_path":"private_key.py","file_name":"private_key.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"360740800","text":"import sys\nimport os\nimport requests\nimport yaml\n\n# Import config files\nconfig_file_paths = [os.path.join(os.path.realpath('..'), 'config', 'config.yml')]\nconfig = {}\nfor cf in config_file_paths:\n    f = open(cf,'r')\n    conf = yaml.load(f, Loader=yaml.FullLoader)\n    config.update(conf)\n    f.close()\n\n\ndef format_cookie(cookie, hostname):\n    result = {}\n    ck = str(cookie)\n    j_session_begin = ck.find('JSESSIONID=') + len('JSESSIONID=')\n    jsessionid = ck[j_session_begin:ck.find(' ', j_session_begin + 1)]\n    path_begin = ck.find(hostname) + len(hostname)\n    path = ck[path_begin:ck.find('>,', path_begin + 1)]\n    result.update({\"Cookie\": \"$Version=0; JSESSIONID=\" + jsessionid + \"; $Path=\" + path})\n    return result\n\n\ndef main(output):\n\n    # SET URLS\n\n    hostname = config[\"jasperserver\"][\"hostname\"]\n    port = config[\"jasperserver\"][\"port\"]\n    root = os.path.join('http://' + str(hostname) + ':' + str(port) + '/jasperserver/rest_v2/')\n\n    req_dict = {'urls': [\n        {'name': 'auth', 'url': os.path.join(root, 'login'), 'headers': {\"j_username\": config[\"jasperserver\"][\"username\"], \"j_password\": config[\"jasperserver\"][\"password\"]}, 'method': 'post'},\n        {'name': 'server', 'url': os.path.join(root, 'serverInfo'), 'headers': {}, 'method': 'get' },\n        {'name': 'run', 'url': os.path.join(root, config[\"jasperserver\"][\"report_path\"]), 'headers': {}, 'method': 'get' }\n    ]}\n\n\n    # SEND REQUESTS\n\n    response_dict = {'responses': []}\n    for entry in req_dict['urls']:\n\n        # EXECUTE REQUEST\n        try:\n            if entry['method'] == 'get':\n                response = requests.get(entry['url'], headers=entry['headers'])\n            if entry['method'] == 'post':\n                response = requests.post(entry['url'], data=entry['headers'])\n\n            response_dict['responses'].append(\n                {'name': entry['name'], 'url': response.url, 'status_code': response.status_code,\n                 'reason': response.reason, 'content': response.content}\n            )\n\n            if entry['name'] == 'auth':\n                cookie = format_cookie(str(response.cookies), hostname)\n                for x in req_dict['urls']:\n                    x['headers'].update(cookie)\n\n        except:\n            print('Unable to send the following request: ' + entry['url'])\n\n\n    # WRITE RESULTS\n\n    for response in response_dict['responses']:\n\n        if response['name'] == 'run' and response['status_code'] == 200:\n            with open(output, 'wb') as file:\n                file.write(response['content'])\n\n        elif response['status_code'] != 200:\n            print('Content of failed request for ' + response['name'] + ':\\n' )\n            for key, val in response.items():\n                print(key, \" : \", val)\n            print()\n\n        else:\n            pass\n\n\nif __name__ == \"__main__\":\n    output = sys.argv[1]\n    
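# first CLI argument: destination file for the report content fetched from JasperServer\n    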
main(output)","sub_path":"workflow/scripts/jasper_api/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"324634456","text":"n = int(input())\nclasses = {}\nfor i in range(n):\n line = input()\n parts = line.split(\" : \")\n cls = parts[0]\n if len(parts) == 1:\n classes[cls] = []\n else:\n classes[cls] = parts[1].split(\" \")\n\n\ndef check(src, dest):\n if src == dest:\n return True\n return any([check(child, dest) for child in classes[src]])\n\n\nm = int(input())\nused = []\n\nfor i in range(m):\n cls = input()\n if any([check(cls, used_one) for used_one in used]):\n print(cls)\n used.append(cls)","sub_path":"Python_Code_Kata/ErrorClassGraph/errorClasses.py","file_name":"errorClasses.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"277677616","text":"from .tokenizers import sentence_tokenizer\nimport logging\n\n\nclass decaps_text(object):\n\n \"\"\"\n Normalizes capitalization patterns. Words with only a single capital\n will be converted into lower case.\n \"\"\"\n\n def diffn(self, s1, s2):\n \"\"\" Returns the number of different characters between two strings.\"\"\"\n return len([a for a, b in zip(s1, s2) if a != b])\n\n def __init__(self):\n \"\"\" Initialize the parser. \"\"\"\n self.logger = logging.getLogger(__name__)\n\n def modify_word(self, org):\n '''\n Changes a word to lower case if it contains exactly one capital letter.\n\n Args:\n org: a string\n Returns:\n lower: the lowercase of org, a string\n '''\n\n lower = org.lower()\n\n if self.diffn(org, lower) > 1:\n return org\n elif org != lower:\n self.logger.info('Decapitalizing word %s to %s' % (org, lower))\n return lower\n\n def __call__(self, text):\n \"\"\"\n Runs the parser.\n\n Args:\n text: a string document\n Returns:\n doc2: a string document\n \"\"\"\n\n sentences = sentence_tokenizer(text)\n\n doc2 = []\n\n for sent in sentences:\n\n sent = [self.modify_word(w) for w in sent]\n doc2.append(' '.join(sent))\n\n doc2 = '\\n'.join(doc2)\n\n return doc2\n","sub_path":"nlpre/decaps_text.py","file_name":"decaps_text.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"424337224","text":"import seagul.envs\nimport gym\n\nenv_name = \"su_acro_drake-v0\"\nenv = gym.make(env_name)\n\nimport datetime\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom multiprocessing import Process\nfrom seagul.rl.run_utils import run_sg, run_and_save_bs\nfrom seagul.rl.algos import ppo, ppo_switch\nfrom seagul.rl.models import PPOModel, SwitchedPPOModel, PPOModelActHold\nfrom seagul.nn import MLP, CategoricalMLP\nimport time\n\n# init policy, valuefn\ninput_size = 4\noutput_size = 1\nlayer_size = 0\nnum_layers = 0\nactivation = nn.ReLU\n\n\nproc_list = []\n\nfor seed in range(200)[-7:]:\n # policy = MLP(input_size, output_size, num_layers, layer_size, activation)\n policy = torch.load(\"warm/LQR_policy\")\n value_fn = MLP(input_size, 1, num_layers, layer_size, activation)\n model = PPOModel(\n policy=policy,\n value_fn = value_fn,\n discrete=False,\n# hold_count = 0\n )\n\n def reward_fn(ns, act):\n return -1e-2*((ns[0] - np.pi)**2 + ns[1]**2 + .1*ns[2]**2 + .2*ns[3]**2)\n #return 1e-2*(np.cos(ns[0]) + np.cos(ns[0] + ns[1]))\n \n env_config = {\n \"max_torque\" : 25,\n \"init_state\" : [np.pi, 0.0, 0.0, 
0.0],\n \"init_state_weights\" : np.array([0, 0, 0, 0]),\n \"dt\" : .02,\n \"max_t\" : 1,\n \"act_hold\" : 1,\n \"fixed_step\" : True,\n \"reward_fn\" : reward_fn,\n \"th1_range\" : [-2*np.pi, 2*np.pi],\n # \"max_th1dot\" : 20,\n # \"max_th2dot\" : 40\n }\n \n alg_config = {\n \"env_name\": env_name,\n \"model\": model,\n \"act_var_schedule\": [.1],\n \"seed\": seed, # int((time.time() % 1)*1e8),\n \"total_steps\" : 1e6,\n \"epoch_batch_size\": 2048,\n \"reward_stop\" : None,\n \"gamma\": 1,\n \"pol_epochs\": 10,\n \"val_epochs\": 10,\n \"env_config\" : env_config\n }\n \n run_name = \"swingup\" + str(seed)\n \n p = Process(target=run_sg, args=(alg_config, ppo, run_name , \"training linear policy\", \"/data_linear_sd/trial20/\"))\n p.start()\n proc_list.append(p)\n \n\nfor p in proc_list:\n p.join()\n","sub_path":"switching/meta_lin.py","file_name":"meta_lin.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"42362877","text":"import unittest\n\ndef checkPermutation(str1, str2):\n if len(str1) != len(str2):\n return False\n \n s1 = ''.join(sorted(str1))\n s2 = ''.join(sorted(str2))\n\n return s1 == s2\n\nclass Test(unittest.TestCase):\n dataT = (\n ('abcd', 'bacd'),\n ('3563476', '7334566'),\n ('wef34f', 'wffe34'),\n )\n dataF = (\n ('abcd', 'd2cba'),\n ('2354', '1234'),\n ('dcw4f', 'dcw5f'),\n )\n\n def test_cp(self):\n # true check\n for test_strings in self.dataT:\n result = checkPermutation(*test_strings)\n self.assertTrue(result)\n # false check\n for test_strings in self.dataF:\n result = checkPermutation(*test_strings)\n self.assertFalse(result)\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"ch1/checkPermutation.py","file_name":"checkPermutation.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"542074468","text":"#!/usr/bin/env python2\n\"\"\"\nResets the browser during each task. 
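It polls the instruction server for a start signal, launches Chrome for the task, and tears the whole browser process tree down when the kill endpoint (or a timeout) fires.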
\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom undaemon import Undaemon\n\nimport logging.config\nimport os\nimport time\nimport sys\nimport subprocess as sp\nimport subprocess\nimport re\nimport shlex\nimport signal\nimport threading\nfrom functools import partial\nfrom logging.handlers import SysLogHandler\n\n\nDEFAULT_POLL_ENDPOINT = \"https://instr.httpdos.com:1070/startbrowser/\"\nNOTIFY_UPDATE_COMPLETE_ENDPOINT = \"https://instr.httpdos.com:1070/killbrowser/\"\nNOTIFY_READY = \"https://instr.httpdos.com:1070/browserready/\"\nDNSMASQ_CONFIG_PLACE = \"/home/{user}/dnsmasq_more.conf\".format(user=os.environ[\"USER\"])\nAUX_SSL_PATH = \"/opt/openssl-1.0.2/\"\nLD_LIBRARY_PATH =\"/opt/openssl-1.0.2/lib\"\nSTART_TOKEN = \"KDDFQ\"\nEND_TOKEN = \"EAJ\"\nCHROME_CGROUP = \"/sys/fs/cgroup/chrome\"\n# Time in seconds to wait\nKILL_BROWSER_AFTER = 40\n# Debug time\n#KILL_BROWSER_AFTER = 60\nTIMES_TO_CHECK_FOR_CHROME = 50\n\n\nexec_env = os.environ.copy()\nexec_env[\"PATH\"] = AUX_SSL_PATH + \"/bin:\" + exec_env[\"PATH\"]\nexec_env[\"LD_LIBRARY_PATH\"] = LD_LIBRARY_PATH\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': 'RESETTER %(levelname)s %(message)s'\n },\n },\n 'handlers': {\n 'syslog':{\n 'level':'DEBUG',\n 'class':'logging.handlers.SysLogHandler',\n 'formatter': 'simple',\n 'facility': SysLogHandler.LOG_LOCAL2,\n 'address': '/dev/log'\n }\n },\n 'loggers': {\n 'browser_resetter': {\n 'handlers':['syslog'],\n 'propagate': True,\n 'level':'INFO',\n },\n 'undaemon': {\n 'handlers':['syslog'],\n 'propagate': True,\n 'level':'INFO',\n }\n },\n}\nlogging.config.dictConfig(LOGGING)\nlogger = logging.getLogger(\"browser_resetter\")\nlogger.info(\"STARTING BROWSER RESETTER\")\n\n\nstation_name = open(\"/home/ubuntu/Station\").read()\n\n\ndef curl_arguments(endpoint, data_binary=\"\", use_output=False):\n # This function is a bit different because we don't need actual data, \n # but we do need a status....\n return [\n \"curl\", \n \"-s\", ] + \\\n ([ \"-o\", \"/dev/null\" ] if not use_output else []) + \\\n [\"-w\", \"%{http_code}\",\n \"--data-binary\", '{0}'.format(repr(data_binary.encode('ascii'))),\n # I don't think any data needs to be submitted\n \"-X\", \"POST\", \"--http2\", endpoint\n ]\n\n\ndef main():\n os.environ[\"PATH\"] = os.path.join(AUX_SSL_PATH, \"bin/\") + \":\" + os.environ[\"PATH\"]\n os.environ[\"LD_LIBRARY_PATH\"] = LD_LIBRARY_PATH\n set_signal_handlers()\n while True:\n try:\n work()\n except Exception as e:\n logger.error(\"Got exception: %s\", str(e))\n \n \ncurrent_kill_watch = None\n\n\ndef on_usr1(arg1, arg2):\n logger.info(\"Received SIGUSR1\")\n if current_kill_watch is not None:\n current_kill_watch(arg1, arg2)\n\n \ndef set_signal_handlers():\n signal.signal(signal.SIGUSR1, on_usr1)\n #signal.signal(signal.SIGALRM, self._alarm_handler)\n #signal.signal(signal.SIGTERM, self._kill_all) \n\n\nclass BrowserKillWatch(object):\n \n def __init__( self, undaemon_instance, hash_id):\n self._undaemon_instance = undaemon_instance\n self._hashid = hash_id \n self._on_exit_lock = threading.Lock()\n self._browser_already_killed = threading.Event()\n self._browser_on_normal_exit = threading.Event()\n global current_kill_watch\n current_kill_watch = self.on_usr1\n \n def run(self):\n self._kill_timer = threading.Timer(KILL_BROWSER_AFTER, self.timed_kill)\n self._kill_timer.start()\n 
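# the timer above is a hard deadline; the polling below normally kills the browser first\n        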
self.on_browser_should_finish()\n        \n    def on_usr1(self, arg1, arg2):\n        if self._browser_on_normal_exit.is_set() :\n            logger.info(\"Received SIGUSR1 on normal exit\")\n        else:\n            logger.info(\"Received unexpected SIGUSR1\")\n        # Force the remaining finalization mechanisms to acknowledge that the browser exited,\n        # so that we can start over.\n        self._browser_already_killed.set()\n        \n    def __call__(self):\n        self.run()\n\n    def on_browser_should_finish(self):\n        try:\n            undaemon_instance, hashid = self._undaemon_instance, self._hashid\n            args_get = [\n                \"curl\",\n                \"-s\", # Silent mode\n                \"-w\", \"status=%{http_code}\",\n                \"--max-time\", \"1\",\n                \"-m\", str(KILL_BROWSER_AFTER), # Establish a timeout\n                \"--data-binary\", END_TOKEN.encode('ascii'),\n                # I don't think any data needs to be submitted\n                \"-X\", \"POST\", \"--http2\", NOTIFY_UPDATE_COMPLETE_ENDPOINT+station_name\n            ]\n            enter_time = time.time()\n            while True:\n                if self._browser_already_killed.is_set():\n                    # Well, well met\n                    break\n                try:\n                    logger.debug(\"Executing: %s\", \" \".join(args_get))\n                    process_output = sp.check_output(args_get)\n                except Exception as e:\n                    logger.debug(\" .... Err invoking curl, %s \", repr(e))\n                    # Sleep a little bit\n                    time.sleep(1.0)\n                else:\n                    status_code, returned_hash_id = token_and_status_from_curl_output(process_output)\n                    logger.info(\"For killing, just obtained status_code %s and hash %s\", status_code, returned_hash_id)\n                    # Got a token?\n                    if status_code==\"200\":\n                        self.log_and_kill_the_browser()\n                    else:\n                        logger.error(\"When-to-kill returned NOOK status code: %s\", status_code )\n                        time.sleep(1.0)\n        except Exception as e:\n            self.log_and_kill_the_browser()\n            logger.error(\"Exception in on_browser_should_finish: %s\", repr(e))\n\n    def timed_kill(self):\n        if not self._browser_already_killed.is_set():\n            logger.info(\"Attempting timed kill of the browser\")\n            self.log_and_kill_the_browser()\n\n    def log_and_kill_the_browser(self):\n        if not self._browser_already_killed.is_set():\n            self._browser_on_normal_exit.set()\n            with self._on_exit_lock:\n                undaemon_instance = self._undaemon_instance\n                logger.info(\"Going to kill the Chrome process and all its descendance, if this is the last message you see, we have a problem\")\n                (terminated, killed) = undaemon_instance.kill_all()\n                logger.info(\"Terminated: %d, Killed: %d\", terminated, killed)\n            self._browser_already_killed.set()\n\n\ndef work():\n    args_get = [\n        \"curl\", \n        \"-s\", # Silent mode\n        \"-w\", \"status=%{http_code}\",\n        \"--max-time\", \"1\",\n        \"--data-binary\", START_TOKEN.encode('ascii'),\n        # I don't think any data needs to be submitted\n        \"-X\", \"POST\", \"--http2\", DEFAULT_POLL_ENDPOINT+station_name\n    ] \n    try:\n        logger.debug(\"Executing: %s\", \" \".join(args_get))\n        process_output = sp.check_output(args_get)\n    except sp.CalledProcessError as e:\n        logger.error(\" .... 
Err in curl, returncode: %d \", e.returncode)\n # Sleep a little bit\n time.sleep(1.0)\n else:\n try:\n status_code, hashid = token_and_status_from_curl_output(process_output)\n except (AttributeError,ValueError,re.error):\n status_code = \"501\"\n hashid = \"error-calling-curl\"\n logger.info(\"Start browser, status_code=%s, hashid=%s\", status_code, hashid)\n if \"200\" in status_code:\n try:\n chrome_process = chrome_run()\n except Exception as e:\n logger.error(\"Chrome run: %s\", str(e))\n return\n args_ready = curl_arguments(NOTIFY_READY+station_name, data_binary=START_TOKEN)\n # The daemon needs to know when the browser is ready to deliver the url, otherwise\n # the url can be delivered too early ..... \n sp.check_call(args_ready)\n # Run a thread to watch for the reset\n # signal\n kill_watch = BrowserKillWatch(chrome_process, hashid)\n watch = threading.Thread(target = kill_watch)\n watch.start()\n\n # And now just wait for the watcher before doing anything...\n watch.join()\n\n # And then, exit and re-spawn (not that this may solve the problem)\n else:\n logger.info(\"Invalid status code in HTTP response: %s\", status_code )\n time.sleep(1.0)\n\n\ndef token_and_status_from_curl_output(process_output):\n # Got a good result code?\n mo = re.search(r\"status=(\\d+)\", process_output )\n status_code = mo.group(1)\n # And maybe a token?\n mo = re.search(r\"hashid=([A-Za-z0-9]{3,})\", process_output)\n hashid = mo and mo.group(1)\n return status_code, hashid\n\n\n#google-chrome\ndef tool(cmdstr):\n pieces = shlex.split(cmdstr)\n o = subprocess.check_output(pieces)\n return o \n\n\ndef run(cmdstr):\n pieces = shlex.split(cmdstr)\n p = subprocess.Popen(pieces)\n return p\n\n\ndef restore_chrome_profile():\n # Most likely you will need to create this directory by hand \n try:\n subprocess.check_call(shlex.split(\"rm -rf /home/ubuntu/.config\"))\n cmd_out = open(\"/dev/null\", \"a\")\n subprocess.check_call( \n shlex.split(\"rsync -avz /home/ubuntu/pristine-config/ /home/ubuntu/.config\"),\n stdout = cmd_out\n )\n except subprocess.CalledProcessError:\n print(\"Didn't work Chrome restore profile\")\n\n\ndef chrome_run():\n restore_chrome_profile()\n chrome_process = Undaemon(\n shlex.split(\"google-chrome --disable-gpu --disable-async-dns --dns-prefetch-disable --disable-web-security --disable-seccomp-filter-sandbox --disable-preconnect --disable-offline-auto-reload \"),\n user=1000,\n undaemon_cgroup_path = CHROME_CGROUP\n )\n undaemon_thread = threading.Thread(\n target= partial(chrome_process.undaemon, set_signal_handlers = False),\n )\n undaemon_thread.start()\n times_tried = 0\n while True:\n time.sleep(1.4)\n s = tool(\"xwininfo -tree -root\")\n mo = re.search(r\"\\s+(0x[a-f0-9]+) \\\".*?Google Chrome\\\"\", s)\n if mo is None:\n logger.warning(\"Couldn't find Google chrome windows, maybe re-trying \")\n if times_tried > TIMES_TO_CHECK_FOR_CHROME:\n logger.error(\"Exiting chrome script because chrome windows didn't open\")\n sys.exit(1) \n else:\n times_tried += 1\n else:\n logger.info(\"Found Chrome windows\")\n break\n winid = mo.group(1)\n logger.info(\"Win id: %s\", winid)\n tool(\"xdotool windowsize --sync {0} 100% 100%\".format(winid))\n tool(\"xdotool click --window {0} 1\".format(winid))\n time.sleep(1.5)\n\n # Let's press this key combination a few times to be sure that it works....\n tool(\"xdotool key --window {0} \\\"ctrl+shift+i\\\"\".format(winid)) # Show\n time.sleep(2.5)\n tool(\"xdotool key --window {0} \\\"ctrl+shift+i\\\"\".format(winid)) # Hide \n 
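# short pause between toggles so each keypress is registered before the next\n    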
time.sleep(1.5)\n tool(\"xdotool key --window {0} \\\"ctrl+shift+i\\\"\".format(winid)) # Show again\n time.sleep(1.5)\n\n # Get chrome as full-screen, so to make taking screenshots easier.\n tool(\"xdotool key --window {0} \\\"F11\\\"\".format(winid))\n logger.info(\"Starting wait for Chrome process to exit\")\n\n return chrome_process\n\n\nif __name__ == \"__main__\":\n main() ","sub_path":"scripts/browser_resetter.py","file_name":"browser_resetter.py","file_ext":"py","file_size_in_byte":12019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"535585999","text":"from __future__ import print_function\nfrom keras.preprocessing.image import ImageDataGenerator\nimport numpy as np \nimport os\nimport glob\nimport skimage.io as io\nimport skimage.transform as trans\nfrom skimage import img_as_ubyte\nfrom skimage.color import rgb2gray, gray2rgb\nfrom skimage.transform import resize\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nimport cv2\nfrom sklearn.model_selection import train_test_split\n\nStreet = [0, 0, 255]\nBuilding = [255, 0, 0]\nUnlabelled = [0,0,0]\n\nCOLOR_DICT = np.array([Street, Building, Unlabelled])\n\n\nim_width = 512\nim_height = 512\nborder = 1\nimagesNumber = 100\npath_image = './data/berlin/image/'\npath_label = './data/berlin/label/'\n\n\ndef get_path(path_type, id):\n return path_type + str(id) + '.png'\n\n\n\ndef get_data(): \n X = np.zeros((imagesNumber, im_height, im_width, 1), dtype=np.float32)\n y = np.zeros((imagesNumber, im_height, im_width, 1), dtype=np.float32)\n\n for id in range(1, imagesNumber + 1): \n\n x_img = img_to_array(load_img(get_path(path_image, id), color_mode=\"grayscale\" )) \n x_img = resize(x_img, (im_height, im_width, 1), mode='constant', preserve_range=True)\n\n mask_org = img_to_array(load_img(get_path(path_label, id)))\n mask_org[np.where((mask_org==[255,0, 0]).all(axis=2))] = [255,255,255]\n mask = rgb2gray(mask_org)\n mask = resize(mask, (im_height, im_width, 1), mode='constant', preserve_range=True) \n\n X[id-1] = x_img / 255\n y[id-1] = mask / 255\n\n return X, y\n\n\n\n\ndef labelVisualize(num_class,color_dict,img):\n img = img[:,:,0] if len(img.shape) == 3 else img\n img_out = np.zeros(img.shape + (3,))\n for i in range(num_class):\n img_out[img == i,:] = color_dict[i]\n return img_out\n\n\n\ndef saveResult(save_path, npyfile, originals, ground_truths, flag_multi_class = False,num_class = 2):\n for i,item in enumerate(npyfile):\n # img = labelVisualize(num_class,COLOR_DICT,item) \n # print(np.unique(img)) \n img = item[:,:,0] if len(item.shape) == 3 else item\n img = img_as_ubyte(img)\n\n original = originals[i][:, :, 0] if len(originals[i].shape) == 3 else originals[i]\n original = img_as_ubyte(original)\n\n ground_truth = ground_truths[i][:, :, 0] if len(ground_truths[i].shape) == 3 else ground_truths[i]\n ground_truth = img_as_ubyte(ground_truth)\n \n\n merge = np.concatenate((img, original, ground_truth), axis=1 )\n io.imsave(os.path.join(save_path,\"%d_predict.png\"%i), merge)","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"619976721","text":"#!/usr/bin/python\n#\n# kill_test: tests the kill command with the default\n# semantics of:\n#\n# kill \n#\n# This test may require updating such that we test other signals\n# \n# Requires the following commands to be implemented\n# or otherwise 
usable:\n#\n#\tkill, sleep\n#\n\nimport sys, imp, atexit\nsys.path.append(\"/home/courses/cs3214/software/pexpect-dpty/\")\nimport pexpect, shellio, signal, time, os, re, proc_check\n\n\n# Ensure the shell process is terminated\ndef force_shell_termination(shell_process):\n\tshell_process.close(force=True)\n\n# pulling in the regular expression and other definitions\ndefinitions_scriptname = sys.argv[1]\ndef_module = imp.load_source('', definitions_scriptname)\nlogfile = None\nif hasattr(def_module, 'logfile'):\n logfile = def_module.logfile\n\n# spawn an instance of the shell\nc = pexpect.spawn(def_module.shell, drainpty=True, logfile=logfile)\natexit.register(force_shell_termination, shell_process=c)\n\n# set timeout for all following 'expect*' calls to 2 seconds\nc.timeout = 2\n\n# ensure that the shell prints the expected prompt\nassert c.expect(def_module.prompt) == 0, \"Shell did not print expected prompt\"\n\n\n\n# run a command\nc.sendline(\"sleep 30 &\")\n\n# parse the jobid and pid output\n(jobid, pid) = shellio.parse_regular_expression(c, def_module.bgjob_regex)\n\n# ensure that the shell prints the expected prompt\nassert c.expect(def_module.prompt) == 0, \"Shell did not print expected prompt\"\n\n# The job needs to be running when we call kill\nproc_check.count_children_timeout(c, 1, 1)\n\n# Run the kill command and kill the sleep process in the background\nc.sendline(def_module.builtin_commands['kill'] % jobid)\n\n# ensure that the shell prints the expected prompt\nassert c.expect(def_module.prompt) == 0, \"Shell did not print expected prompt\"\n\n# ensure there is enough time for the process to be killed\ntime.sleep(.5)\n\n\n\n# check via the proc filesystem that the process has actually been killed;\n# its /proc entry should no longer exist\nassert not os.path.exists(\"/proc/\" + pid + \"/stat\"), 'the process was not \\\nkilled'\n\n\n# end the shell program by running its exit builtin\nc.sendline(\"exit\")\n\n# ensure that no extra characters are output after exiting\nassert c.expect_exact(\"exit\\r\\n\") == 0, \"Shell output extraneous characters\"\n\n\n# the test was successful\nshellio.success()\n","sub_path":"systems_cs_3214/group451/esh/src/tests/basic/kill_test.py","file_name":"kill_test.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"304537654","text":"\"\"\"\nManage the application configuration for an application.\n\nThis class is only partially implemented, but allows the whole startup\ninfrastructure to be tested. It is intended that constructing the configuration\nfor an application will provide it with all the information needed to run. This\ninformation can come from a variety of sources. The default implementation will\nprovide three sources but the software will allow any number of sources to be\ndefined. The sources are considered to be hierarchical, so configuration\nelements at a lower level will override the same elements from a higher level.\nConfiguration data will be persisted in\n`JSON `_ files which match closely in\nstructure with the dictionary structure used internally to hold configuration\ndata.\n\nExternal configuration is stored in JSON files and is loaded into the computer\nas a dictionary that contains the configuration data for the application. Third\nparty configuration modules must be capable of translating their information\ninto a JSON-compatible dictionary. 
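For example, one such source might persist the following JSON document, which
the loader would read into an equivalent Python dictionary (the values shown
are purely illustrative)::

    {
        "username": "Jane Q. Public",
        "verbose": 2,
        "user": {"roles": ["developer"]}
    }
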
Each source is responsible for the\nmaintenance of it's configuration data through a graphical program that has not\nyet been developed.\n\nMuch of this data is stored in configuration files that are located via a\nconfiguration index file that contains entries for the address of each\napplication. Configuration element names are grouped into functional groups\nthat work together. Groups may not be implemented in the initial configuration\nmodule. The following groups are supported by this configuration and should be\nsupported by the user's interface module to any third party configuration\nsystem:\n\n* user |br| \n This group contains personal information related to a specific user.\n Ultimately this information is supplied by the user or by the Human Resources\n department of am organization. This group is mandatory as it allows the\n tracking of problems related to the application.\n A user entry may specify one or more roles that a user fulfills in an\n organization.\n* user roles |br| \n These roles define what the user does within an organization. In many cases\n they actually define job titles. Roles are implemented as groups in their own\n right and provide a description of how the role relates to the organization.\n Roles include:\n\n * developer |br|\n This group contains a list of information related to the original\n developer(s) of this application. It may contain a list of links to\n group(s) that define each developer involved in the creation of this\n application. It contains information relating to the original developer\n of the application. It is used for problem tracking and is optional. It\n is only used when ???\n * team_developer|br| \n This list of entries defines the members of the team that developed this\n application.\n\n.. only:: development_administrator\n\n Created on Apr. 19, 2020\n \n @author: Jonathan Gossage\n\n\"\"\"\n#TODO: Complete documentation of the configuration process - Issue 1\n\n#import json\n#from json import JSONEncoder\nfrom argparse import SUPPRESS, FileType, REMAINDER, Action\nimport sys\nfrom typing import (Any, Optional, Dict, Mapping, Tuple, Callable,\n Literal, Union)\n\nfrom lib.parse_arguments import Arguments as _a\n\n#import lib.version\n#v = lib.version.Version\n\n# Define the types of testing supported - used in the `test` configuration\n# entry.\nunittests = 0x01\nfunctionaltests = 0x02\nintegrationtests = 0x04\nacceptancetests = 0x08\nfuzzytests = 0x10\n# Configuration item keys. These variables are for commonly used configuration\n# elements. Others are organization or application specific and their keys will\n# not appear here.\n\nuserid = 'userid'\n\"\"\"\n*userid* is the operating system user identification of the user running the\nprogram.\n\"\"\"\nusername = 'username'\n\"\"\" *username* is the full name of the user running the program.\"\"\"\nuid = 'uid'\n\"\"\" *uid* is the numeric identifier of the user on Linux\"\"\"\ngid = 'gid'\n\"\"\" *gid* is the numeric identifier of the user's primary group in Linux\"\"\"\ncomputer_name = 'computer_name'\n\"\"\"The name of the node that is running this program\"\"\"\ndebug = 'debug'\n\"\"\"\nRun the program in `debugging `_\nmode in the Python interactive console or in other consoles such as\n`Idle `_ that support Python\ndebugging.\n\"\"\"\ntestrun = 'testrun'\n\"\"\"\nRun the program in `doctest `_\nmode.\n\"\"\"\nprofile = 'profile'\n\"\"\"\n`Profile `_ the python program.\n\"\"\"\nversion = 'version'\n\"\"\"\nAbbreviated program version. 
Global Village uses and supports\n`semantic versioning `_ ???\n\"\"\"\nrelease = 'release'\n\"\"\"\n`Full `_ program version.\n\"\"\"\ndatecr = 'datecr'\n\"\"\"Date program was created.\"\"\"\ndateup = 'dateup'\n\"\"\"Date program was last updated.\"\"\"\nverbose = 'verbose'\n\"\"\"\nRun program in verbose mode. This item can have values that range from 0-3\nwhich controls how verbose the program is, i.e. how much debugging data is\nprovided. 0 is the most terse.\n\"\"\"\npname = 'pname'\n\"\"\"The name of the program, as seen by the operating system\"\"\"\n\nplid = 'plid'\n\"\"\"\nName of the operating system platform that is running.\n\"\"\"\nppath = 'ppath'\n\"\"\"\nPath to the directory that contains the platform specific modules for this\napplication.\n\"\"\"\ncommandargs = 'commandargs'\n\"\"\"\nA list of the arguments passed from the command line when the application is\ninvoked. This is used in situations where the command line is emulated.\n\"\"\"\numname = 'umname'\n\"\"\"Name of the application specific high-level module.\"\"\"\numpkg = 'umpkg'\n\"\"\"Name of the package that contains the application module.\"\"\"\numclass = 'umclass'\n\"\"\"\nName of the class that contains the high-level application specific code\n\"\"\"\nlog = 'log'\n\"\"\"\nThe log to use throughout the application. This is a Python type.\n\"\"\"\nuac = 'uac'\n\"\"\"\nThe name of the application specific main-line function for this application\n\"\"\"\ncmdargs = 'cmdargs'\n\"\"\"\nA list of arguments to be used in place of the command line. Mainly used for\ndebugging with tools like `unittest`.\"\"\"\ncmdfile = 'cmdfile'\n\"\"\"\nThe path to the file that is to be used for the configuration of this\napplication for a specific run.\n\"\"\"\nnoupdate = 'noupdate'\n\"\"\"\nFlag that supresses the update of a configuration value\n\"\"\"\nnologging = 'nologging'\n\"\"\"\nFlag that suppresses the use of the gvLogging facility, leaving only the\nability to write messages to stderr.\n\"\"\"\nnoargs = 'noargs'\n\"\"\"\nFlag that suppresses the use of command line arguments\n\"\"\"\nnoconfig = 'noconfig'\n\"\"\"\nFlag that suppresses the use of configuration files leaving only the\nconfiguration supplied by the Global Village\n\"\"\"\nlogsys = 'logsys'\n\"\"\"\nThe name of the module that invokes or supplies a third party gvLogging system\nThis module should be callable. 
We make use of the initialization and call\nmethods from this module.\n\"\"\"\nargsys = 'argsys'\n\"\"\"\nThe name of the module that invokes or supplies a third party command line\nargument processing system.\n\"\"\"\ntest = 'test'\n\"\"\"\nDefines the kind of testing being done on this run\n\"\"\"\n\n#TODO: Update configuration.py with key names from cfg.data\n\n# The action types\nACTLIT = Literal[ 'store,', 'store_const', 'store-true', 'store_false',\n 'append', 'append_const', 'count', 'help', 'version',\n 'extend']\nclass ArgDescriptor(dict):\n \"\"\"\n Describes how to define a command line override for a configuration entry.\n This is an attribute of the `CfgEntry` for a configuration item.\n If it has no value, the configuration item cannot be overridden from the\n command line.\n \"\"\"\n def __init__(self: 'ArgDescriptor',\n dest: str,\n keywordDefs: Tuple[str, ...],\n positional: str,\n type_: Union[FileType, Callable[[str], type], type],\n nargs: Union[Literal['?', '*', '+'], REMAINDER, int],\n default: Any = SUPPRESS,\n const: Optional[Any]=None,\n action: Union[ACTLIT, Action]='store'\n ) -> None:\n dict.__init__(self,\n _dest=dest,\n _keywordDefs=keywordDefs,\n _positional=positional,\n _type=type_,\n _default=default,\n _const=const,\n _nargs=nargs,\n _action=action)\n\n @property\n def dest(self: 'ArgDescriptor') -> str:\n return super()['_dest']\n\n @dest.setter\n def dest(self: 'ArgDescriptor',\n type_: str) -> None:\n super()['_dest'] = type_\n\n @property\n def keywordDefs(self: 'ArgDescriptor') -> Optional[str]:\n return super()['_keywordDefs']\n\n @keywordDefs.setter\n def keywordDefs(self: 'ArgDescriptor',\n keywordDefs: str) -> None:\n super()['_keywordDefs'] = keywordDefs\n\n @property\n def positional(self: 'ArgDescriptor') -> Optional[str]:\n return super()['_positional']\n\n @positional.setter\n def positional(self: 'ArgDescriptor',\n positional: str) -> None:\n super()['_positional'] = positional\n\n @property\n def type(self: 'ArgDescriptor') -> str:\n return super()['_type']\n\n @type.setter\n def type(self: 'ArgDescriptor',\n type_: str) -> None:\n super()['_type'] = type_\n\n @property\n def default(self: 'ArgDescriptor') -> str:\n return super()['_default']\n\n @default.setter\n def default(self: 'ArgDescriptor',\n default: str) -> None:\n super()['_default'] = default\n\n @property\n def const(self: 'ArgDescriptor') -> Any:\n return super()['_const']\n\n @const.setter\n def const(self: 'ArgDescriptor',\n const: str) -> None:\n super()['_const'] = const\n\n @property\n def nargs(self: 'ArgDescriptor') -> Any:\n return super()['_nargs']\n\n @nargs.setter\n def nargs(self: 'ArgDescriptor',\n nargs: str) -> None:\n super()['_nargs'] = nargs\n\nclass CfgAdmin(dict):\n \"\"\"\n This class contains the administrative data associated with a configuration\n item. 
If it has no value, administrative ability for this configuration\n item will be limited.\n \"\"\"\n def __init__(self: 'CfgAdmin',\n owner:Optional[str]=None,\n overideable: bool=False) -> None:\n dict.__init__(self,\n _owner=owner,\n _overideable=overideable)\n\n @property\n def owner(self: 'CfgAdmin') -> str:\n return super()['_owner']\n\n @owner.setter\n def owner(self: 'CfgAdmin',\n owner: str) -> None:\n super()['_owner'] = owner\n\n @property\n def overideable(self: 'CfgAdmin') -> str:\n return super()['_overideable']\n\n @overideable.setter\n def overideable(self: 'CfgAdmin',\n overideable: str) -> None:\n super()['_overideable'] = overideable\n\nclass CfgEntry(dict):\n \"\"\"\n Encapsulates all the components of a configuration entry.\n \"\"\"\n\n def __init__(self: 'CfgEntry',\n name: str, # This is the key of the entry in the\n # configuration dictionary\n value: Optional[Any],\n description: Optional[str]=None,\n ad: Optional[ArgDescriptor]=None,\n flags: int=0,\n admin: Optional[CfgAdmin]=None) -> None:\n \"\"\"\n :param str name: Name of the configuration item\n :param Any value: The value for the dictionary entry\n :param str description: Describes the purpose and use of the \n configuration item\n :param ArgDescriptor ad: The argparse definition. If present, the\n argument can be overridden from the command\n line.\n :param int flags: Flags that control the usage of the argument\n They are independent of the administrative\n environment\n :param CfgAdmin admin: Administrative data for this entry\n \"\"\" \n dict.__init__(self,\n _name=name,\n _value=value,\n _description=description,\n _ad=ad,\n _flags=flags,\n _admin=admin)\n\n @property\n def name(self: 'CfgEntry') -> str:\n return super()['_name']\n\n @name.setter\n def name(self: 'CfgEntry',\n name: str):\n super()['_name'] = name\n\n @property\n def value(self: 'CfgEntry') -> Any:\n return super()['_value']\n\n @value.setter\n def value(self: 'CfgEntry',\n v: Any):\n super()['_value'] = v\n\n @property\n def description(self: 'CfgEntry') -> str:\n return super()['_description']\n\n @description.setter\n def description(self: 'CfgEntry',\n description):\n super()['_description'] = description\n\n @property\n def argDes(self: 'CfgEntry'):\n return self['_ad']\n\n @argDes.setter\n def argDes(self: 'CfgEntry',\n ad: ArgDescriptor):\n super()['_ad'] = ad\n\n @property\n def flags(self: 'CfgEntry') -> int:\n return super()['_flags']\n\n @flags.setter\n def flags(self: 'CfgEntry',\n flags: int):\n super()['_flags'] = flags\n\n @property\n def admin(self: 'CfgEntry'):\n return super()['_admin']\n\n @admin.setter\n def admin(self: 'CfgEntry',\n admin: CfgAdmin):\n super()['_admin'] = admin\n\n\nclass Configuration():\n \"\"\"\n classdocs TBA\n \"\"\"\n\n def __init__(self: 'Configuration') -> None:\n \"\"\"\n \"\"\"\n\n self._cfg: Dict[str, CfgEntry] = {}\n # Gives default values for critical configuration entries that may not\n # be specified elsewhere\n default_cfg = ((debug, False), (profile, False), (noupdate, False),\n (nologging, False), (noargs, False), (noconfig, True),\n (cmdargs, None), (cmdfile, None), (version, '0.1'),\n (release, '0.1.0'), (verbose, 0), (uac, None),\n (test, None))\n default_admin = CfgAdmin(overideable=True)\n for k, v in default_cfg:\n if k not in self._cfg:\n self._cfg[k] = CfgEntry(k,\n v,\n default_admin)\n\n # Load the master preliminary configuration - All the work is done\n # within the loaded module as a result of importing it so we don't need\n # to use anything from it.\n 
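        # At this point self._cfg is guaranteed to hold a CfgEntry for every
        # critical key seeded above (debug, profile, verbose, ...), so the
        # disk-based sources loaded next can override values freely without
        # introducing missing-key failures for those entries.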
print(sys.path)\n import gvConfig.master\n gvConfig.master.Master()() # Load all the disk based configuration\n\n # Get the command line arguments if supported by this application.\n # By this time we will know whether the application supports command\n # line arguments.\n\n if not self._cfg.get(noargs):\n self._cfg.update(_a().Parse())\n\n @property\n def cfg(self: 'Configuration') -> Dict[str, CfgEntry]:\n return self._cfg\n\n def setMember(self: 'Configuration',\n key: str,\n value: Any) -> None:\n \"\"\"\n Sets an entry in the configuration given the key and value as separate\n entities. Default value are used for the ArgDescriptor and CfgAdmin\n properties of CfgEntry.\n \"\"\"\n if key not in self._cfg:\n self._cfg[key] = CfgEntry(key,\n value)\n\n def add(self: 'Configuration',\n entry: Mapping[str, Any]) -> None:\n \"\"\"\n Adds the contents of a Mapping to the configuration. The values are\n converted to CfgEntries if necessary.\n \"\"\"\n for k, v in entry:\n if self._cfg.get(k) is None:\n raise(KeyError,\n f'{k} is already in configuration - cannot add')\n self._cfg[k] = CfgEntry(v) if not isinstance(v, CfgEntry) else v\n\n def delete(self: 'Configuration',\n entry: CfgEntry):\n if isinstance(entry,\n CfgEntry):\n if entry.key() not in self._cfg: # Got an entry object\n raise(KeyError,\n f'{entry.key} is not in configuration - cannot delete')\n else:\n del self._cfg[entry.key()]\n else: # Got a text key\n if entry.key() in self._cfg:\n raise(KeyError,\n f'{entry} is not in configuration - cannot delete')\n else:\n del self._cfg[entry]\n\n def get(self: 'Configuration',\n key: str) -> Optional[Any]:\n return self._cfg.get(key)\n\n def len(self) -> int:\n return len(self._cfg)\n#TODO: Load configuration data from file\n","sub_path":"lib/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":17260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"90585169","text":"from keras.layers.core import Dense, SpatialDropout1D\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.pooling import GlobalMaxPooling1D\nfrom keras.models import Sequential\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\nimport collections\nimport nltk\nimport numpy as np\nfrom make_tensorboard import make_tensorboard\nimport os\nimport codecs\n\n\nnp.random.seed(42)\n\nINPUT_FILE = \"data/umich-sentiment-train.txt\"\nGLOVE_MODEL = \"data/glove.6B.300d.txt\"\nVOCAB_SIZE = 5000\nEMBED_SIZE = 300\nNUM_FILTERS = 256\nNUM_WORDS = 3\nBATCH_SIZE = 64\nNUM_EPOCHS = 10\n\ncounter = collections.Counter()\nfin = codecs.open(INPUT_FILE, \"r\", encoding='utf-8')\nmaxlen = 0\nfor line in fin:\n _, sent = line.strip().split(\"\\t\")\n words = [x.lower() for x in nltk.word_tokenize(sent)]\n if len(words) > maxlen:\n maxlen = len(words)\n for word in words:\n counter[word] += 1\nfin.close()\n\nword2index = collections.defaultdict(int)\nfor wid, word in enumerate(counter.most_common(VOCAB_SIZE)):\n word2index[word[0]] = wid + 1\nvocab_sz = len(word2index) + 1\nindex2word = {v: k for k, v in word2index.items()}\n\nxs, ys = [], []\nfin = codecs.open(INPUT_FILE, \"r\", encoding='utf-8')\nfor line in fin:\n label, sent = line.strip().split(\"\\t\")\n ys.append(int(label))\n words = [x.lower() for x in nltk.word_tokenize(sent)]\n wids = [word2index[word] for word in words]\n 
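    # word2index is a defaultdict(int), so any word outside the VOCAB_SIZE most
    # common ones maps to index 0 here, acting as a shared out-of-vocabulary slot.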
xs.append(wids)\nfin.close()\nX = pad_sequences(xs, maxlen=maxlen)\nY = np_utils.to_categorical(ys)\n\nXtrain, Xtest, Ytrain, Ytest = \\\n train_test_split(X, Y, test_size=0.3, random_state=42)\nprint(Xtrain.shape, Xtest.shape, Ytrain.shape, Ytest.shape)\n\n# load GloVe vectors\nword2emb = {}\nfglove = open(GLOVE_MODEL, \"rb\")\nfor line in fglove:\n cols = line.strip().split()\n word = cols[0].decode(\"utf-8\") # decode the bytes key so lookups against str tokens succeed\n embedding = np.array(cols[1:], dtype=\"float32\")\n word2emb[word] = embedding\nfglove.close()\nembedding_weights = np.zeros((vocab_sz, EMBED_SIZE))\nfor word, index in word2index.items():\n try:\n embedding_weights[index, :] = word2emb[word]\n except KeyError:\n pass\n\nmodel = Sequential()\nmodel.add(Embedding(vocab_sz, EMBED_SIZE, input_length=maxlen,\n weights=[embedding_weights],\n trainable=True))\nmodel.add(SpatialDropout1D(0.2))\nmodel.add(Conv1D(filters=NUM_FILTERS, kernel_size=NUM_WORDS,\n activation=\"relu\"))\nmodel.add(GlobalMaxPooling1D())\nmodel.add(Dense(2, activation=\"softmax\"))\n\nmodel.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"])\n\ntensorboard, log_dir = make_tensorboard(\n set_dir_name='keras_finetune_glove_embeddings',\n embeddings_freq=1,\n embeddings_metadata='embedding/metadata.tsv'\n)\n\nhistory = model.fit(Xtrain, Ytrain, batch_size=BATCH_SIZE,\n epochs=NUM_EPOCHS,\n callbacks=[tensorboard],\n validation_data=(Xtest, Ytest))\n\n\n# evaluate model\nscore = model.evaluate(Xtest, Ytest, verbose=1)\nprint(\"Test score: {:.3f}, accuracy: {:.3f}\".format(score[0], score[1]))\n\nword_list = []\nfor wid, word in enumerate(counter.most_common(VOCAB_SIZE)):\n word_list.append(word[0])\nlog_dir = log_dir + \"/embedding/\"\nif not os.path.exists(log_dir):\n os.mkdir(log_dir)\nmeta_data_file = log_dir + 'metadata.tsv'\nwith open(meta_data_file, 'w', encoding='utf8') as f:\n for name in word_list:\n f.write('%s\\n' % str(name))\n f.write('__UNK__\\n')\n","sub_path":"Chapter05/finetune_glove_embeddings_tensorboard_embedding.py","file_name":"finetune_glove_embeddings_tensorboard_embedding.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"541875545","text":"import sys\nfrom flask import jsonify\n\nfrom errors.operation_outcome import OperationOutcome\nfrom db import get_db_connection\n\n\nclass Resource:\n resource = None\n\n def __init__(self, id=None, resource=None):\n \"\"\"Initializes a Resource resource instance.\n The ID must be provided if the resource already exists.\n \"\"\"\n if not resource and not id:\n raise OperationOutcome('An id or a resource must be provided')\n self.db = get_db_connection()\n self.id = id\n self.resource = resource\n self.resource_type = type(self).__name__\n\n def json(self):\n \"\"\"Returns the JSON serialization of the Resource resource\"\"\"\n if self.resource:\n return jsonify(self.resource)\n return jsonify({'id': self.id})\n\n def create(self):\n \"\"\"Creates a Resource instance in fhirbase.\"\"\"\n if not self.resource:\n raise OperationOutcome('Missing resource data \\\nto create a Resource')\n if self.id:\n raise OperationOutcome('Cannot create a resource with an ID')\n\n if self.resource.get('id'):\n del self.resource['id']\n self.resource = self.db.create({\n 'resourceType': self.resource_type,\n **self.resource\n })\n self.id = self.resource['id']\n return self\n\n def read(self):\n \"\"\"Returns a Resource instance filled with the fhirbase data.\"\"\"\n if not self.id:\n raise 
OperationOutcome('Resource ID is required')\n\n self.resource = self.db.read({\n 'resourceType': self.resource_type,\n 'id': self.id\n })\n return self\n\n def update(self, resource):\n \"\"\"Updates a Resource instance in fhirbase.\n If provided, resource.id must match self.id\"\"\"\n if not resource:\n raise OperationOutcome('Resource data is required \\\nto update a resource')\n if not self.id:\n if resource.get('id'):\n del resource['id']\n self.resource = self.db.create({\n 'resourceType': self.resource_type,\n **resource\n })\n self.id = self.resource['id']\n else:\n if self.read().resource is None:\n raise OperationOutcome(f'Resource {self.id} does not exist')\n self.resource = self.db.update({\n 'id': self.id,\n 'resourceType': self.resource_type,\n **resource\n })\n return self\n\n def patch(self, patch):\n \"\"\"Performs a patch operation on a Resource instance in fhirbase.\"\"\"\n if not patch:\n raise OperationOutcome('Patch data is required \\\nto patch a resource')\n if not self.id:\n raise OperationOutcome('Resource ID is required \\\nto patch a resource')\n\n self.read()\n self.resource = self.db.update({\n 'resourceType': self.resource_type,\n **self.resource,\n **patch\n })\n return self\n\n def delete(self):\n if not self.id:\n raise OperationOutcome('Resource ID is required to delete it')\n\n self.resource = self.db.delete({\n 'resourceType': self.resource_type,\n 'id': self.id\n })\n self.id = None\n return self\n\n def search(self, params):\n query = f'SELECT * from {self.resource_type} r'\n args = []\n for param, value in params.items():\n jsonb_path = f\"{{ {param.replace('.', ',')} }}\"\n query += f' WHERE r.resource#>>%s = %s'\n args.extend([jsonb_path, value])\n with self.db.execute(query, params=args) as cursor:\n print(' ----> QUERY PG :: ', cursor.query, flush=True)\n iter_results = cursor.fetchall()\n\n results = list(iter_results)\n return results\n\n def history(self):\n pass\n","sub_path":"fhir-api/models/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"160329656","text":"import module\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport sys\nimport os\nimport data\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.nn import functional as F\nimport torchvision.utils as vutils\nfrom torch.utils.data import Subset, DataLoader\n\n\nclass Reconstructor(object):\n def __init__(self, args):\n self.reconstruction_path = args.reconstruction_path\n if not os.path.exists(self.reconstruction_path):\n os.makedirs(self.reconstruction_path)\n\n self.beta = args.beta\n self.train_batch_size = args.train_batch_size\n self.test_batch_size = args.test_batch_size\n self.epochs = args.epochs\n self.early_stop = args.early_stop\n self.early_stop_observation_period = args.early_stop_observation_period\n self.use_scheduler = False\n self.print_training = args.print_training\n self.class_num = args.class_num\n self.disentangle_with_reparameterization = args.disentangle_with_reparameterization\n\n self.z_dim = args.z_dim\n self.disc_input_dim = int(self.z_dim / 2)\n self.class_idx = range(0, self.disc_input_dim)\n self.membership_idx = range(self.disc_input_dim, self.z_dim)\n\n self.nets = dict()\n\n if args.dataset in ['MNIST', 'Fashion-MNIST', 'CIFAR-10', 'SVHN']:\n if args.dataset in ['MNIST', 'Fashion-MNIST']:\n self.num_channels = 1\n elif args.dataset in 
['CIFAR-10', 'SVHN']:\n self.num_channels = 3\n\n self.nets['encoder'] = module.VAEConvEncoder(self.z_dim, self.num_channels)\n self.nets['decoder'] = module.VAEConvDecoder(self.z_dim, self.num_channels)\n\n elif args.dataset in ['adult', 'location']:\n self.nets['encoder'] = module.VAEFCEncoder(args.encoder_input_dim, self.z_dim)\n self.nets['decoder'] = module.FCDecoder(args.encoder_input_dim, self.z_dim)\n\n self.discs = {\n 'class_fz': module.ClassDiscriminator(self.z_dim, args.class_num),\n 'class_cz': module.ClassDiscriminator(self.disc_input_dim, args.class_num),\n 'class_mz': module.ClassDiscriminator(self.disc_input_dim, args.class_num),\n\n 'membership_fz': module.MembershipDiscriminator(self.z_dim + args.class_num, 1),\n 'membership_cz': module.MembershipDiscriminator(self.disc_input_dim + args.class_num, 1),\n 'membership_mz': module.MembershipDiscriminator(self.disc_input_dim + args.class_num, 1),\n }\n\n self.recon_loss = self.get_loss_function()\n self.class_loss = nn.CrossEntropyLoss(reduction='sum')\n self.membership_loss = nn.BCEWithLogitsLoss(reduction='sum')\n\n # optimizer\n self.optimizer = dict()\n for net_type in self.nets:\n self.optimizer[net_type] = optim.Adam(self.nets[net_type].parameters(), lr=args.recon_lr,\n betas=(0.5, 0.999))\n self.discriminator_lr = args.disc_lr\n for disc_type in self.discs:\n self.optimizer[disc_type] = optim.Adam(self.discs[disc_type].parameters(), lr=self.discriminator_lr,\n betas=(0.5, 0.999))\n\n self.weights = {\n 'recon': args.recon_weight,\n 'class_cz': args.class_cz_weight,\n 'class_mz': args.class_mz_weight,\n 'membership_cz': args.membership_cz_weight,\n 'membership_mz': args.membership_mz_weight,\n }\n\n self.scheduler_enc = StepLR(self.optimizer['encoder'], step_size=50, gamma=0.1)\n self.scheduler_dec = StepLR(self.optimizer['decoder'], step_size=50, gamma=0.1)\n\n # to device\n self.device = torch.device(\"cuda:{}\".format(args.gpu_id))\n for net_type in self.nets:\n self.nets[net_type] = self.nets[net_type].to(self.device)\n for disc_type in self.discs:\n self.discs[disc_type] = self.discs[disc_type].to(self.device)\n\n self.disentangle = (self.weights['class_cz'] + self.weights['class_mz']\n + self.weights['membership_cz'] + self.weights['membership_mz'] > 0)\n\n self.start_epoch = 0\n self.best_valid_loss = float(\"inf\")\n # self.train_loss = 0\n self.early_stop_count = 0\n\n self.acc_dict = {\n 'class_fz': 0, 'class_cz': 0, 'class_mz': 0,\n 'membership_fz': 0, 'membership_cz': 0, 'membership_mz': 0,\n }\n self.best_acc_dict = {}\n\n if 'cuda' in str(self.device):\n cudnn.benchmark = True\n\n if args.resume:\n print('==> Resuming from checkpoint..')\n try:\n self.load()\n except FileNotFoundError:\n print('There is no pre-trained model; Train model from scratch')\n\n #########################\n # -- Base operations -- #\n #########################\n def load(self):\n # print('====> Loading checkpoint {}'.format(self.reconstruction_path))\n checkpoint = torch.load(os.path.join(self.reconstruction_path, 'ckpt.pth'))\n for net_type in self.nets:\n self.nets[net_type].load_state_dict(checkpoint[net_type])\n for disc_type in self.discs:\n self.discs[disc_type].load_state_dict(checkpoint[disc_type])\n self.start_epoch = checkpoint['epoch']\n\n def train_epoch(self, train_ref_loader, epoch):\n for net_type in self.nets:\n self.nets[net_type].train()\n for disc_type in self.discs:\n self.discs[disc_type].train()\n\n total = 0\n\n losses = {\n 'MSE': 0., 'KLD': 0.,\n 'class_fz': 0., 'class_cz': 0., 'class_mz': 0.,\n 
'membership_fz': 0., 'membership_cz': 0., 'membership_mz': 0.,\n }\n\n corrects = {\n 'MSE': 0., 'KLD': 0.,\n 'class_fz': 0., 'class_cz': 0., 'class_mz': 0.,\n 'membership_fz': 0., 'membership_cz': 0., 'membership_mz': 0.,\n }\n\n for batch_idx, (inputs, targets, inputs_ref, targets_ref) in enumerate(train_ref_loader):\n inputs, targets = inputs.to(self.device), targets.to(self.device)\n inputs_ref, targets_ref = inputs_ref.to(self.device), targets_ref.to(self.device)\n\n total += targets.size(0)\n\n # ---- Reconstruction (Encoder & Decoder) ---- #\n recon_loss, MSE, KLD = self.train_reconstructor(inputs)\n losses['MSE'] += MSE\n losses['KLD'] += KLD\n\n # ---- Class discriminators ---- #\n correct_class_fz, loss_class_fz = self.train_disc_class_fz(inputs, targets)\n correct_class_cz, loss_class_cz = self.train_disc_class_cz(inputs, targets)\n correct_class_mz, loss_class_mz = self.train_disc_class_mz(inputs, targets)\n\n corrects['class_fz'] += correct_class_fz\n corrects['class_cz'] += correct_class_cz\n corrects['class_mz'] += correct_class_mz\n losses['class_fz'] += loss_class_fz\n losses['class_cz'] += loss_class_cz\n losses['class_mz'] += loss_class_mz\n\n # ---- Membership discriminators ---- #\n correct_membership_fz, loss_membership_fz = self.train_disc_membership_fz(inputs, targets,\n inputs_ref, targets_ref)\n correct_membership_cz, loss_membership_cz = self.train_disc_membership_cz(inputs, targets,\n inputs_ref, targets_ref)\n correct_membership_mz, loss_membership_mz = self.train_disc_membership_mz(inputs, targets,\n inputs_ref, targets_ref)\n corrects['membership_fz'] += correct_membership_fz\n corrects['membership_cz'] += correct_membership_cz\n corrects['membership_mz'] += correct_membership_mz\n losses['membership_fz'] += loss_membership_fz\n losses['membership_cz'] += loss_membership_cz\n losses['membership_mz'] += loss_membership_mz\n\n if self.disentangle:\n self.disentangle_z(inputs, targets)\n\n # ---- Swap membership info ---- #\n z_tr = self.inference_z(inputs)\n cz_tr, mz_tr = self.split_class_membership(z_tr)\n\n z_re = self.inference_z(inputs_ref)\n cz_re, mz_re = self.split_class_membership(z_re)\n\n z_ctr_mre = torch.cat([cz_tr, mz_re])\n z_cre_mtr = torch.cat([cz_re, mz_tr])\n\n recon_ctr_mre = self.nets['decoder'](z_ctr_mre)\n recon_cre_mtr = self.nets['decoder'](z_cre_mtr)\n\n\n\n # todo : loop\n self.acc_dict['class_fz'] = corrects['class_fz'] / total\n self.acc_dict['class_cz'] = corrects['class_cz'] / total\n self.acc_dict['class_mz'] = corrects['class_mz'] / total\n\n self.acc_dict['membership_fz'] = corrects['membership_fz'] / (2 * total)\n self.acc_dict['membership_cz'] = corrects['membership_cz'] / (2 * total)\n self.acc_dict['membership_mz'] = corrects['membership_mz'] / (2 * total)\n\n if self.print_training:\n print(\n '\\nEpoch: {:>3}, Acc) Class (fz, cz, mz) : {:.4f}, {:.4f}, {:.4f}, Membership (fz, cz, mz) : {:.4f}, {:.4f}, {:.4f}'.format(\n epoch, self.acc_dict['class_fz'], self.acc_dict['class_cz'], self.acc_dict['class_mz'],\n self.acc_dict['membership_fz'], self.acc_dict['membership_cz'], self.acc_dict['membership_mz'], ))\n\n for loss_type in losses:\n losses[loss_type] = losses[loss_type] / (batch_idx + 1)\n print(\n 'Losses) MSE: {:.2f}, KLD: {:.2f}, Class (fz, cz, mz): {:.2f}, {:.2f}, {:.2f}, Membership (fz, cz, mz): {:.2f}, {:.2f}, {:.2f},'.format(\n losses['MSE'], losses['KLD'], losses['class_fz'], losses['class_cz'], losses['class_mz'],\n losses['membership_fz'], losses['membership_cz'], losses['membership_mz'], ))\n\n def 
train_reconstructor(self, inputs):\n self.optimizer['encoder'].zero_grad()\n self.optimizer['decoder'].zero_grad()\n mu, logvar = self.nets['encoder'](inputs)\n z = self.reparameterize(mu, logvar)\n recons = self.nets['decoder'](z)\n recon_loss, MSE, KLD = self.recon_loss(recons, inputs, mu, logvar)\n recon_loss = self.weights['recon'] * recon_loss\n recon_loss.backward()\n self.optimizer['encoder'].step()\n self.optimizer['decoder'].step()\n return recon_loss.item(), MSE.item(), KLD.item()\n\n def train_disc_class_fz(self, inputs, targets):\n self.optimizer['class_fz'].zero_grad()\n z = self.inference_z(inputs)\n pred = self.discs['class_fz'](z)\n class_loss_full = self.class_loss(pred, targets)\n class_loss_full.backward()\n self.optimizer['class_fz'].step()\n\n _, pred_class_from_full = pred.max(1)\n return pred_class_from_full.eq(targets).sum().item(), class_loss_full.item()\n\n def train_disc_class_cz(self, inputs, targets):\n self.optimizer['class_cz'].zero_grad()\n z = self.inference_z(inputs)\n class_z, _ = self.split_class_membership(z)\n pred = self.discs['class_cz'](class_z)\n class_loss = self.class_loss(pred, targets)\n class_loss.backward()\n self.optimizer['class_cz'].step()\n\n _, pred_class = pred.max(1)\n return pred_class.eq(targets).sum().item(), class_loss.item()\n\n def train_disc_class_mz(self, inputs, targets):\n self.optimizer['class_mz'].zero_grad()\n z = self.inference_z(inputs)\n _, membership_z = self.split_class_membership(z)\n pred = self.discs['class_mz'](membership_z)\n class_loss_membership = self.class_loss(pred, targets)\n class_loss_membership.backward()\n self.optimizer['class_mz'].step()\n\n _, pred_class_from_membership = pred.max(1)\n return pred_class_from_membership.eq(targets).sum().item(), class_loss_membership.item()\n\n def train_disc_membership_fz(self, inputs, targets, inputs_ref, targets_ref):\n self.optimizer['membership_fz'].zero_grad()\n\n z = self.inference_z(inputs)\n targets_onehot = torch.zeros((len(targets), self.class_num)).to(self.device)\n targets_onehot = targets_onehot.scatter_(1, targets.reshape((-1, 1)), 1)\n z = torch.cat((z, targets_onehot), dim=1)\n pred = self.discs['membership_fz'](z)\n in_loss = self.membership_loss(pred, torch.ones_like(pred))\n\n z_ref = self.inference_z(inputs_ref)\n targets_ref_onehot = torch.zeros((len(targets_ref), self.class_num)).to(self.device)\n targets_ref_onehot = targets_ref_onehot.scatter_(1, targets_ref.reshape((-1, 1)), 1)\n z_ref = torch.cat((z_ref, targets_ref_onehot), dim=1)\n pred_ref = self.discs['membership_fz'](z_ref)\n out_loss = self.membership_loss(pred_ref, torch.zeros_like(pred_ref))\n\n membership_loss = in_loss + out_loss\n membership_loss.backward()\n self.optimizer['membership_fz'].step()\n\n pred = pred.cpu().detach().numpy().squeeze()\n pred_ref = pred_ref.cpu().detach().numpy().squeeze()\n pred_concat = np.concatenate((pred, pred_ref))\n inout_concat = np.concatenate((np.ones_like(pred), np.zeros_like(pred_ref)))\n\n return np.sum(inout_concat == np.round(pred_concat)), membership_loss.item()\n\n def train_disc_membership_cz(self, inputs, targets, inputs_ref, targets_ref):\n self.optimizer['membership_cz'].zero_grad()\n\n z = self.inference_z(inputs)\n class_z, _ = self.split_class_membership(z)\n targets_onehot = torch.zeros((len(targets), self.class_num)).to(self.device)\n targets_onehot = targets_onehot.scatter_(1, targets.reshape((-1, 1)), 1)\n class_z = torch.cat((class_z, targets_onehot), dim=1)\n pred = self.discs['membership_cz'](class_z)\n in_loss = 
self.membership_loss(pred, torch.ones_like(pred))\n\n z_ref = self.inference_z(inputs_ref)\n class_z_ref, _ = self.split_class_membership(z_ref)\n targets_ref_onehot = torch.zeros((len(targets_ref), self.class_num)).to(self.device)\n targets_ref_onehot = targets_ref_onehot.scatter_(1, targets_ref.reshape((-1, 1)), 1)\n class_z_ref = torch.cat((class_z_ref, targets_ref_onehot), dim=1)\n pred_ref = self.discs['membership_cz'](class_z_ref)\n out_loss = self.membership_loss(pred_ref, torch.zeros_like(pred_ref))\n\n membership_loss = in_loss + out_loss\n membership_loss.backward()\n self.optimizer['membership_cz'].step()\n\n pred = pred.cpu().detach().numpy().squeeze()\n pred_ref = pred_ref.cpu().detach().numpy().squeeze()\n pred_concat = np.concatenate((pred, pred_ref))\n inout_concat = np.concatenate((np.ones_like(pred), np.zeros_like(pred_ref)))\n\n return np.sum(inout_concat == np.round(pred_concat)), membership_loss.item()\n\n def train_disc_membership_mz(self, inputs, targets, inputs_ref, targets_ref):\n self.optimizer['membership_mz'].zero_grad()\n\n z = self.inference_z(inputs)\n _, membership_z = self.split_class_membership(z)\n targets_onehot = torch.zeros((len(targets), self.class_num)).to(self.device)\n targets_onehot = targets_onehot.scatter_(1, targets.reshape((-1, 1)), 1)\n membership_z = torch.cat((membership_z, targets_onehot), dim=1)\n pred = self.discs['membership_mz'](membership_z)\n in_loss = self.membership_loss(pred, torch.ones_like(pred))\n\n z_ref = self.inference_z(inputs_ref)\n _, membership_z_ref = self.split_class_membership(z_ref)\n targets_ref_onehot = torch.zeros((len(targets_ref), self.class_num)).to(self.device)\n targets_ref_onehot = targets_ref_onehot.scatter_(1, targets_ref.reshape((-1, 1)), 1)\n membership_z_ref = torch.cat((membership_z_ref, targets_ref_onehot), dim=1)\n pred_ref = self.discs['membership_mz'](membership_z_ref)\n out_loss = self.membership_loss(pred_ref, torch.zeros_like(pred_ref))\n\n membership_loss = in_loss + out_loss\n membership_loss.backward()\n self.optimizer['membership_mz'].step()\n\n pred = pred.cpu().detach().numpy().squeeze()\n pred_ref = pred_ref.cpu().detach().numpy().squeeze()\n pred_concat = np.concatenate((pred, pred_ref))\n inout_concat = np.concatenate((np.ones_like(pred), np.zeros_like(pred_ref)))\n\n return np.sum(inout_concat == np.round(pred_concat)), membership_loss.item()\n\n def disentangle_z(self, inputs, targets):\n self.optimizer['encoder'].zero_grad()\n loss = 0\n\n z = self.inference_z(inputs)\n cz, mz = self.split_class_membership(z)\n targets_onehot = torch.zeros((len(targets), self.class_num)).to(self.device)\n targets_onehot = targets_onehot.scatter_(1, targets.reshape((-1, 1)), 1)\n\n if self.weights['class_cz'] != 0:\n pred = self.discs['class_cz'](cz)\n loss += self.weights['class_cz'] * self.class_loss(pred, targets)\n\n if self.weights['class_mz'] != 0:\n pred = self.discs['class_mz'](mz)\n loss += -self.weights['class_mz'] * self.class_loss(pred, targets)\n\n if self.weights['membership_cz'] != 0:\n pred = self.discs['membership_cz'](torch.cat((cz, targets_onehot), dim=1))\n # pred = self.discs['membership_cz'](cz)\n loss += - self.weights['membership_cz'] * self.membership_loss(pred, torch.ones_like(pred))\n\n if self.weights['membership_mz'] != 0:\n pred = self.discs['membership_mz'](torch.cat((mz, targets_onehot), dim=1))\n # pred = self.discs['membership_mz'](mz)\n loss += self.weights['membership_mz'] * self.membership_loss(pred, torch.ones_like(pred))\n\n loss.backward()\n 
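        # Only the encoder optimizer is zeroed and stepped in this method, so
        # although backward() also populates discriminator gradients, only the
        # encoder weights actually move. The sign pattern above is the
        # adversarial part: the positive terms keep class evidence in cz and
        # membership evidence in mz, while the negated terms push the encoder
        # to hide class evidence in mz and membership evidence in cz.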
self.optimizer['encoder'].step()\n\n def inference(self, loader, epoch, type='valid'):\n for net_type in self.nets:\n self.nets[net_type].eval()\n for disc_type in self.discs:\n self.discs[disc_type].eval()\n\n loss = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(loader):\n inputs, targets = inputs.to(self.device), targets.to(self.device)\n\n mu, logvar = self.nets['encoder'](inputs)\n z = self.reparameterize(mu, logvar)\n\n recons = self.nets['decoder'](z)\n recon_loss, MSE, KLD = self.recon_loss(recons, inputs, mu, logvar)\n loss += recon_loss.item()\n\n if type == 'valid':\n if loss < self.best_valid_loss:\n state = {\n 'best_valid_loss': loss,\n 'epoch': epoch,\n }\n\n for net_type in self.nets:\n state[net_type] = self.nets[net_type].state_dict()\n for disc_type in self.discs:\n state[disc_type] = self.discs[disc_type].state_dict()\n\n torch.save(state, os.path.join(self.reconstruction_path, 'ckpt.pth'))\n self.best_valid_loss = loss\n self.early_stop_count = 0\n self.best_acc_dict = self.acc_dict\n\n np.save(os.path.join(self.reconstruction_path, 'acc.npy'), self.best_acc_dict)\n vutils.save_image(recons, os.path.join(self.reconstruction_path, '{}.png'.format(epoch)), nrow=10)\n\n else:\n self.early_stop_count += 1\n if self.print_training:\n print('Early stop count: {}'.format(self.early_stop_count))\n\n if self.early_stop_count == self.early_stop_observation_period:\n print(self.best_acc_dict)\n if self.print_training:\n print('Early stop count == {}; Terminate training\\n'.format(self.early_stop_observation_period))\n self.train_flag = False\n\n def train(self, train_set, valid_set=None, ref_set=None):\n print('==> Start training {}'.format(self.reconstruction_path))\n self.train_flag = True\n if self.early_stop:\n valid_loader = DataLoader(valid_set, batch_size=self.train_batch_size, shuffle=True, num_workers=2)\n for epoch in range(self.start_epoch, self.start_epoch + self.epochs):\n permutated_idx = np.random.permutation(ref_set.__len__())\n ref_set = Subset(ref_set, permutated_idx)\n train_ref_set = data.DoubleDataset(train_set, ref_set)\n train_ref_loader = DataLoader(train_ref_set, batch_size=self.train_batch_size, shuffle=True, num_workers=2)\n if self.train_flag:\n self.train_epoch(train_ref_loader, epoch)\n if self.use_scheduler:\n self.scheduler_enc.step()\n self.scheduler_dec.step()\n if self.early_stop:\n self.inference(valid_loader, epoch, type='valid')\n else:\n break\n\n def reconstruct(self, dataset_dict, reconstruction_type_list):\n try:\n self.load()\n except FileNotFoundError:\n print('There is no pre-trained model; First, train a reconstructor.')\n sys.exit(1)\n self.nets['encoder'].eval()\n self.nets['decoder'].eval()\n\n mse_list = []\n recon_dict = dict()\n\n for recon_idx, reconstruction_type in enumerate(reconstruction_type_list):\n recon_datasets_dict = {}\n for dataset_type, dataset in dataset_dict.items():\n loader = DataLoader(dataset, batch_size=self.test_batch_size, shuffle=False, num_workers=2)\n raws = []\n recons = []\n labels = []\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(loader):\n inputs = inputs.to(self.device)\n mu, logvar = self.nets['encoder'](inputs)\n\n z = torch.zeros_like(mu).to(self.device)\n\n mu_class, mu_membership = self.split_class_membership(mu)\n logvar_class, logvar_membership = self.split_class_membership(logvar)\n\n if reconstruction_type == 'cb_mb':\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership\n elif reconstruction_type == 'cb_mz':\n 
z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = torch.zeros_like(mu_membership).to(self.device)\n elif reconstruction_type == 'cz_mb':\n z[:, self.class_idx] = torch.zeros_like(mu_class).to(self.device)\n z[:, self.membership_idx] = mu_membership\n elif reconstruction_type == 'cs1.2_ms0.8': # scaling\n z[:, self.class_idx] = mu_class * 1.2\n z[:, self.membership_idx] = mu_membership * 0.8\n elif reconstruction_type == 'cb_ms0.8': # scaling\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership * 0.8\n elif reconstruction_type == 'cb_ms0.5': # scaling\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership * 0.5\n elif reconstruction_type == 'cb_ms0.25': # scaling\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership * 0.25\n elif reconstruction_type == 'cb_ms0.1': # scaling\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership * 0.1\n elif reconstruction_type == 'cb_mb_n1': # + noise\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership + torch.randn_like(mu_membership).to(self.device)\n elif reconstruction_type == 'cb_mb_n0.5': # + noise\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership + 0.5 * torch.randn_like(mu_membership).to(self.device)\n elif reconstruction_type == 'cb_mb_n0.1': # + noise\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership + 0.1 * torch.randn_like(mu_membership).to(self.device)\n elif reconstruction_type == 'cb_mr':\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = self.reparameterize(mu_membership, logvar_membership)\n elif reconstruction_type == 'cb_ms0.5_n0.5': # scaling\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership * 0.5 + 0.5 * torch.randn_like(mu_membership).to(self.device)\n elif reconstruction_type == 'cb_ms0.5_n0.1': # scaling\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership * 0.5 + 0.1 * torch.randn_like(mu_membership).to(self.device)\n elif reconstruction_type == 'cb_ms0.8_n0.2': # scaling\n z[:, self.class_idx] = mu_class\n z[:, self.membership_idx] = mu_membership * 0.8 + 0.2 * torch.randn_like(mu_membership).to(self.device)\n elif reconstruction_type == 'cb_mConstant':\n z[:, self.class_idx] = mu_class\n for idx in range(z.shape[0]):\n z[idx, self.membership_idx] = mu_membership[0]\n elif reconstruction_type == 'cb_mConstant0.8':\n z[:, self.class_idx] = mu_class\n mu_membership_constant = 0.8 * mu_membership[0]\n for idx in range(z.shape[0]):\n z[idx, self.membership_idx] = mu_membership_constant\n elif reconstruction_type == 'cb_mInter0.8':\n z[:, self.class_idx] = mu_class\n mu_membership_constant = 0.2 * mu_membership[0]\n for idx in range(z.shape[0]):\n z[idx, self.membership_idx] = 0.8 * mu_membership[idx] + mu_membership_constant\n\n elif reconstruction_type == 'cb_mAvg':\n z[:, self.class_idx] = mu_class\n mu_membership_constant = torch.mean(mu_membership, dim=0)\n for idx in range(z.shape[0]):\n z[idx, self.membership_idx] = mu_membership_constant\n\n elif reconstruction_type == 'cb_mr1.2':\n z[:, self.class_idx] = mu_class\n std = torch.exp(0.5 * logvar_membership)\n eps = torch.randn_like(std)\n z[:, self.membership_idx] = mu_membership + 1.2 * std * eps\n\n elif reconstruction_type == 'cb_mr2.0':\n z[:, self.class_idx] = mu_class\n std = torch.exp(0.5 * logvar_membership)\n eps = torch.randn_like(std)\n z[:, self.membership_idx] = mu_membership + 2. 
* std * eps\n\n # print(mu_membership.shape)\n # print(mu_membership[0].shape)\n # z[:, self.membership_idx] = mu_membership[0]\n # print(torch.repeat_interleave(mu_membership[0], mu_membership.shape[0], 1).shape)\n # sys.exit(1)\n\n\n # if reconstruction_type == 'cb_mb_sb':\n # z[:, self.class_idx] = mu_class\n # z[:, self.membership_idx] = mu_membership\n # z[:, self.style_idx] = mu_style\n #\n # elif reconstruction_type == 'cb_mb_sz':\n # z[:, self.class_idx] = mu_class\n # z[:, self.membership_idx] = mu_membership\n # z[:, self.style_idx] = torch.zeros_like(mu_style).to(self.device)\n #\n # elif reconstruction_type == 'cb_mz_sb':\n # z[:, self.class_idx] = mu_class\n # z[:, self.membership_idx] = torch.zeros_like(mu_membership).to(self.device)\n # z[:, self.style_idx] = mu_style\n #\n # elif reconstruction_type == 'cb_mz_sz':\n # z[:, self.class_idx] = mu_class\n # z[:, self.membership_idx] = torch.zeros_like(mu_membership).to(self.device)\n # z[:, self.style_idx] = torch.zeros_like(mu_style).to(self.device)\n #\n # elif reconstruction_type == 'cz_mb_sb':\n # z[:, self.class_idx] = torch.zeros_like(mu_class).to(self.device)\n # z[:, self.membership_idx] = mu_membership\n # z[:, self.style_idx] = mu_style\n #\n # elif reconstruction_type == 'cz_mb_sz':\n # z[:, self.class_idx] = torch.zeros_like(mu_class).to(self.device)\n # z[:, self.membership_idx] = mu_membership\n # z[:, self.style_idx] = torch.zeros_like(mu_style).to(self.device)\n\n\n #\n # elif reconstruction_type == 'cr_mb':\n # z[:, self.class_idx] = self.reparameterize(mu_class, logvar_class)\n # z[:, self.membership_idx] = mu_membership\n #\n # elif reconstruction_type == 'cr_mr':\n # z[:, self.class_idx] = self.reparameterize(mu_class, logvar_class)\n # z[:, self.membership_idx] = self.reparameterize(mu_membership, logvar_membership)\n #\n # elif reconstruction_type == 'cb_mn':\n # z[:, self.class_idx] = mu_class\n # z[:, self.membership_idx] = torch.randn_like(mu_membership).to(self.device)\n\n recons_batch = self.nets['decoder'](z).cpu()\n labels_batch = targets\n\n if len(recons) == 0:\n raws = inputs.cpu()\n recons = recons_batch\n labels = labels_batch\n\n if dataset_type == 'train':\n vutils.save_image(recons, os.path.join(self.reconstruction_path,\n '{}.png'.format(reconstruction_type)), nrow=10)\n recon_dict[reconstruction_type] = recons\n\n if recon_idx == 0:\n vutils.save_image(raws, os.path.join(self.reconstruction_path, 'raw.png'), nrow=10)\n\n else:\n raws = torch.cat((raws, inputs.cpu()), axis=0)\n recons = torch.cat((recons, recons_batch), axis=0)\n labels = torch.cat((labels, labels_batch), axis=0)\n\n recon_datasets_dict[dataset_type] = {\n 'recons': recons,\n 'labels': labels,\n }\n\n mse_list.append(F.mse_loss(recons, raws).item())\n\n # todo : refactor dict to CustomDataset\n torch.save(recon_datasets_dict,\n os.path.join(self.reconstruction_path, 'recon_{}.pt'.format(reconstruction_type)))\n\n np.save(os.path.join(self.reconstruction_path, 'mse.npy'), mse_list)\n\n @staticmethod\n def reparameterize(mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n\n return mu + std * eps\n\n def inference_z(self, z):\n mu, logvar = self.nets['encoder'](z)\n if self.disentangle_with_reparameterization:\n return self.reparameterize(mu, logvar)\n else:\n return mu\n\n def split_class_membership(self, z):\n class_z = z[:, self.class_idx]\n membership_z = z[:, self.membership_idx]\n\n return class_z, membership_z\n\n def get_loss_function(self):\n def loss_function(recon_x, x, mu, logvar):\n MSE = 
F.mse_loss(recon_x, x, reduction='sum')\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n return MSE + self.beta * KLD, MSE, KLD\n\n return loss_function\n","sub_path":"code_backup/reconstruction_swap.py","file_name":"reconstruction_swap.py","file_ext":"py","file_size_in_byte":33478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"58997876","text":"\"\"\"\nManage keyboard input.\n\"\"\"\nfrom .component import Component\nimport pyglet.window\nimport configparser\nimport io\n\n\ndef _lookup_binding_value(name):\n \"\"\"Translate an @'d name into the pyglet value for that name.\"\"\"\n if \"@\" in name:\n if name[1:].upper() in pyglet.window.key.__dict__:\n return pyglet.window.key.__dict__[name[1:].upper()]\n return name\n\n\ndef abstract_load_config(keymanager, reset=False, binddict={}, impulselist=[], watchlist=[]):\n \"\"\"Implement your own key configuration loader by calling this with a series of lists and dictionaries.\"\"\"\n if reset:\n keymanager.reset()\n\n for binding in binddict:\n keymanager.alias(_lookup_binding_value(binding), _lookup_binding_value(binddict[binding]))\n keymanager.track(_lookup_binding_value(binddict[binding]))\n\n for impulse in impulselist:\n keymanager.impulse(_lookup_binding_value(impulse))\n\n for watch in watchlist:\n keymanager.track(_lookup_binding_value(watch))\n return keymanager\n\n\ndef load_config(keymanager, reset=False, configfile=\"\", defaultconfig=\"\"):\n \"\"\"\n Load an INI style config file.\n\n Recognizes two sections, Options and Bindings. Options can contain\n the keys impulses and watch, which use a newline-delimited list of key\n names to indicate keys which are tracked and produce impulses.\n In the Bindings section, each key is interpreted as a key name which\n is aliased to its value.\n\n Keynames are either strings, or strings starting with an @ symbol\n indicating a predefined key name in Boop, such as @F9.\n \"\"\"\n cf = configparser.ConfigParser() # SafeConfigParser is a deprecated alias in Python 3\n if defaultconfig:\n cf.read_file(io.StringIO(defaultconfig)) # defaultconfig is a str, so StringIO rather than BytesIO\n cf.read(configfile)\n\n impulselist = []\n watchlist = []\n bindings = {}\n if cf.has_section(\"Options\"):\n if cf.has_option(\"Options\", \"impulses\"):\n impulselist = cf.get(\"Options\", \"impulses\").split(\"\\n\")\n if cf.has_option(\"Options\", \"watch\"):\n watchlist = cf.get(\"Options\", \"watch\").split(\"\\n\")\n if cf.has_section(\"Bindings\"):\n for binding in cf.items(\"Bindings\"):\n bindings[binding[0]] = binding[1]\n return abstract_load_config(\n keymanager, reset=reset, binddict=bindings, impulselist=impulselist, watchlist=watchlist\n )\n\n\nclass KeyManager(Component):\n \"\"\"\n This is a component which records the up/down state of keyboard and mouse buttons and produces impulses.\n\n Add this to your Window or Scene to manage keys, alias keys, track keys, and create secondary handlers\n for keys.\n\n Order of operations:\n 1. Assign current key based on aliases.\n 2. Emit impulse event for key up / down.\n 3. Update key_states\n 4. Re-emit event if it was alias indirected. This can create an infinite loop if there are loops in\n the aliasing.\n\n Note that aliases can be arbitrary values, so they can be used to assign non-overlapping values for\n keys, for example to create key mappings to arbitrary end points.\n\n The key manager also acts as a dictionary. 
Accessing the value will return the current key state (or\n up if it is an untracked key).\n\n Impulses are basically second-order keyboard events that only emit on key down.\n \"\"\"\n\n def __init__(self, reemit=True):\n Component.__init__(self)\n\n self.key_aliases = {}\n self.key_impulses = set()\n self.key_trackstates = set()\n self.key_states = {}\n self.reemit = reemit\n\n def reset(self, key=None):\n \"\"\"Set all key states to \"up\".\"\"\"\n if key is None:\n for key in self.key_states:\n self.key_states[key] = False\n else:\n self.key_states[key] = False\n\n def state(self, key):\n \"\"\"Get the state of a particlar key.\"\"\"\n if key in self.key_states:\n return self.key_states[key]\n return None\n\n def alias(self, key, value):\n \"\"\"Assign an alias to a particular key.\"\"\"\n self.key_aliases[key] = value\n\n def delalias(self, key):\n \"\"\"Remove a key alias.\"\"\"\n if key in self.key_aliases:\n del self.key_aliases[key]\n\n def track(self, key):\n \"\"\"Tell the keyboard manager to start tracking the states of a particular key.\"\"\"\n self.key_trackstates.add(key)\n\n def deltrack(self, key):\n \"\"\"Tell the keyboard manager to stop tracking the states of a particular key.\"\"\"\n self.key_trackstates.remove(key)\n if key in self.key_states:\n del self.key_states[key]\n\n def impulse(self, key):\n \"\"\"Ask for impulses to be emitted for a specific key.\"\"\"\n self.key_impulses.add(key)\n\n def delimpulse(self, key):\n \"\"\"Stop emitting impulses for a specific key.\"\"\"\n self.key_impulses.remove(key)\n\n def on_key_press(self, state, startkey, modifiers):\n \"\"\"Internal keypress handler.\"\"\"\n # print(state, pyglet.window.key.symbol_string(startkey), modifiers)\n if startkey in self.key_aliases:\n key = self.key_aliases[startkey]\n else:\n key = startkey\n\n if key in self.key_impulses:\n state.window.dispatch_event(\"on_impulse\", key)\n\n if key in self.key_trackstates:\n self.key_states[key] = True\n\n if key != startkey and self.reemit:\n state.window.dispatch_event(\"on_key_press\", key, modifiers)\n\n def on_key_release(self, state, startkey, modifiers):\n \"\"\"Internal keyrelease handler.\"\"\"\n if startkey in self.key_aliases:\n key = self.key_aliases[startkey]\n else:\n key = startkey\n\n if key in self.key_states:\n self.key_states[key] = False\n\n if key != startkey and self.reemit:\n state.window.dispatch_event(\"on_key_release\", key, modifiers)\n","sub_path":"boop/keymanager.py","file_name":"keymanager.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"410234316","text":"\"\"\"\nCreated : 10/04/2017\n@author : Cuong Nguyen Ba\n\"\"\"\n\nimport math\nimport time\n\nimport numpy as n\nfrom scipy.special import gammaln, psi\n\n\n# n.random.seed(100000001)\nclass VbObtm(object):\n def __init__(self, num_term, num_topic, alpha, beta,\n stepCount, stepPower, stepOffset, r_drop):\n self._W = num_term\n self._K = num_topic\n\n self._r_drop = r_drop\n\n self._alpha = alpha\n self._beta = beta\n\n self._stepCount = stepCount\n self._stepPower = stepPower\n self._stepOffset = stepOffset\n\n self._phi = 1 * n.random.gamma(100.0, 1.0 / 100.0, (self._K, self._W))\n self._theta = 1 * n.random.gamma(100.0, 1.0 / 100.0, (1, self._K))\n\n # normalize phi\n _phi_norm = self._phi.sum(axis=1)\n self._phi /= _phi_norm[:, n.newaxis]\n # normalize theta\n self._theta /= sum(self._theta)\n\n self._thetaSS = n.zeros(self._K)\n self._phiSS = n.zeros((self._K, 
self._W))\n\n self._alphaVec = n.ones(self._K) * alpha\n self._betaMat = n.ones((self._K, self._W)) * beta\n\n def fitMiniBatch(self, biterm1ids, biterm2ids):\n self._stepCount += 1\n c_thetaSS = n.zeros(self._K)\n t_nk = n.zeros(self._K)\n c_phiSS = n.zeros((self._K, self._W))\n\n n_biterm = len(biterm1ids)\n biterms_list = self.gen_biterm_list(n_biterm)\n n_iter = float(n_biterm)\n\n start = time.time()\n if self._r_drop == 0:\n for i in range(0, n_biterm):\n t_nk[:] = self._theta * self._phi[:, biterm1ids[i]] * self._phi[:, biterm2ids[i]]\n t_nk /= sum(t_nk)\n\n c_thetaSS += t_nk\n c_phiSS[:, biterm1ids[i]] += t_nk\n c_phiSS[:, biterm2ids[i]] += t_nk\n else:\n n_iter = float(len(biterms_list))\n for i in range(0, len(biterms_list)):\n # print biterm1ids[i]\n id_n = biterms_list[i]\n t_nk[:] = self._theta * self._phi[:, biterm1ids[id_n]] * self._phi[:, biterm2ids[id_n]]\n t_nk /= sum(t_nk)\n\n c_thetaSS += t_nk\n c_phiSS[:, biterm1ids[id_n]] += t_nk\n c_phiSS[:, biterm2ids[id_n]] += t_nk\n\n c_thetaSS /= n_iter\n c_phiSS /= n_iter\n\n end1 = time.time()\n\n if self._stepCount == 1:\n self._thetaSS = c_thetaSS\n self._phiSS = c_phiSS\n else:\n stepSize = math.pow(self._stepCount + self._stepOffset, - self._stepPower)\n self._thetaSS = (1 - stepSize) * self._thetaSS + stepSize * c_thetaSS\n self._phiSS = (1 - stepSize) * self._phiSS + stepSize * c_phiSS\n\n # update and norm theta\n self._theta = self._thetaSS + self._alpha\n self._theta /= sum(self._theta)\n # update and norm phi\n self._phi = self._phiSS + self._betaMat\n # normalize phi\n _phi_norm = self._phi.sum(axis=1)\n self._phi /= _phi_norm[:, n.newaxis]\n end2 = time.time()\n\n return (end1 - start, end2 - end1, self._phi, self._theta)\n\n def gen_biterm_list(self, num_biterms):\n biterms_rand = n.random.binomial(1, self._r_drop, num_biterms)\n biterms_list = list()\n for topic_index in range(0, len(biterms_rand)):\n if biterms_rand[topic_index] == 0:\n biterms_list.append(topic_index)\n return biterms_list\n","sub_path":"vb/online_vb_obtm/btm/drop_vb_obtm.py","file_name":"drop_vb_obtm.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"139038301","text":"from setuptools import setup\nfrom codecs import open\nimport numpy\nimport re\nimport os\nfrom Cython.Build import cythonize\n\n# dirty but working (from POT)\n__version__ = re.search(\n r'__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', # It excludes inline comment too\n open('tslearn/__init__.py').read()).group(1)\n# The beautiful part is, I don't even need to check exceptions here.\n# If something messes up, let the build process fail noisy, BEFORE my release!\n\n# thanks Pipy for handling markdown now\nROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(ROOT, 'README.md'), encoding=\"utf-8\") as f:\n README = f.read()\n\nsetup(\n name=\"tslearn\",\n description=\"A machine learning toolkit dedicated to time-series data\",\n long_description=README,\n long_description_content_type='text/markdown',\n include_dirs=[numpy.get_include()],\n packages=['tslearn'],\n package_data={\"tslearn\": [\".cached_datasets/Trace.npz\"]},\n data_files=[(\"\", [\"LICENSE\"])],\n install_requires=['numpy', 'scipy', 'scikit-learn', 'Cython', 'numba'],\n extras_require={'tests': ['pytest']},\n ext_modules=cythonize(\"tslearn/*.pyx\", include_path=[numpy.get_include()]),\n version=__version__,\n url=\"http://tslearn.readthedocs.io/\",\n author=\"Romain Tavenard\",\n 
author_email=\"romain.tavenard@univ-rennes2.fr\"\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"296170102","text":"import os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nclass LoadData():\n    def __init__(self):\n        self.airflow_entry = \"/usr/local/airflow\"\n        self.airflow_wd = './dags'\n        self.local_wd = './dags'\n        self.ingested_data = '/data/download_data.csv'\n        self.prepared_train = '/data/prepared_train_data.csv'\n        self.prepared_test = '/data/prepared_test_data.csv'\n        self.target_column = 'quality'\n        self.seed = 0\n        self.test_size = 0.3\n    \n    def check_dir(self):\n        if os.path.abspath(os.getcwd()) == self.airflow_entry:\n            self.wd = self.airflow_wd\n        else:\n            self.wd = self.local_wd\n        return self.wd\n\n    def split_data(self):\n        df = pd.read_csv(self.wd+self.ingested_data)\n        y = df.pop(self.target_column)\n        X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=self.test_size, random_state=self.seed)\n        return X_train, X_test, y_train, y_test\n    \n    def get_data(self, datatype='both'):\n        if datatype == 'train':\n            df = pd.read_csv(self.wd+self.prepared_train)\n            y = df.pop(self.target_column)\n            x = df\n            return x, y\n        elif datatype == 'test':\n            df = pd.read_csv(self.wd+self.prepared_test)\n            y = df.pop(self.target_column)\n            x = df\n            return x, y\n        elif datatype == 'both':\n            df = pd.read_csv(self.wd+self.prepared_train)\n            df_ = pd.read_csv(self.wd+self.prepared_test)\n            y_train = df.pop(self.target_column)\n            X_train = df\n            y_test = df_.pop(self.target_column)\n            X_test = df_\n            return X_train, X_test, y_train, y_test\n","sub_path":"dags/scripts/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"295216511","text":"import requests\nfrom fastapi import FastAPI\n\napp = FastAPI()\n\n\n@app.get('/fetch/')\nasync def get_songs():\n    song_resp = requests.get(\n        'http://ws.audioscrobbler.com/2.0/?method=chart.gettoptracks&api_key=fff670dc3c7f1b019ac5c1ae930e4ce4&format=json')\n    songs = song_resp.json()['tracks']['track']\n    newSongs = []\n    for song in songs:\n        # print(song)\n        newSongs.append(\n            {\n                'name': song['name'],\n                'artist': song['artist']['name'],\n                'playcount': song['playcount'],\n                'listeners': song['listeners'],\n                'url': song['url']\n            }\n        )\n\n    return newSongs","sub_path":"music/fetcher/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"110561993","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n# @Time : 2018/8/30 15:19 \n# @Author : virus \n# @File : 2-2.py \n# @Desp : python\n\n# Print all 4-digit perfect squares of the form aabb (the 7744 problem) (2)\nimport math\n\ni = 1\nwhile i > 0:\n    n = i * i\n    i += 1\n    if n > 9999:\n        break\n    if n > 1000:\n        hi = int(n/100)\n        lo = int(n%100)\n        if (int(hi / 10) == hi % 10) & (int(lo / 10) == lo % 10):\n            print(n)\n    else:\n        continue\n\n","sub_path":"language/example/2-3.py","file_name":"2-3.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"36671962","text":"import os\nimport shutil\nimport json\nimport pymake\n\nimport pytest\n\ncpth = os.path.abspath(os.path.join(\"temp\", \"t011\"))\n\n\ndef initialize_working_dir():\n    # make sure the test directory exists\n    
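# exist_ok=True keeps this idempotent, so repeated test runs do not fail on an existing directory\n    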
os.makedirs(cpth, exist_ok=True)\n\n\ndef export_code_json():\n # make sure the test directory exists\n initialize_working_dir()\n\n # make the json file\n fpth = os.path.join(cpth, \"code.test.json\")\n pymake.usgs_program_data.export_json(\n fpth=fpth,\n current=True,\n write_markdown=True,\n verbose=True,\n )\n\n # check that the json file was made\n msg = \"did not make...{}\".format(fpth)\n assert os.path.isfile(fpth), msg\n\n return fpth\n\n\n@pytest.mark.requests\ndef test_usgsprograms():\n print(\"test_usgsprograms()\")\n upd = pymake.usgs_program_data().get_program_dict()\n\n all_keys = list(upd.keys())\n\n get_keys = pymake.usgs_program_data.get_keys()\n\n msg = \"the keys from program_dict are not equal to .get_keys()\"\n assert all_keys == get_keys, msg\n\n\n@pytest.mark.requests\ndef test_target_key_error():\n print(\"test_target_key_error()\")\n with pytest.raises(KeyError):\n pymake.usgs_program_data.get_target(\"error\")\n\n\n@pytest.mark.requests\ndef test_target_keys():\n print(\"test_target_keys()\")\n prog_dict = pymake.usgs_program_data().get_program_dict()\n targets = pymake.usgs_program_data.get_keys()\n for target in targets:\n target_dict = pymake.usgs_program_data.get_target(target)\n test_dict = prog_dict[target]\n\n msg = (\n \"dictionary from {} \".format(target)\n + \"does not match dictionary from .get_target()\"\n )\n assert target_dict == test_dict, msg\n\n\n@pytest.mark.requests\ndef test_usgsprograms_export_json():\n # export code.json and return json file path\n fpth = export_code_json()\n\n # test the json export\n with open(fpth, \"r\") as f:\n json_dict = json.load(f)\n json_keys = list(json_dict.keys())\n\n current_keys = pymake.usgs_program_data.get_keys(current=True)\n\n msg = \"the number of current keys is not equal to json keys\"\n assert len(json_keys) == len(current_keys), msg\n\n prog_dict = pymake.usgs_program_data().get_program_dict()\n for key, value in json_dict.items():\n temp_dict = prog_dict[key]\n # fill keys that are programmatically filled\n fill_keys = (\"url_download_asset_date\",)\n for fill_key in fill_keys:\n temp_dict[fill_key] = value[fill_key]\n msg = (\n \"json dictionary for {} key \".format(key)\n + \"is not equal to the .usgs_prog_data dictionary\"\n )\n assert value == temp_dict, msg\n\n\n@pytest.mark.requests\ndef test_usgsprograms_load_json_error():\n print(\"test_usgsprograms_load_json_error()\")\n\n initialize_working_dir()\n\n fpth = os.path.join(cpth, \"code.test.error.json\")\n my_dict = {\"mf2005\": {\"bad\": 12, \"key\": True}}\n pymake.usgs_program_data.export_json(\n fpth=fpth, prog_data=my_dict, update=False\n )\n\n with pytest.raises(KeyError):\n pymake.usgs_program_data.load_json(fpth=fpth)\n\n\n@pytest.mark.requests\ndef test_usgsprograms_load_json():\n print(\"test_usgsprograms_load_json()\")\n\n # export code.json and return json file path\n fpth = export_code_json()\n\n json_dict = pymake.usgs_program_data.load_json(fpth)\n\n # check that the json file was loaded\n msg = \"could not load {}\".format(fpth)\n assert json_dict is not None, msg\n\n\n@pytest.mark.requests\ndef test_usgsprograms_list_json_error():\n print(\"test_usgsprograms_list_json_error()\")\n\n # make sure the example directory exists\n initialize_working_dir()\n\n fpth = os.path.join(cpth, \"does.not.exist.json\")\n with pytest.raises(IOError):\n pymake.usgs_program_data.list_json(fpth=fpth)\n\n\n@pytest.mark.requests\ndef test_usgsprograms_list_json():\n print(\"test_usgsprograms_list_json()\")\n\n # export code.json and return json 
file path\n    fpth = export_code_json()\n\n    # list the contents of the json file\n    pymake.usgs_program_data.list_json(fpth=fpth)\n\n\n@pytest.mark.requests\ndef test_shared():\n    print(\"test_shared()\")\n    target_dict = pymake.usgs_program_data.get_target(\"libmf6\")\n    assert target_dict.shared_object, \"libmf6 is a shared object\"\n\n\n@pytest.mark.requests\ndef test_not_shared():\n    print(\"test_not_shared()\")\n    target_dict = pymake.usgs_program_data.get_target(\"mf6\")\n    assert not target_dict.shared_object, \"mf6 is not a shared object\"\n\n\n@pytest.mark.requests\ndef test_clean_up():\n    shutil.rmtree(cpth)\n\n\nif __name__ == \"__main__\":\n    test_usgsprograms()\n    test_target_key_error()\n    test_target_keys()\n    test_usgsprograms_export_json()\n    test_usgsprograms_load_json_error()\n    test_usgsprograms_load_json()\n    test_usgsprograms_list_json_error()\n    test_usgsprograms_list_json()\n    test_shared()\n    test_not_shared()\n    test_clean_up()\n","sub_path":"autotest/t011_test.py","file_name":"t011_test.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"350676964","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom scrapy.exceptions import DropItem\nimport json\nimport re\n\nclass SohuPipeline(object):\n    def __init__(self):\n        self.file=open('sohu.jl','w')\n        self.seen=set()\n    def process_item(self, item, spider):\n        if item['link'] in self.seen:\n            raise DropItem('Duplicate link %s' % item['link'])\n        self.seen.add(item['link'])\n        line = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n        self.file.write(line)\n        return item\n","sub_path":"spider/sohu/sohu/pipelines_bk.py","file_name":"pipelines_bk.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"510238717","text":"from math import sqrt, cos, trunc, pi, sin\n\nprint(\"Enter the following values for the function\")\ng1 = float(input(\"Initial value: \"))\nn = float(input(\"Step between values: \"))\ngn = float(input(\"Final value: \"))\nprint()\n\n\"\"\"\ng1 = float(-12)\ngn = float(6)\nn = 1\n\"\"\"\n\nif gn <= g1 or n <= 0:\n    print(\"The entered values are invalid\")\nelse:\n    \n\n    frange = trunc((gn - g1) / n + 1)\n\n    g = g1\n\n    a1 = []\n    a2 = []\n    a3 = []\n    a4 = []\n\n    line = \"-------------------------------------------------------------------\"\n    \n    print(\"| a1 | a2 | a3 | g |\")\n    print(line)\n\n    t = 0;\n\n    # Compute the values of the three functions\n    for i in range(frange):\n        a1.append(g**3 + 6.1 * g * g - 35.4 * g - 25.7)\n        a2.append(g * g - cos(g*pi))\n        #a3.append(sqrt(a1[i]**2 + a2[i]**2))\n        a3.append(g*20)\n        a4.append(20 * sin(g) - 80)\n        a = []\n        \n        point_f = '| {:^15.6}'\n        point_f *= 3\n\n        # Build the table of values\n\n        ln = point_f.format(a1[i], a2[i], a3[i]) + '| {:^12.2f}'.format(g) + \" |\"\n        print(ln)\n        \n        print(line)\n        \n        g += n\n\n    a.append(a1)\n    a.append(a2)\n    a.append(a3)\n    a.append(a4)\n    \n    print(\"\\nEnter, separated by spaces, the numbers of the plots\", end=\"\")\n    print(\" to be printed: \")\n\n    # Choose which plots to print\n    \n    fxn = 0\n\n    fMin = min(a[0])\n    fMax = max(a[0])\n    \n    for fx in a:\n        if min(fx) < fMin:\n            fMin = min(fx)\n        if max(fx) > fMax:\n            fMax = max(fx)\n\n    _indent = 66\n\n    if fMin != fMax:\n        koef = (0 - fMin)/(fMax-fMin)\n        zero_point = round(_indent * koef)\n    else:\n        
zero_point = _indent // 2\n\n    if (fMax != fMin) and (fMin > 0):\n        axis = False\n    else:\n        axis = True\n\n    \n    g = g1\n\n    _format = ' {:^6.2f} '\n\n    k = 0\n\n    print(\"Legend:\\n* - a1\\n@ - a2\\no - a3\\n+ - a4\\n\")\n    print(' ' * (zero_point-10), \"Plot of the three functions:\")\n    print(' ' * (_indent+8), 'g')\n    \n    for i in range(frange):\n        \n        # Find the maximum and minimum values\n\n        arr_pos = [0] * (_indent + 2)\n\n        if axis:\n            arr_pos[zero_point] = -1\n\n        fxn += 1\n        #print(fx)\n\n        fxn = 0\n\n        for j in range(len(a)):\n\n            fxn += 1\n            \n            if fMin != fMax:\n                koef = (a[j][i] - fMin)/(fMax - fMin)\n                pos = round(_indent * koef)\n                if pos == 0 and a[j][i] > 0:\n                    pos += 1\n                elif a[j][i] > 0:\n                    #pos += 1\n                    k += 1\n                \n            else:\n                if fMax < 0:\n                    pos = 16\n                elif fMax == 0:\n                    pos = 33\n                else:\n                    pos = 50\n\n            arr_pos[pos] = fxn\n        \n        line = ' '\n        for j in range(_indent+1):\n            if arr_pos[j] == 0:\n                line += ' '\n            elif arr_pos[j] == -1:\n                line += '|'\n            elif arr_pos[j] == 1:\n                line += '*'\n            elif arr_pos[j] == 2:\n                line += '@'\n            elif arr_pos[j] == 3:\n                line += 'o'\n            elif arr_pos[j] == 4:\n                line += '+'\n        print(line, ' ', _format.format(g), sep='')\n        g += n\n    \n    \n    if axis:\n        indent = zero_point\n        print(\" \" * (indent), \"|\")\n        print(\" \" * (indent), \"V\")\n        print(\" \" * (indent), \"g\")\n\n    \n\n    \n    print(\"\\nNumber of values of function a\",\n          \" that are greater than zero: \", k, sep=\"\")\n\n    print()\n","sub_path":"lab_5_graphic.py","file_name":"lab_5_graphic.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"611514943","text":"import boto3\nimport json\n\ndef saveOrder():\n    f = open(\"orders.json\", 'a')\n    sqs = boto3.client('sqs')\n    response = sqs.receive_message(\n        QueueUrl='https://sqs.us-east-1.amazonaws.com/292274580527/cc406_team3'\n    )\n    \n    # If the response is empty, close the file and return\n    if response == None:\n        f.close()\n        return\n    \n    # Save json strings from response in orders.json and print them to system.out\n    for order in response[\"Messages\"]:\n        f.write(order[\"Body\"] + \"\\n\")\n        print(order)\n    f.close()\n\ndef readSQS():\n    listOrders = []\n    sqs = boto3.client('sqs')\n    response = sqs.receive_message(\n        QueueUrl='https://sqs.us-east-1.amazonaws.com/292274580527/cc406_team3',\n        MaxNumberOfMessages=3\n    )\n\n    if response != None:\n        for order in response[\"Messages\"]:\n            json_order = json.loads(order[\"Body\"])\n            json_order[\"ReceiptHandle\"] = order[\"ReceiptHandle\"]\n            listOrders.append(json_order)\n\n    return listOrders\n\ndef putSQS(message):\n    sqs = boto3.resource('sqs')\n    queue = sqs.get_queue_by_name(QueueName='cc406_response3')\n    print(queue.url)\n    response = queue.send_message(MessageBody=message)\n\ndef deleteSQS(receiptHandle):\n    sqs = boto3.client('sqs')\n    sqs.delete_message(QueueUrl='https://sqs.us-east-1.amazonaws.com/292274580527/cc406_team3', ReceiptHandle=receiptHandle)\n","sub_path":"SQS.py","file_name":"SQS.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"258650227","text":"\"\"\"\nCreated on Mon Nov 28 16:35:10 2016\n\n@author: Kyle\n\"\"\"\n\nimport sys\nfrom math import sqrt\nfrom math import floor\nimport csv\n\nclass state(object):\n    \"\"\"\n    Each class object represents one state; it is initialized with a name and population.\n    From there it calculates its own priority and keeps track of its own number of reps.\n    Call addrep() to increase reps by 1 and update priority.\n    
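This matches the Huntington-Hill method used for U.S. House apportionment:\n    with reps seats already held, the priority for the next seat is\n    pop / sqrt(reps * (reps + 1)), which is pop / sqrt(2) for the first added seat.\n    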
\"\"\"\n def __init__(self,name,pop):\n self.name=name\n self.pop=pop\n self.Priority=pop/sqrt(2)\n self.reps=1\n \n def addrep(self):\n self.Priority=sqrt(self.reps/(self.reps+2))*self.Priority\n self.reps+=1\n\n# Read data from csv file\nslist=[]\ninfile = sys.argv[1]\nwith open(infile, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n for row in csvreader:\n slist.append(state(row[0],int(row[1])))\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# I have included several potential methods to\n# determine the number of Representatives. \n# - - -\n# A: Set Number of Reps\n# If you want to set the number of reps, include the\n# desired number in the command line after the csv filename\n#\n# It will allocate the seats, if the number of seats is\n# less than the number of states each will still be given 1.\n# \n# ex:\n# >python3 apportion.py 2010.csv 500\n# - - -\n# B: Cube Root Rule\n# Have the entry in the command line after the csv file\n# be the word cube.\n#\n# The number of seats will be equal to the cube root of\n# the total population of all states combined rounded\n# to the nearest whole number.\n# \n# ex:\n# >python3 apportion.py 2010.csv cube\n# - - -\n# C: Wyoming Rule\n# Have the entry in the command line after the csv file\n# be the word Wyoming.\n#\n# The number of seats will be the total population divided\n# by the population of the smallest state (not necessarily Wyoming),\n# rounded to the nearest whole number.\n# \n# ex:\n# >python3 apportion.py 2010.csv wyoming\n# - - -\n# D: If it fails to detect one of these options it will\n# default to 435, the current size.\n# \n# ex:\n# >python3 apportion.py 2010.csv\n# # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n# Choose the number of reps to apportion\nif len(sys.argv)>2:\n method = sys.argv[2]\nelse:\n method = '435'\n\nif method.isdigit():\n nreps = int(method)\nelif method.lower() == \"cube\":\n nreps = round((sum(st.pop for st in slist))**(1/3.)-2*len(slist))\nelif method.lower() == \"wyoming\":\n slist.sort(key=lambda x: x.pop)\n nreps = round((sum(st.pop for st in slist)/slist[0].pop))\nelse:\n nreps = 435\n\n# create file to output to named Apportionment_{csv filename}.txt\nout = open('Apportionment_{}.txt'.format(infile[:-4]),'w')\n\n# Loop through the states, adding one representative at a time\n# until the desired number have been apportioned.\nprint('- '*10)\nprint('Last 10 Seats Given:')\nout.write('Order of Seats Given:\\n')\nfor i in range(nreps - len(slist)):\n slist.sort(key=lambda x: x.Priority,reverse=True)\n slist[0].addrep()\n out.write(slist[0].name+',')\n if(i > nreps - len(slist) - 11):\n print(slist[0].name,end=',') \nprint('\\n'+'- '*10)\nout.write('\\n'+'- '*10+'\\n')\n\n# Show state names in order of highest to lowest priority.\nprint('Priority of States After Last Seat Given:')\nout.write('Priority of States After Last Seat Given:\\n')\nslist.sort(key=lambda x: x.Priority,reverse=True) \nfor i in slist:\n print(i.name,end=',')\n out.write(i.name+',')\nprint('\\n'+'- '*10)\nout.write('\\n'+'- '*10+'\\n')\n\n# Get string lengths for formatting results\nslist.sort(key=lambda x: x.reps,reverse=True)\nreplen=len(str(slist[0].reps))\nslist.sort(key=lambda x: x.pop/x.reps,reverse=True)\npopreplen=len(str(int(slist[0].pop/slist[0].reps)))+3\n\n# Display results\nprint('Results:')\nout.write('Results:\\n')\nslist.sort(key=lambda x: x.pop,reverse=True)\nfor i in slist:\n temp_str =i.name+\": {0:{2}} Reps, {1:{3}.2f} 
pop/rep\".format(i.reps,i.pop/i.reps,replen,popreplen)\n    print(temp_str)\n    out.write(temp_str+'\\n')\n\n# Display total number of representatives\nprint('\\nRepresentatives apportioned: {}'.format(nreps),end='')\nout.write('\\nRepresentatives apportioned: {}'.format(nreps))\nout.close()","sub_path":"apportion.py","file_name":"apportion.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"357229089","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/electrum_chi/electrum/gui/qt/stylesheet_patcher.py\n# Compiled at: 2019-08-24 06:06:43\n# Size of source mod 2**32: 706 bytes\n\"\"\"This is used to patch the QApplication style sheet.\nIt reads the current stylesheet, appends our modifications and sets the new stylesheet.\n\"\"\"\nfrom PyQt5 import QtWidgets\n\ndef patch_qt_stylesheet(use_dark_theme: bool) -> None:\n    if not use_dark_theme:\n        return\n    app = QtWidgets.QApplication.instance()\n    style_sheet = app.styleSheet()\n    style_sheet = style_sheet + '\\n    /* PayToEdit text was being clipped */\\n    QAbstractScrollArea {\\n        padding: 0px;\\n    }\\n    /* In History tab, labels while edited were being clipped (Windows) */\\n    QAbstractItemView QLineEdit {\\n        padding: 0px;\\n        show-decoration-selected: 1;\\n    }\\n    '\n    app.setStyleSheet(style_sheet)","sub_path":"pycfiles/Electrum_CHI-3.3.8-py3.7/stylesheet_patcher.cpython-37.py","file_name":"stylesheet_patcher.cpython-37.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"569094969","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n# Time complexity O(m)+O(n)+O(max(m,n)), space complexity O(max(m,n))\nclass Solution:\n    def addTwoNumbers(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        i = 0\n        num1 = 0\n        while l1 != None:\n            num1 += l1.val * 10 ** i\n            i += 1\n            l1 = l1.next\n        j = 0\n        num2 = 0\n        while l2 != None:\n            num2 += l2.val * 10 ** j\n            j += 1\n            l2 = l2.next\n        num = num1 + num2\n        node = ListNode(num % 10)\n        cur = node\n        num = num // 10\n        while num != 0:\n            node.next = ListNode(num % 10)\n            num = num // 10\n            node = node.next\n        return cur\n\n\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n# Time complexity O(max(m,n)), space complexity O(max(m,n))\nclass Solution:\n    # Work on the two linked lists directly: add the values at corresponding nodes and carry when the sum passes 10; missing digits are padded with 0\n    def addTwoNumbers(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        dummy_head = ListNode(0)\n        cur = dummy_head\n        # Carry flag: whether the previous digit sum reached 10\n        carry = 0\n        while l1 or l2:\n            # l1 and l2 may have different lengths, so treat missing positions as 0\n            l1_val = l1.val if l1 else 0\n            l2_val = l2.val if l2 else 0\n            two_sum = l1_val + l2_val + carry\n            if two_sum < 10:\n                cur.next = ListNode(two_sum)\n                carry = 0\n            else:\n                cur.next = ListNode(two_sum % 10)\n                # carry can only be 0 or 1: (9+9+1)=19, so the maximum carry is 1\n                carry = 1\n                # carry = two_sum // 10\n            cur = cur.next\n            l1 = l1.next if l1 else None\n            l2 = l2.next if l2 else None\n        # Carry left over from the last digit\n        if carry == 1:\n            cur.next = ListNode(1)\n        return 
dummy_head.next\n","sub_path":"题目分类/链表/add_two_numbers_2.py","file_name":"add_two_numbers_2.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"265060164","text":"import torch\r\nimport cfg\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nnet=torch.load(cfg.save_path).to(device)\r\nwith open(cfg.word_path,encoding='UTF-8') as f:\r\n a=f.read().split()\r\ndicn2w=dict(zip(range(len(a)),a))\r\ndicw2n=dict(zip(a,range(len(a))))\r\n\r\ns='爸'\r\nx=torch.tensor([[]]).long()\r\nfor i in s:\r\n ss=dicw2n[i]\r\n x=torch.cat((x,torch.tensor([[int(ss)]])),1)\r\n\r\nx=x.to(device)\r\np = torch.range(0,len(s)-1).long().view(1,-1).to(device)\r\n\r\n#a=False\r\na=True\r\nfor i in range(512-len(s)):\r\n y = net(x, p)\r\n y = y[:, -1:]\r\n v, y = torch.topk(y, 8, dim=-1)\r\n v, y = v.reshape(-1, 8), y.reshape(-1, 8)\r\n v = torch.multinomial(torch.softmax(v, dim=-1), 1)\r\n y = torch.gather(y, -1, v)\r\n x = torch.cat([x, y], dim=1)\r\n p = torch.tensor([range(i+len(s)+1)]).to(device)\r\n\r\nz=x.view(-1).clone().cpu().numpy().tolist()\r\nfor i in z:\r\n d=dicn2w[i]\r\n if d == '[start]':\r\n a=True\r\n if a:\r\n if d== '[space]':\r\n d='\\n'\r\n print(d,end='')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"NLP/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"615270494","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Any, List, Optional\n\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.plugins.sweeper import Sweeper\nfrom hydra.types import TaskFunction\nfrom omegaconf import DictConfig\n\nfrom .config import SamplerConfig\n\n\nclass OptunaSweeper(Sweeper):\n \"\"\"Class to interface with Optuna\"\"\"\n\n def __init__(\n self,\n sampler: SamplerConfig,\n direction: Any,\n storage: Optional[str],\n study_name: Optional[str],\n n_trials: int,\n n_jobs: int,\n search_space: Optional[DictConfig],\n ) -> None:\n from ._impl import OptunaSweeperImpl\n\n self.sweeper = OptunaSweeperImpl(\n sampler, direction, storage, study_name, n_trials, n_jobs, search_space\n )\n\n def setup(\n self,\n config: DictConfig,\n config_loader: ConfigLoader,\n task_function: TaskFunction,\n ) -> None:\n self.sweeper.setup(config, config_loader, task_function)\n\n def sweep(self, arguments: List[str]) -> None:\n return self.sweeper.sweep(arguments)\n","sub_path":"plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/optuna_sweeper.py","file_name":"optuna_sweeper.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"395728645","text":"from hikka.decorators import auth_required, permission_required\nfrom hikka.services.permissions import PermissionService\nfrom hikka.services.descriptors import DescriptorService\nfrom werkzeug.datastructures import FileStorage\nfrom hikka.services.anime import AnimeService\nfrom hikka.services.files import FileService\nfrom hikka.services.teams import TeamService\nfrom hikka.services.users import UserService\nfrom hikka.tools.upload import UploadHelper\nfrom flask_restful import Resource\nfrom flask_restful import 
reqparse\nfrom hikka.errors import abort\nfrom flask import Response\nfrom flask import request\nfrom hikka import utils\n\nclass NewAnime(Resource):\n @auth_required\n @permission_required(\"global\", \"publishing\")\n def post(self):\n result = {\"error\": None, \"data\": {}}\n\n parser = reqparse.RequestParser()\n parser.add_argument(\"franchises\", type=list, default=[], location=\"json\")\n parser.add_argument(\"subtitles\", type=list, default=[], location=\"json\")\n parser.add_argument(\"voiceover\", type=list, default=[], location=\"json\")\n parser.add_argument(\"aliases\", type=list, default=[], location=\"json\")\n parser.add_argument(\"genres\", type=list, default=[], location=\"json\")\n parser.add_argument(\"description\", type=str, required=True)\n parser.add_argument(\"category\", type=str, required=True)\n parser.add_argument(\"title\", type=dict, required=True)\n parser.add_argument(\"team\", type=str, required=True)\n parser.add_argument(\"year\", type=int, required=True)\n parser.add_argument(\"state\", type=str, default=None)\n args = parser.parse_args()\n\n title_parser = reqparse.RequestParser()\n title_parser.add_argument(\"jp\", type=str, default=None, location=(\"title\",))\n title_parser.add_argument(\"ua\", type=str, location=(\"title\",))\n title_args = title_parser.parse_args(req=args)\n\n for alias in args[\"aliases\"]:\n if type(alias) is not str:\n return abort(\"general\", \"alias-invalid-type\")\n\n team = TeamService.get_by_slug(args[\"team\"])\n if team is None:\n return abort(\"team\", \"not-found\")\n\n if request.account not in team.members:\n return abort(\"account\", \"not-team-member\")\n\n if not PermissionService.check(request.account, \"global\", \"publishing\"):\n return abort(\"account\", \"permission\")\n\n category = DescriptorService.get_by_slug(\"category\", args[\"category\"])\n if category is None:\n return abort(\"category\", \"not-found\")\n\n state = DescriptorService.get_by_slug(\"state\", args[\"state\"])\n if state is None:\n return abort(\"state\", \"not-found\")\n\n if args[\"description\"] is None:\n return abort(\"general\", \"missing-field\")\n\n genres = []\n for slug in args[\"genres\"]:\n genre = DescriptorService.get_by_slug(\"genre\", slug)\n if genre is not None:\n genres.append(genre)\n else:\n return abort(\"genre\", \"not-found\")\n\n franchises = []\n for slug in args[\"franchises\"]:\n franchise = DescriptorService.get_by_slug(\"franchise\", slug)\n if franchise is not None:\n franchises.append(franchise)\n else:\n return abort(\"franchise\", \"not-found\")\n\n subtitles = []\n for username in args[\"subtitles\"]:\n subtitles_account = UserService.get_by_username(username)\n if subtitles_account is not None:\n subtitles.append(subtitles_account)\n\n else:\n return abort(\"account\", \"not-found\")\n\n voiceover = []\n for username in args[\"voiceover\"]:\n voiceover_account = UserService.get_by_username(username)\n if voiceover_account is not None:\n voiceover.append(voiceover_account)\n\n else:\n return abort(\"account\", \"not-found\")\n\n title = AnimeService.get_title(title_args[\"ua\"], title_args[\"jp\"])\n search = utils.create_search(title_args[\"ua\"], title_args[\"jp\"], args[\"aliases\"])\n slug = utils.create_slug(title_args[\"ua\"])\n\n anime = AnimeService.create(\n title,\n slug,\n args[\"description\"],\n args[\"year\"],\n search,\n category,\n state,\n genres,\n franchises,\n [team],\n subtitles,\n voiceover,\n args[\"aliases\"]\n )\n\n result[\"data\"] = anime.dict()\n return result\n\nclass 
Upload(Resource):\n @auth_required\n @permission_required(\"global\", \"publishing\")\n def put(self):\n result = {\"error\": None, \"data\": []}\n choices = (\"poster\", \"banner\")\n\n parser = reqparse.RequestParser()\n parser.add_argument(\"file\", type=FileStorage, location=\"files\")\n parser.add_argument(\"type\", type=str, choices=choices)\n parser.add_argument(\"slug\", type=str, required=True)\n args = parser.parse_args()\n\n anime = AnimeService.get_by_slug(args[\"slug\"])\n if anime is None:\n return abort(\"anime\", \"not-found\")\n\n if args[\"file\"] is not None:\n helper = UploadHelper(request.account, args[\"file\"], args[\"type\"])\n data = helper.upload_image()\n\n if type(data) is Response:\n return data\n\n if anime[args[\"type\"]] is not None:\n FileService.destroy(anime[args[\"type\"]])\n\n anime[args[\"type\"]] = data\n anime.save()\n\n result[\"data\"] = anime.dict()\n return result\n\nclass GetAnime(Resource):\n def get(self, slug):\n anime = AnimeService.get_by_slug(slug)\n if anime is None:\n return abort(\"anime\", \"not-found\")\n\n return anime.dict(True)\n\nclass Search(Resource):\n def post(self):\n result = {\"error\": None, \"data\": []}\n\n parser = reqparse.RequestParser()\n parser.add_argument(\"franchises\", type=list, default=[], location=\"json\")\n parser.add_argument(\"categories\", type=list, default=[], location=\"json\")\n parser.add_argument(\"states\", type=list, default=[], location=\"json\")\n parser.add_argument(\"genres\", type=list, default=[], location=\"json\")\n parser.add_argument(\"teams\", type=list, default=[], location=\"json\")\n parser.add_argument(\"query\", type=str, default=\"\")\n parser.add_argument(\"page\", type=int, default=0)\n parser.add_argument(\"year\", type=dict)\n args = parser.parse_args()\n\n year_parser = reqparse.RequestParser()\n year_parser.add_argument(\"min\", type=int, default=None, location=(\"year\",))\n year_parser.add_argument(\"max\", type=int, default=None, location=(\"year\",))\n year_args = year_parser.parse_args(req=args)\n\n query = utils.search_query(args[\"query\"])\n categories = []\n franchises = []\n genres = []\n states = []\n teams = []\n\n for slug in args[\"categories\"]:\n category = DescriptorService.get_by_slug(\"category\", slug)\n if category is not None:\n categories.append(category)\n else:\n return abort(\"category\", \"not-found\")\n\n for slug in args[\"genres\"]:\n genre = DescriptorService.get_by_slug(\"genre\", slug)\n if genre is not None:\n genres.append(genre)\n else:\n return abort(\"genre\", \"not-found\")\n\n for slug in args[\"franchises\"]:\n franchise = DescriptorService.get_by_slug(\"franchise\", slug)\n if franchise is not None:\n franchises.append(franchise)\n else:\n return abort(\"franchise\", \"not-found\")\n\n for slug in args[\"states\"]:\n state = DescriptorService.get_by_slug(\"state\", slug)\n if state is not None:\n genres.append(state)\n else:\n return abort(\"state\", \"not-found\")\n\n for slug in args[\"teams\"]:\n team = TeamService.get_by_slug(slug)\n if team is not None:\n teams.append(team)\n else:\n return abort(\"team\", \"not-found\")\n\n anime = AnimeService.search(\n query,\n year_args,\n categories,\n genres,\n franchises,\n states,\n teams,\n args[\"page\"]\n )\n\n for anime in anime:\n result[\"data\"].append(anime.dict())\n\n return result\n","sub_path":"hikka/modules/anime.py","file_name":"anime.py","file_ext":"py","file_size_in_byte":8575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"605760783","text":"\nimport argparse\nfrom datetime import datetime, timedelta\nimport json\nimport os\nimport requests\nimport sys\nimport time\nimport traceback\n\n# Using the grpc client in AzureML Accelerated Models SDK\nfrom azureml.accel import PredictionClient\n\n# The device connection string to authenticate the device with your IoT hub.\n# These environment variables are set in IoT Edge Module settings (see deployment_template.json)\nDEVICE_CONNECTION_STRING = os.environ.get(\"DEVICE_CONNECTION_STRING\")\nif DEVICE_CONNECTION_STRING:\n # Using the Python Device SDK for IoT Hub:\n # https://github.com/Azure/azure-iot-sdk-python\n # The sample connects to a device-specific MQTT endpoint on your IoT Hub.\n from iothub_client import IoTHubClient, IoTHubClientError, IoTHubTransportProvider, IoTHubClientResult\n from iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError, DeviceMethodReturnValue\n from iothub_service_client import IoTHubDeviceTwin, IoTHubError\n\n # Using the MQTT protocol.\n PROTOCOL = IoTHubTransportProvider.MQTT\n MESSAGE_TIMEOUT = 10000\n\ndef send_confirmation_callback(message, result, user_context):\n print ( \"IoT Hub responded to message with status: %s\" % (result) )\n\ndef send_iothub_message(iothub_client, msg):\n # Send result to IOT hub\n try:\n message = IoTHubMessage(msg)\n iothub_client.send_event_async(message, send_confirmation_callback, None)\n except IoTHubError as iothub_error:\n print ( \"Unexpected error %s from IoTHub\" % iothub_error )\n return\n except KeyboardInterrupt:\n print ( \"IoTHubClient sample stopped\" ) \n\ndef main(args):\n prediction_client = PredictionClient(args.address, args.port)\n if DEVICE_CONNECTION_STRING and not args.suppress_messages:\n iothub_client = IoTHubClient(DEVICE_CONNECTION_STRING, PROTOCOL)\n classes_entries = requests.get(\"https://raw.githubusercontent.com/Lasagne/Recipes/master/examples/resnet50/imagenet_classes.txt\").text.splitlines()\n\n while True:\n for image in os.listdir(args.image_dir):\n # score image\n try:\n start_time = time.time()\n results = prediction_client.score_file(path=os.path.join(args.image_dir, image), \n input_name=args.input_tensors, \n outputs=args.output_tensors)\n inference_time = (time.time() - start_time) * 1000\n # map results [class_id] => [confidence]\n results = enumerate(results)\n # sort results by confidence\n sorted_results = sorted(results, key=lambda x: x[1], reverse=True)\n top_result = sorted_results[0]\n msg_string = \"(%.0f ms) The image %s was classified as %s with confidence %s.\" % (inference_time, os.path.join(args.image_dir, image), \n classes_entries[top_result[0]], \n top_result[1])\n print(msg_string)\n except: \n tb = traceback.format_exc()\n if \"StatusCode.UNAVAILABLE\" in tb:\n msg_string = \"Unable to inference because AzureML host container is not done flashing the FPGA. If still not available in 5 minutes, check logs of module.\"\n elif \"Object reference not set to an instance of an object\" in tb:\n msg_string = \"Unable to inference because the names of the input and output tensors used for scoring are incorrect.\\n\" + \\\n \"Please update the docker CMD arguments to include the correct --input-tensors and --output-tensors parameters.\"\n else: \n msg_string = \"Unable to inference for unknown reason. 
See stack trace below:\\n{}\".format(tb)\n print(msg_string)\n print(\"Trying again in {} seconds...\".format(args.wait))\n \n if DEVICE_CONNECTION_STRING and not args.suppress_messages:\n send_iothub_message(iothub_client, msg_string)\n time.sleep(args.wait)\n\nif __name__ == \"__main__\":\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--image-dir\", type=str, default=\"./assets/\", dest='image_dir',\n help=\"The file path of the image to inference. Default: './assets/'\")\n parser.add_argument(\"-i\", \"--input-tensors\", type=str, default=\"Placeholder:0\", dest='input_tensors',\n help=\"The name of the input tensor you specified when converting your model.\\n\" + \\\n \"Default for Brainwave resnet50: 'Placeholder:0'\")\n parser.add_argument(\"-o\", \"--output-tensors\", type=str, default=\"classifier/resnet_v1_50/predictions/Softmax:0\", dest='output_tensors',\n help=\"The name of the output tensor you specified when converting your model. \\n\" + \\\n \"Default for Brainwave resnet50: 'classifier/resnet_v1_50/predictions/Softmax:0'\")\n parser.add_argument(\"-a\", \"--address\", default=\"azureml-host\",\n help=\"The address of the inferencing container. \\n\" +\n \"For IOT Edge, this is name of the inferencing host module on the IOT Edge device.\\n\" +\n \"Default: azureml-host\")\n parser.add_argument(\"-p\", \"--port\", default=50051,\n help=\"The port of the inferencing container. \\n\" +\n \"Default: 50051.\")\n parser.add_argument(\"-w\", \"--wait\", default=10, type=int,\n help=\"Time to wait between each inference call. \\n\" +\n \"Default: 10.\")\n parser.add_argument(\"-s\", \"--suppress-messages\", action='store_true', dest='suppress_messages',\n help=\"Flag to suppress IOT Hub messages. Default: False.\\n\" + \\\n \"Use --wait flag to lessen or this flag to turn off IOT hub messaging to avoid reaching your limit of IOT Hub messages.\")\n args = parser.parse_args()\n main(args)","sub_path":"deploy-to-databox-edge/sample-client/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"635320556","text":"import json\n\nimport click\nfrom fabric.api import *\n\nPROJECT_CONFIG_FILE = \"project_config.json\"\nPROJECT_PATH = '/var/www/mysite/'\n\n@task\ndef update_sys():\n sudo('apt-get update --fix-missing')\n sudo('apt-get upgrade -y ')\n\n\n@task\ndef deploy_site():\n sudo('mkdir -p {}'.format(PROJECT_PATH))\n local('git archive --format=tar HEAD > dist.tar')\n with cd(PROJECT_PATH):\n sudo('rm -rf *')\n put('dist.tar', 'dist.tar', use_sudo=True)\n sudo('tar vxf dist.tar')\n sudo('rm dist.tar')\n\n sudo('docker-compose pull')\n sudo('docker-compose up --force-recreate --detach')\n\n sudo('chown -R www-data:www-data .')\n local('rm dist.tar')\n\n\n@click.group()\ndef main():\n print('set host ...')\n with open(PROJECT_CONFIG_FILE) as fp:\n config = json.load(fp)\n env.hosts = config.get('hosts', [])\n env.key_filename = config.get('pem_file')\n\n\n@main.command()\n@click.argument('operation', type=click.Choice(['update']))\ndef server(operation):\n print('server')\n if operation == 'update':\n execute(update_sys)\n\n\n@main.command()\ndef deploy():\n execute(deploy_site)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"contl.py","file_name":"contl.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"36842658","text":"# 
Importing required libraries\nimport datetime\nimport time\nimport hashlib\nimport json\n# (NOT USED RIGHT NOW) import requests # pip install requests\nimport pickle\nimport os\nimport calendar\n\nfrom django.http import JsonResponse, HttpResponse # pip install django\nfrom urllib.parse import urlparse\nfrom uuid import uuid4\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.data = []\n genesis_block = self.create_block(\n timestamp=time.ctime(), previous_hash='0', proof='0')\n self.chain.append(genesis_block)\n # (DISTRIBUTION NOT SET UP YET) self.nodes = set()\n\n def create_block(self, timestamp, previous_hash, proof):\n block = {'Index': len(self.chain) + 1,\n 'Timestamp': timestamp,\n 'Data': self.data,\n 'Previous_hash': previous_hash,\n 'Proof': proof}\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def mine_block(self):\n previous_block = self.get_previous_block()\n previous_hash = self.hash(previous_block)\n new_proof = 1\n check_proof = False\n while check_proof is False:\n timestamp = time.ctime()\n block = self.create_block(timestamp, previous_hash, new_proof)\n hash_val = self.hash(block)\n if hash_val[0:2] == '00':\n self.chain.append(block)\n self.data = []\n check_proof = True\n else:\n new_proof += 1\n return block\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def add_data_to_pool(self, data, timestamp):\n self.data.append({'Data': data,\n 'Timestamp': timestamp})\n\n\n'''\n def is_chain_valid(self, chain): # NEED TO CHANGE THIS ONE\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['Previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['Proof']\n proof = block['Proof']\n hash_operation = hashlib.sha256(\n str(proof**2 - previous_proof**2).encode()).hexdigest()\n if hash_operation[0:2] != '00':\n return False\n previous_block = block\n block_index += 1\n return True\n\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n\n def replace_chain(self):\n network = self.nodes\n longest_chain = None\n max_length = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n # If a chain longer than the current is found.\n # longest_chain would be None if this node already has the longest.\n self.chain = longest_chain\n return True\n return False\n'''\n\n\ndef save_chain(blockchain, blockchain_path):\n with open(blockchain_path, 'wb') as blockchain_file:\n pickle.dump(blockchain, blockchain_file)\n\n\ndef init_chain():\n blockchain_path = os.path.join(os.path.dirname(\n __file__), '..', 'app_files/blockchain.blockchain')\n if os.path.isfile(blockchain_path) and os.path.getsize(blockchain_path):\n with open(blockchain_path, 'rb') as blockchain_file:\n blockchain = pickle.load(blockchain_file)\n else:\n blockchain = Blockchain()\n save_chain(blockchain, blockchain_path)\n return blockchain, blockchain_path\n\n\ndef get_previous_block(request):\n try:\n blockchain, _ = init_chain()\n except:\n return HttpResponse(\"Corrupted chain.\", status=404)\n previous_block = blockchain.get_previous_block()\n response = {'Index': 
previous_block['Index'],\n                'Timestamp': previous_block['Timestamp'],\n                'Data': previous_block['Data'],\n                'Previous_hash': previous_block['Previous_hash'],\n                'Proof': previous_block['Proof']}\n    return JsonResponse(response, status=200)\n\n\ndef mine_block(request):\n    blockchain, blockchain_path = init_chain()\n    if len(blockchain.data) < 2:\n        return HttpResponse('You need to add more data to the pool before mining a block!', status=400)\n    previous_block = blockchain.get_previous_block()\n    current_time = calendar.timegm(time.strptime(time.ctime()))\n    previous_time = calendar.timegm(time.strptime(previous_block['Timestamp']))\n    if current_time - previous_time <= 1:\n        return HttpResponse(\"You need to wait a couple of seconds between mining blocks.\", status=429)\n    added_block = blockchain.mine_block()\n    response = {'Index': added_block['Index'],\n                'Timestamp': added_block['Timestamp'],\n                'Data': added_block['Data'],\n                'Previous_hash': added_block['Previous_hash'],\n                'Proof': added_block['Proof']}\n    save_chain(blockchain, blockchain_path)\n    return JsonResponse(response, status=201)\n\n\ndef add_data_to_pool(request):\n    blockchain, blockchain_path = init_chain()\n    posted_data = request.POST\n    textData = posted_data['textData']\n    current_time = calendar.timegm(time.strptime(time.ctime()))\n    if len(blockchain.data) >= 2:\n        return HttpResponse('You need to mine a block before adding more data!', status=400)\n    if len(blockchain.data) != 0:\n        previous_time = calendar.timegm(\n            time.strptime(blockchain.data[-1]['Timestamp']))\n        if current_time - previous_time < 2:\n            return HttpResponse('You need to wait a couple of seconds before adding more data.', status=429)\n    if len(textData) == 0:\n        return HttpResponse('No data entered.', status=400)\n    blockchain.add_data_to_pool(textData, time.ctime())\n    save_chain(blockchain, blockchain_path)\n    return HttpResponse('Data successfully added to pool.', status=201)\n\n\n'''\n# Getting the full Blockchain in Postman\n# @app.route('/get_chain', methods=['GET'])\ndef get_chain():\n    response = {'Chain': blockchain.chain,\n                'Length': len(blockchain.chain)}\n    return JsonResponse(response)\n\n\n# Add new data to the blockchain pool\n# @app.route('/get_data', methods=['POST'])\ndef get_data():\n    json = request.get_json()\n    data_keys = ['sender', 'receiver', 'amount']\n    if not all(key in json for key in data_keys):\n        return 'Some elements of the data are missing.', 400\n    index = blockchain.add_data(\n        json['sender'], json['receiver'], json['amount'])\n    response = {'message': f'Transaction will be added to block {index}.'}\n    return JsonResponse(response)\n\n\n# Checking if the blockchain is valid\n# @app.route('/is_valid', methods=['GET'])\ndef is_valid():\n    is_valid = blockchain.is_chain_valid(blockchain.chain)\n    if is_valid:\n        response = {'message': 'Success, the Blockchain is valid.'}\n    else:\n        response = {'message': 'Error, the Blockchain is invalid.'}\n    return JsonResponse(response)\n\n\n# Distributing the blockchain\n# Connecting new nodes\n# @app.route('/connect_node', methods=['POST'])\ndef connect_node():\n    json = request.get_json()\n    nodes = json.get('nodes')\n    if nodes is None:\n        return 'No nodes.', 400\n    for node in nodes:\n        blockchain.add_node(node)\n    response = {'message': 'All nodes are now connected. 
The Calcon blockchain now contains the following nodes:',\n 'total_nodes': list(blockchain.nodes)}\n return JsonResponse(response)\n\n\n# Replacing the chain with the longest chain if needed\n# @app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message': 'The chain was replaced.',\n 'new_chain': blockchain.chain}\n else:\n response = {'message': 'The chain was not replaced',\n 'current_chain': blockchain.chain}\n return JsonResponse(response)\n'''\n","sub_path":"my_site/blockchain/app_functions/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":8415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"612712671","text":"__author__ = 'Gustavo'\n# Função: Retorna o fatorial de 10.\n\n# Carregando a função na memória.\ndef fatorial(n):\n f = 1\n while n > 0:\n f = f * n\n n -= 1\n return f\n# Invocando a função.\nfatorial(10)\n\nfor i in range(5):\n print(fatorial(i))\n","sub_path":"TWP200/TWP292.py","file_name":"TWP292.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"356044084","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\nimport pdb\nfrom torch.nn.parameter import Parameter\nimport numpy as np\nfrom .utils import LogAct, RecLogAct\n\n\nclass DMCell(nn.Module):\n\n def __init__(self,\n inp_num = 5,\n hid_num = 2,\n Je = 8.,\n Jm = -2,\n I0 = 0.0,\n dt = 1.,\n taus = 100.,\n gamma = 0.1,\n target_mode=\"x_target\",\n learning_rule = \"force\",\n activation = LogAct(),\n rec_activation = RecLogAct()):\n super().__init__()\n\n self.hid_num = hid_num\n self.inp_num = inp_num\n self.Je = Je\n self.Jm = Jm\n self.I0 = I0\n self.alpha = dt/taus\n self.gamma = gamma\n self.win = Parameter(torch.Tensor(hid_num,inp_num))\n self.wr = Parameter(torch.Tensor(hid_num,hid_num))\n self.act = activation\n self.rec_act = rec_activation\n self.learning_rule = learning_rule\n self.target_mode = target_mode\n\n self.init_weights()\n\n def init_weights(self):\n stdv = 1.0 / math.sqrt(self.hid_num)\n # stdv = 0.5\n if self.learning_rule == \"force\":\n self.win.data = torch.zeros((self.hid_num,self.inp_num))\n else:\n # self.win.data.uniform_(-stdv, stdv)\n self.win.data = torch.zeros((self.hid_num,self.inp_num))\n wr = np.ones((self.hid_num,self.hid_num))*self.Jm\n wr = wr+np.eye(self.hid_num)*self.Je - np.eye(self.hid_num)*self.Jm\n self.wr.data = torch.FloatTensor(wr)\n self.wr.requires_grad = False\n\n def apply_win(self,w):\n assert torch.Size(w.shape) == self.win.shape, \"w shape should be same, but got {}.format\"(w.shape)\n self.win.data = torch.FloatTensor(w)\n\n def forward(self,x,hid,y=None):\n \"\"\"\n learning_rule is \"force\" or \"bp\"\n \"\"\"\n if y is None:\n s = hid[0]\n # pdb.set_trace()\n rx = F.linear(x,self.win) + self.I0 + F.linear(s,self.wr)\n r = self.act(rx)\n s_new = s + self.alpha*(-s + (1.-s)*self.gamma*r)\n if self.target_mode == \"x_target\":\n return rx, (s_new,)\n else:\n return r, (s_new,)\n\n elif y is not None and self.learning_rule == \"force\":\n if self.target_mode == \"x_target\":\n y = y\n else:\n y = self.rec_act(y)\n batch_size = x.shape[0]\n s,P = hid\n rx = F.linear(x,self.win) + self.I0 + F.linear(s,self.wr)\n err = rx - y\n r = x\n\n k_fenmu = F.linear(r, P)\n rPr = torch.sum(k_fenmu * r, 1, True)\n\n k_fenzi = 1.0 /(1.0 + rPr)\n k 
= k_fenmu * k_fenzi\n\n            kall = k[:,:,None].repeat(1, 1, self.hid_num)\n            # kall = torch.repeat(k[:, :, None], (1, 1, self.hid_num))\n            dw = -kall * err[:, None, :]\n            self.win.copy_(self.win + torch.mean(dw, 0).transpose(1,0))\n\n            # pdb.set_trace()\n            P = P - F.linear(k.t(), k_fenmu.t())/batch_size\n            #\n            r = self.act(rx)\n            s_new = s + self.alpha*(-s + (1.-s)*self.gamma*r)\n\n            return err,r,(s_new, P)\n        else:\n            raise ValueError(\"No such inference or training configuration in the Decision Network !\")\n\n\n# class ForceCell(nn.Module):\n#\n#     def __init__(self,\n#                  inp_num = 5,\n#                  hid_num = 5,\n#                  Je = 8.,\n#                  Jm = -2,\n#                  I0 = 0.0,\n#                  dt = 0.1,\n#                  taus = 1.,\n#                  gamma = 0.1,):\n#         super().__init__()\n#         self.hid_num = hid_num\n#         self.Je = Je\n#         self.Jm = Jm\n#         self.I0 = I0\n#         self.alpha = dt/taus\n#         self.gamma = gamma\n#         self.win = Parameter(torch.Tensor(hid_num,inp_num))\n#         self.wr = Parameter(torch.Tensor(hid_num,hid_num))\n#         self.act = activation\n#         self.wr = Parameter(torch.Tensor(hid_num,hid_num))\n#         self.init_w()\n#\n#     def init_w(self):\n#         wr = np.ones((self.hid_num,self.hid_num))*self.Jm\n#         wr = wr*np.eye(self.Je/self.Jm)\n#         self.wr.weight.data.constant_(torch.FloatTensor(wr))\n#\n#     def forward(self,x,y,hid):\n#         batch_size = x.shape[0]\n#         P, Wout = hid\n#         xout = F.linear(x,Wout) + self.I0 + F.linear(s,self.wr)\n#         err = xout - y\n#         r = x\n#\n#         k_fenmu = F.linear(r, P)\n#         rPr = torch.sum(k_fenmu * r, 1, True)\n#\n#         k_fenzi = 1.0 /(1.0 + rPr)\n#         k = k_fenmu * k_fenzi\n#\n#         kall = torch.repeat(k[:, :, None], (1, 1, self.hid_num))\n#         dw = -kall * err[:, None, :]\n#\n#         Wout = Wout + np.mean(dw, 0)\n#         P = P - backend.matmul(k.T, k_fenmu)/batch_size\n#\n#         return err, (Wout, P)\n\n\nif __name__ == \"__main__\":\n    pass\n","sub_path":"lib/rnn/dm_rnn.py","file_name":"dm_rnn.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"31943603","text":"import unittest\n\nfrom xbasic.field import Field\nfrom xbasic.web.forms import Scope\n\n\nclass Form1(Scope):\n    login_details = Field(Scope).extends(\n        username=Field(str),\n        password=Field(str),\n    )\n\n    account_details = Field(Scope).extends(\n        first_name=Field(str),\n        last_name=Field(str),\n        age=Field(int),\n\n        contact=Field(Scope).extends(\n            email=Field(str),\n            phone=Field(str),\n        )\n    )\n\n\n# document loader, data loader, document dumper, data dumper\n\nclass FormTests(unittest.TestCase):\n    def test_sanity(self):\n        f1 = Form1()\n\n\n        self.assertIs(f1.parent, None)\n        self.assertIs(f1.root, f1)\n\n        self.assertIs(f1.login_details.parent, f1)\n        self.assertIs(f1.account_details.contact.parent, f1.account_details)\n        self.assertIs(f1.account_details.contact.root, f1)\n\n        x = f1.login_details\n        f1.login_details = {}\n        self.assertIs(f1.login_details, x)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"xbasic/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"32365732","text":"\"\"\"\r\nCompleted by Petrov Roman, IVT 1.1\r\nTask 3.1\r\nImplement a \"Guess the number\" game program,\r\nusing the format method to print information to the screen. 
\r\n\"\"\"\r\nimport random\r\n\r\nk = random.randint(1,10)\r\nx = None\r\n \r\nprint('Guess a number from 1 to 10: ')\r\n\r\nwhile True:\r\n    x = int(input())\r\n    if x == k:\r\n        print('Hooray, you guessed the number {}'.format(x))\r\n        break\r\n    print('Wrong guess')\r\n\r\n","sub_path":"prog_3/Section 6/var/3.1.py","file_name":"3.1.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"403479989","text":"import random\nfrom abc import ABCMeta, abstractmethod\n\nclass Point:\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\nclass AbstractSolution:\n    __metaclass__ = ABCMeta\n\n    @abstractmethod\n    def getFitness():\n        pass\n\nclass Solution(AbstractSolution):\n    def __init__(self, x, y):\n        self.point = Point(x, y)\n        self.fitness = 0\n\n    def getFitness(self):\n        fitness = 10*self.point.x + 5*self.point.y - self.point.x**2 - self.point.y**2\n        return fitness\n\ndef cullTheHerd(herd):\n    for i,x in enumerate(herd):\n        x.fitness = x.getFitness()\n    herd = sorted(herd, key=lambda bison:bison.fitness, reverse=True)\n    chosen = herd[:10]\n    for i,x in enumerate(chosen):\n        print(i, x.point.x, x.point.y, x.fitness)\n    return chosen\n\nherd = []\n\nfor i in range(0, 20):\n    x = random.random() * 20 - 10\n    y = random.random() * 20 - 10\n    herd.append(Solution(x, y))\n\nsurvivors = cullTheHerd(herd)","sub_path":"evolution.py","file_name":"evolution.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"644737492","text":"# -*- coding: utf-8 -*-\n\nfrom client import Client\nimport csv\n\n\nclass ClientList():\n    '''Implements the behavior of a list of clients.'''\n\n    def __init__(self):\n        '''Initialize the class instance.'''\n\n        self.clients = []\n\n    def __str__(self):\n        '''Return a string representation of the client list.'''\n\n        if len(self.clients) == 0:\n            return 'The client list is empty'\n\n        clients = '{0}CLIENTS{0}\\n'.format('-' * 12)\n        for num, _client in enumerate(self.clients):\n            clients += '{}. 
{}({})\\n'.format(num + 1, _client.name, _client.country)\n        clients += '-' * 31\n        return clients\n\n    def add_client_to_list(self, new_client):\n        '''Add a client to the list.'''\n\n        if isinstance(new_client, Client):\n            self.clients.append(new_client)\n        else:\n            raise TypeError('Cannot add an object of class {} to the list.'\n                            .format(new_client.__class__))\n\n    def load_clients_from_file(self, filename = 'settings/clients.csv'):\n        '''Return the list of clients read from a file.'''\n\n        with open(filename, 'r', encoding = 'cp1251') as f:\n            reader = csv.reader(f)\n            headers = next(reader)\n            for row in reader:\n                new_client = Client.convert_from_csv_string(row)\n                self.add_client_to_list(new_client)\n","sub_path":"Scripts/client_list.py","file_name":"client_list.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"303183030","text":"# -*- coding: utf-8 -*-\n\n\"\"\"This module contains functions that provide summaries of the errors encountered while parsing a BEL script\"\"\"\n\nfrom collections import Counter, defaultdict\n\nfrom pybel.constants import ANNOTATIONS\nfrom pybel.parser.exc import *\nfrom pybel.struct.filters.edge_predicates import edge_has_annotation\nfrom pybel.struct.summary.node_summary import get_names_by_namespace, get_namespaces\nfrom ..utils import count_dict_values\n\n__all__ = [\n    'count_error_types',\n    'count_naked_names',\n    'get_naked_names',\n    'get_incorrect_names_by_namespace',\n    'get_incorrect_names',\n    'get_undefined_namespaces',\n    'get_undefined_namespace_names',\n    'calculate_incorrect_name_dict',\n    'calculate_error_by_annotation',\n    'group_errors',\n    'get_names_including_errors',\n    'get_names_including_errors_by_namespace',\n    'get_undefined_annotations',\n    'get_namespaces_with_incorrect_names',\n    'get_most_common_errors',\n]\n\n\ndef count_error_types(graph):\n    \"\"\"Counts the occurrence of each type of error in a graph\n\n    :param pybel.BELGraph graph: A BEL graph\n    :return: A Counter of {error type: frequency}\n    :rtype: collections.Counter\n    \"\"\"\n    return Counter(e.__class__.__name__ for _, _, e, _ in graph.warnings)\n\n\ndef _naked_names_iter(graph):\n    \"\"\"Iterates over naked name warnings from a graph\n\n    :param pybel.BELGraph graph: A BEL graph\n    :rtype: iter[NakedNameWarning]\n    \"\"\"\n    for _, _, e, _ in graph.warnings:\n        if isinstance(e, NakedNameWarning):\n            yield e.name\n\n\ndef count_naked_names(graph):\n    \"\"\"Counts the frequency of each naked name (names without namespaces)\n\n    :param pybel.BELGraph graph: A BEL graph\n    :return: A Counter from {name: frequency}\n    :rtype: collections.Counter\n    \"\"\"\n    return Counter(_naked_names_iter(graph))\n\n\ndef get_naked_names(graph):\n    \"\"\"Gets the set of naked names in the graph\n\n    :param pybel.BELGraph graph: A BEL graph\n    :rtype: set[str]\n    \"\"\"\n    return set(_naked_names_iter(graph))\n\n\ndef get_namespaces_with_incorrect_names(graph):\n    \"\"\"Returns the set of all namespaces with incorrect names in the graph\n\n    :param pybel.BELGraph graph: A BEL graph\n    :rtype: set[str]\n    \"\"\"\n    return {\n        e.namespace\n        for _, _, e, _ in graph.warnings\n        if isinstance(e, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning))\n    }\n\n\ndef get_incorrect_names_by_namespace(graph, namespace):\n    \"\"\"Returns the set of all incorrect names from the given namespace in the graph\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param str namespace: The namespace to filter by\n    :return: The set of all incorrect names from the 
given namespace in the graph\n :rtype: set[str]\n \"\"\"\n return {\n e.name\n for _, _, e, _ in graph.warnings\n if isinstance(e, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)) and e.namespace == namespace\n }\n\n\ndef get_incorrect_names(graph):\n \"\"\"Returns the dict of the sets of all incorrect names from the given namespace in the graph\n\n :param pybel.BELGraph graph: A BEL graph\n :return: The set of all incorrect names from the given namespace in the graph\n :rtype: dict[str,set[str]]\n \"\"\"\n return {\n namespace: get_incorrect_names_by_namespace(graph, namespace)\n for namespace in get_namespaces(graph)\n }\n\n\ndef get_undefined_namespaces(graph):\n \"\"\"Gets all namespaces that aren't actually defined\n \n :param pybel.BELGraph graph: A BEL graph\n :return: The set of all undefined namespaces\n :rtype: set[str]\n \"\"\"\n return {\n e.namespace\n for _, _, e, _ in graph.warnings\n if isinstance(e, UndefinedNamespaceWarning)\n }\n\n\ndef get_undefined_namespace_names(graph, namespace):\n \"\"\"Gets the names from a namespace that wasn't actually defined\n \n :param pybel.BELGraph graph: A BEL graph\n :param str namespace: The namespace to filter by\n :return: The set of all names from the undefined namespace\n :rtype: set[str]\n \"\"\"\n return {\n e.name\n for _, _, e, _ in graph.warnings\n if isinstance(e, UndefinedNamespaceWarning) and e.namespace == namespace\n }\n\n\ndef get_undefined_annotations(graph):\n \"\"\"Gets all annotations that aren't actually defined\n \n :param pybel.BELGraph graph: A BEL graph\n :return: The set of all undefined annotations\n :rtype: set[str]\n \"\"\"\n return {\n e.annotation\n for _, _, e, _ in graph.warnings\n if isinstance(e, UndefinedAnnotationWarning)\n }\n\n\n# FIXME need to change underlying definition and usage of this exception\ndef get_undefined_annotation_values(graph, annotation):\n \"\"\"Gets the values from an annotation that wasn't actually defined\n\n :param pybel.BELGraph graph: A BEL graph\n :param str annotation: The annotaton to filter by\n :return: The set of all values from the undefined annotation\n :rtype: set[str]\n \"\"\"\n raise NotImplementedError\n # return {e.value for _, _, e, _ in graph.warnings if isinstance(e, UndefinedAnnotationWarning) and e.annotation == annotation}\n\n\ndef calculate_incorrect_name_dict(graph):\n \"\"\"Groups all of the incorrect identifiers in a dict of {namespace: list of erroneous names}\n\n :param pybel.BELGraph graph: A BEL graph\n :return: A dictionary of {namespace: list of erroneous names}\n :rtype: dict[str, str]\n \"\"\"\n missing = defaultdict(list)\n\n for line_number, line, e, ctx in graph.warnings:\n if not isinstance(e, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)):\n continue\n missing[e.namespace].append(e.name)\n\n return dict(missing)\n\n\ndef calculate_error_by_annotation(graph, annotation):\n \"\"\"Groups the graph by a given annotation and builds lists of errors for each\n\n :param pybel.BELGraph graph: A BEL graph\n :param annotation: The annotation to group errors by\n :type annotation: str\n :return: A dictionary of {annotation value: list of errors}\n :rtype: dict[str, list[str]]\n \"\"\"\n results = defaultdict(list)\n\n for line_number, line, e, context in graph.warnings:\n if not context or not edge_has_annotation(context, annotation):\n continue\n\n values = context[ANNOTATIONS][annotation]\n\n if isinstance(values, str):\n results[values].append(e.__class__.__name__)\n elif isinstance(values, (set, tuple, list)):\n for value 
in values:\n results[value].append(e.__class__.__name__)\n\n return dict(results)\n\n\ndef group_errors(graph):\n \"\"\"Groups the errors together for analysis of the most frequent error\n\n :param pybel.BELGraph graph: A BEL graph\n :return: A dictionary of {error string: list of line numbers}\n :rtype: dict[str, list[int]]\n \"\"\"\n warning_summary = defaultdict(list)\n\n for ln, _, e, _ in graph.warnings:\n warning_summary[str(e)].append(ln)\n\n return dict(warning_summary)\n\n\ndef get_most_common_errors(graph, number=20):\n \"\"\"Gets the most common errors in a graph\n\n :param pybel.BELGraph graph:\n :param int number:\n :rtype: Counter\n \"\"\"\n return count_dict_values(group_errors(graph)).most_common(number)\n\n\ndef get_names_including_errors_by_namespace(graph, namespace):\n \"\"\"Takes the names from the graph in a given namespace (:func:`pybel.struct.summary.get_names_by_namespace`) and\n the erroneous names from the same namespace (:func:`get_incorrect_names_by_namespace`) and returns them together\n as a unioned set\n\n :param pybel.BELGraph graph: A BEL graph\n :param str namespace: The namespace to filter by\n :return: The set of all correct and incorrect names from the given namespace in the graph\n :rtype: set[str]\n \"\"\"\n return get_names_by_namespace(graph, namespace) | get_incorrect_names_by_namespace(graph, namespace)\n\n\ndef get_names_including_errors(graph):\n \"\"\"Takes the names from the graph in a given namespace and the erroneous names from the same namespace and returns\n them together as a unioned set\n\n :param pybel.BELGraph graph: A BEL graph\n :return: The dict of the sets of all correct and incorrect names from the given namespace in the graph\n :rtype: dict[str,set[str]]\n \"\"\"\n return {\n namespace: get_names_including_errors_by_namespace(graph, namespace)\n for namespace in get_namespaces(graph)\n }\n","sub_path":"src/pybel_tools/summary/error_summary.py","file_name":"error_summary.py","file_ext":"py","file_size_in_byte":8370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"265808296","text":"class Solution(object):\n def addBinary(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n a_num = int(a, 2)\n b_num = int(b, 2)\n return bin(a_num+b_num)[2:]","sub_path":"Python/66.py","file_name":"66.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"512105814","text":"import numpy as np\nimport pandas as pd\nfrom sys import argv\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv(argv[1])\nfig, ax = plt.subplots()\n\nplt.plot(data['epoch'], data['acc'], label=\"training\")\nplt.plot(data['epoch'], data['val_acc'], label=\"validation\")\nplt.xlabel(\"numper of epochs\")\nplt.ylabel(\"accuracy\")\nplt.legend(loc='lower right')\n\nplt.figure(2)\nplt.plot(data['epoch'], data['loss'], label=\"training\")\nplt.plot(data['epoch'], data['val_loss'], label=\"validation\")\nplt.xlabel(\"numper of epochs\")\nplt.ylabel(\"loss\")\nplt.legend()\n\n#plt.ylabel('some numbers')\n\n\nplt.show()\n","sub_path":"plot_learning_curves.py","file_name":"plot_learning_curves.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"12937466","text":"# -*- coding:utf-8 -*-\n# Author: xzq\n# Date: 2019-12-24 13:57\n\nfrom torchvision import transforms\nimport torch\nfrom torch.utils.data import 
Dataset\nimport numpy as np\nfrom PIL import Image\nimport cv2\nimport random\n\n# 使用ResNet18时需要尺寸为32x32\n# train_boarder = 32\ntrain_boarder = 112\n\n\ndef parse_line(line):\n \"\"\"\n 解析从txt文件中读取的每一行\n :param line:\n :return:\n \"\"\"\n line_parts = line.strip().split()\n img_name = line_parts[0]\n rect = list(map(int, list(map(float, line_parts[1:5]))))\n landmarks = list(map(float, line_parts[5:len(line_parts)]))\n return img_name, rect, landmarks\n\n\nclass FaceLandmarksDataset(Dataset):\n \"\"\"\n 自定义数据集\n \"\"\"\n def __init__(self, src_lines, phase, transform=None):\n \"\"\"\n :param src_lines: src_lines\n :param phase: whether we are training or not\n :param transform: data transform\n \"\"\"\n self.lines = src_lines\n self.phase = phase\n self.transform = transform\n\n def __len__(self):\n return len(self.lines)\n\n def __getitem__(self, idx):\n img_name, rect, landmarks = parse_line(self.lines[idx])\n # image\n img = Image.open(img_name).convert('L')\n img_crop = img.crop(tuple(rect))\n landmarks = np.array(landmarks).astype(np.float32)\n\n origin_width = rect[2] - rect[0]\n origin_height = rect[3] - rect[1]\n w_ratios = train_boarder / origin_width\n h_ratios = train_boarder / origin_height\n for k in range(0, len(landmarks), 2):\n landmarks[k] = round(landmarks[k] * w_ratios)\n landmarks[k + 1] = round(landmarks[k + 1] * h_ratios)\n\n sample = {'image': img_crop, 'landmarks': landmarks}\n sample = self.transform(sample)\n return sample\n\n\nclass ToTensor(object):\n \"\"\"\n 将ndarrays转换为张量Tensor\n 张量通道序列: N x C x H x W\n \"\"\"\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n image = np.expand_dims(image, axis=2)\n image = cv2.resize(image, (train_boarder, train_boarder))\n image = np.expand_dims(image, axis=2)\n # 使用ResNet18时需要将图像转为彩色图\n # image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)\n image = image.transpose((2, 0, 1))\n return {'image': torch.from_numpy(image), 'landmarks': torch.from_numpy(landmarks)}\n\n\ndef load_data(phase):\n \"\"\"\n 去除normalize\n :param phase:\n :return:\n \"\"\"\n data_file = phase + '.txt'\n with open(data_file) as f:\n lines = f.readlines()\n if phase == 'stage2_train':\n tsfm = transforms.Compose([\n ToTensor()\n ])\n else:\n tsfm = transforms.Compose([\n ToTensor()\n ])\n data_set = FaceLandmarksDataset(lines, phase, transform=tsfm)\n return data_set\n\n\ndef get_train_test_set():\n train_set = load_data('stage2_train')\n valid_set = load_data('stage2_test')\n return train_set, valid_set\n\n\nif __name__ == '__main__':\n train_sets = load_data('stage2_train')\n idx_test = random.randint(0, len(train_sets))\n sample_test = train_sets[idx_test]\n img_test = sample_test['image']\n # 将Tensor格式转换成OpenCV的图像格式\n img_test = img_test.numpy()\n # img_test = np.squeeze(img_test, axis=(1,))\n img_test = img_test.transpose((1, 2, 0))\n # 调用下面的cv2.circle时\n # 由于这里对img_test有数据操作,当传入circle函img_copy数是不连续的内存数据,\n # 而该函数输出的内存是连续的\n # 为了保证输入输出一致,这里调用copy()方法获取连续的内存数据img_copy\n img_copy = img_test.copy()\n landmarks_test = sample_test['landmarks']\n # 请画出人脸crop以及对应的landmarks\n # please complete your code under this blank\n for i in range(0, len(landmarks_test), 2):\n # 由于关键点坐标是相对于人脸矩形框的,绘制时需要调整\n center = (int(landmarks_test[i]), int(landmarks_test[i + 1]))\n cv2.circle(img_copy, center, 1, (255, 0, 0), -1)\n cv2.imshow(\"image\", img_copy)\n 
cv2.waitKey(0)\n","sub_path":"Detector/stage2_data.py","file_name":"stage2_data.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"396298765","text":"import typing\n\nfrom stests.core.orchestration import ExecutionContext\nfrom stests.core.utils import logger\nfrom stests.generators.meta import GENERATOR_MAP as MODULES\n\n\n\nclass WorkflowStep():\n \"\"\"A step with a phase of a broader workflow.\n \n \"\"\"\n def __init__(self, ctx: ExecutionContext, index: int, module):\n \"\"\"Constructor.\n \n \"\"\"\n # Workflow execution context information.\n self.ctx: ExecutionContext = ctx\n\n # Index within the set of phase steps.\n self.index: int = index\n\n # Flag indicating whether this is the last step within the phase.\n self.is_last: bool = False\n\n # Python module in which the step is declared.\n self.module = module\n\n # Execution error.\n self.error: typing.Union[str, Exception] = None\n\n # Execution result.\n self.result: typing.Union[None, typing.Callable] = None\n\n @property\n def description(self) -> str:\n return self.module.DESCRIPTION\n\n @property\n def has_verifer(self) -> bool:\n try:\n self.module.verify\n except AttributeError:\n return False\n else:\n return True\n\n @property\n def has_verifer_for_deploy(self) -> bool:\n try:\n self.module.verify_deploy\n except AttributeError:\n return False\n else:\n return True\n\n @property\n def label(self) -> str:\n return self.module.LABEL\n \n @property\n def is_async(self) -> bool: \n \"\"\"Is this effectively an asynchronous step - i.e. relies upon chain events to complete.\"\"\" \n return hasattr(self.module, \"verify_deploy\") \n\n\n def execute(self):\n \"\"\"Performs step execution.\n \n \"\"\"\n try:\n self.result = self.module.execute(self.ctx)\n except Exception as err:\n self.error = err\n\n\n def verify(self):\n \"\"\"Performs step verification.\n \n \"\"\"\n self.module.verify(self.ctx)\n\n\n def verify_deploy(self, dhash: str):\n \"\"\"Performs step deploy verification.\n \n \"\"\"\n self.module.verify_deploy(self.ctx, dhash)\n\n\nclass WorkflowPhase():\n \"\"\"A phase within a broader workflow.\n \n \"\"\"\n def __init__(self, ctx: ExecutionContext, index: int, module):\n \"\"\"Constructor.\n \n \"\"\"\n # Workflow execution context information.\n self.ctx: ExecutionContext = ctx\n\n # Index within the set of phases.\n self.index: int = index\n\n # Flag indicating whether this is the last phase within the workflow.\n self.is_last: bool = False\n\n # Python module in which the phase is declared.\n self.module = module\n\n # Associated steps.\n self.steps = [WorkflowStep(ctx, i, s) for i, s in enumerate(module.STEPS)]\n if self.steps:\n self.steps[-1].is_last = True\n\n\n def get_step(self, step_index: int) -> WorkflowStep:\n \"\"\"Returns a step within managed collection.\n \n \"\"\"\n return self.steps[step_index - 1]\n\n\nclass Workflow():\n \"\"\"A workflow executed in order to test a scenario.\n \n \"\"\"\n def __init__(self, ctx: ExecutionContext, module):\n \"\"\"Constructor.\n \n \"\"\"\n # Workflow execution context information.\n self.ctx: ExecutionContext = ctx\n\n # Python module in which the workflow is declared.\n self.module = module\n\n # Associated phases.\n self.phases = [WorkflowPhase(ctx, i, p) for i, p in enumerate(module.PHASES)]\n if self.phases:\n self.phases[-1].is_last = True\n\n\n @property\n def description(self):\n return self.module.DESCRIPTION\n\n @property\n def typeof(self):\n return 
self.module.TYPE\n\n \n def get_phase(self, phase_index: int) -> WorkflowPhase:\n \"\"\"Returns a phase within managed collection.\n \n \"\"\"\n return self.phases[phase_index - 1]\n\n\n def get_step(self, phase_index: int, step_index: int) -> WorkflowStep:\n \"\"\"Returns a step within managed collection.\n \n \"\"\"\n phase = self.get_phase(phase_index) \n\n return phase.get_step(step_index)\n\n\n @staticmethod\n def create(ctx: ExecutionContext):\n \"\"\"Simple factory method.\n \n :param ctx: Workflow execution context information.\n\n :returns: Workflow wrapper instance.\n\n \"\"\"\n try:\n MODULES[ctx.run_type]\n except KeyError:\n raise ValueError(f\"Unsupported workflow type: {ctx.run_type}\")\n else:\n return Workflow(ctx, MODULES[ctx.run_type])\n\n\n @staticmethod\n def get_phase_(ctx: ExecutionContext, phase_index: int) -> WorkflowPhase:\n \"\"\"Simple factory method.\n \n :param ctx: Workflow execution context information.\n\n :returns: Workflow wrapper instance.\n\n \"\"\"\n try:\n wflow = Workflow.create(ctx)\n except:\n return None\n else:\n return wflow.get_phase(phase_index)\n\n\n @staticmethod\n def get_phase_step(ctx: ExecutionContext, phase_index: int, step_index: int) -> WorkflowStep:\n \"\"\"Simple factory method.\n \n :param ctx: Workflow execution context information.\n\n :returns: Workflow wrapper instance.\n\n \"\"\"\n try:\n wflow = Workflow.create(ctx)\n except:\n return None\n else:\n return wflow.get_step(phase_index, step_index)\n","sub_path":"stests/orchestration/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"603564426","text":"import json\nimport os\n\ndef __init__(path):\n global datapath\n datapath = path\n target = open(str(path) + 'settings.json', 'w')\n config = \"\"\"\n {\n \\\"port\\\": 19132,\n \\\"name\\\": \\\"A Minecraft: PE Server\\\"\n }\n \"\"\"\n parsed = json.loads(config)\n target.write(json.dumps(parsed, indent=4, sort_keys=True))\n target.write(json.dumps(parsed, indent=4, sort_keys=True))\n\n\ndef generate_config():\n target = open(str(datapath) + 'settings.json', 'w')\n config = \"\"\"\n {\n \\\"port\\\": 19132,\n \\\"name\\\": \\\"A Minecraft: PE Server\\\"\n }\n \"\"\"\n parsed = json.loads(config)\n target.write(json.dumps(parsed, indent = 4, sort_keys = True))\n\ndef load_config():\n with open(datapath + 'settings.json', 'r') as handle:\n parsed = json.load(handle)\n return parsed\n\ndef getData():\n if os.path.isfile(datapath + 'settings.json'):\n return load_config()\n else:\n generate_config()\n return load_config()","sub_path":"trinket/serverconfig.py","file_name":"serverconfig.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"187200691","text":"\"\"\"\nVarious containers.\n\"\"\"\n\n\ndef recursion_lock(retval=\"\", lock_name=\"__recursion_lock__\"):\n def decorator(func):\n def wrapper(self, *args, **kw):\n if getattr(self, lock_name, False):\n return retval\n setattr(self, lock_name, True)\n try:\n return func(self, *args, **kw)\n finally:\n delattr(self, lock_name)\n\n wrapper.__name__ = func.__name__\n return wrapper\n\n return decorator\n\n\nclass Container(dict):\n r\"\"\"\n Generic ordered dictionary that allows both key and attribute access.\n\n Containers are dictionaries, translating attribute access into key access, and preserving key order. 
Also they use call method to add keys, because **kw does not preserve order.\n\n Structs parse return containers, becuase their fields have order.\n\n Example::\n\n Container([(\"name\",\"anonymous\"),(\"age\",21)])\n \n Container(name=\"anonymous\")(age=21)\n\n # This is NOT correct because keyword arguments order is not preserved.\n Container(name=\"anonymous\",age=21)\n\n Container(container2tocopy)\n \"\"\"\n __slots__ = [\"__keys_order__\",\"__recursion_lock__\"]\n\n def __init__(self, *args, **kw):\n object.__setattr__(self, \"__keys_order__\", [])\n if isinstance(args, dict):\n for k, v in args.items():\n self[k] = v\n return\n for arg in args:\n if isinstance(arg, dict):\n for k, v in arg.items():\n self[k] = v\n else:\n for k, v in arg:\n self[k] = v\n for k, v in kw.items():\n self[k] = v\n\n def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)\n\n def __setitem__(self, key, val):\n if key not in self:\n self.__keys_order__.append(key)\n dict.__setitem__(self, key, val)\n\n def __delitem__(self, key):\n dict.__delitem__(self, key)\n self.__keys_order__.remove(key)\n\n __delattr__ = __delitem__\n __setattr__ = __setitem__\n\n def __call__(self, **kw):\n \"\"\"\n Chains adding new entries to the same container. See ctor.\n \"\"\"\n for k,v in kw.items():\n self.__setitem__(k, v)\n return self\n\n def clear(self):\n dict.clear(self)\n del self.__keys_order__[:]\n\n def pop(self, key, *default):\n \"\"\"\n Removes and returns the value for a given key, raises KeyError if not found.\n \"\"\"\n val = dict.pop(self, key, *default)\n self.__keys_order__.remove(key)\n return val\n\n def popitem(self):\n \"\"\"\n Removes and returns the last key and value from order.\n \"\"\"\n k = self.__keys_order__.pop()\n v = dict.pop(k)\n return k, v\n\n def update(self, seqordict, **kw):\n if isinstance(seqordict, dict):\n for k, v in seqordict.items():\n self[k] = v\n else:\n for k, v in seqordict:\n self[k] = v\n dict.update(self, kw)\n\n def copy(self):\n return Container(self.iteritems())\n\n __update__ = update\n __copy__ = copy\n\n def iterkeys(self):\n return iter(self.__keys_order__)\n\n def itervalues(self):\n return (self[k] for k in self.__keys_order__)\n\n def iteritems(self):\n return ((k, self[k]) for k in self.__keys_order__)\n\n def keys(self):\n return self.__keys_order__\n\n def values(self):\n return list(self.itervalues())\n\n def items(self):\n return list(self.iteritems())\n\n __iter__ = iterkeys\n\n def __eq__(self, other, skiporder=False):\n if not isinstance(other, dict):\n return False\n if len(self) != len(other):\n return False\n if skiporder:\n for k,v in self.iteritems():\n if k not in other or v != other[k]:\n return False\n else:\n for (k,v),(k2,v2) in zip(self.iteritems(), other.iteritems()):\n if k != k2 or v != v2:\n return False\n return True\n\n def __ne__(self, other, skiporder=False):\n return not self.__eq__(other, skiporder)\n\n def _search(self, name, search_all):\n items = []\n for key in self.keys():\n try:\n if key == name:\n if search_all:\n items.append(self[key])\n else:\n return self[key]\n if type(self[key]) == Container or type(self[key]) == ListContainer:\n ret = self[key]._search(name, search_all)\n if ret is not None:\n if search_all:\n items.extend(ret)\n else:\n return ret\n except:\n pass\n if search_all:\n return items\n else:\n return None\n\n def search(self, name):\n return self._search(name, False)\n\n def search_all(self, name):\n return self._search(name, True)\n\n @recursion_lock()\n 
def __repr__(self):\n parts = [\"Container\"]\n for k,v in self.iteritems():\n if not k.startswith(\"_\"):\n parts.extend([\"(\",str(k),\"=\",repr(v),\")\"])\n if len(parts) == 1:\n parts.append(\"()\")\n return \"\".join(parts)\n\n @recursion_lock()\n def __str__(self, indentation=\"\\n \"):\n text = [\"Container: \"]\n for k,v in self.iteritems():\n if not k.startswith(\"_\"):\n text.extend([indentation, k, \" = \"])\n text.append(indentation.join(str(v).split(\"\\n\")))\n return \"\".join(text)\n\n\nclass FlagsContainer(Container):\n r\"\"\"\n A container providing pretty-printing for flags.\n\n Only set flags are displayed.\n \"\"\"\n\n def __eq__(self, other, skiporder=True):\n return super(FlagsContainer, self).__eq__(other, skiporder)\n\n def __ne__(self, other, skiporder=True):\n return not self.__eq__(other, skiporder)\n\n @recursion_lock()\n def __str__(self, indentation=\"\\n \"):\n text = [\"FlagsContainer: \"]\n for k,v in self.iteritems():\n if not k.startswith(\"_\") and v:\n text.extend([indentation, k, \" = \"])\n lines = str(v).split(\"\\n\")\n text.append(indentation.join(lines))\n return \"\".join(text)\n\n\nclass ListContainer(list):\n r\"\"\"\n A container for lists.\n \"\"\"\n\n @recursion_lock()\n def __str__(self, indentation=\"\\n \"):\n text = [\"ListContainer: \"]\n for k in self:\n text.extend([indentation])\n lines = str(k).split(\"\\n\")\n text.append(indentation.join(lines))\n return \"\".join(text)\n\n def _search(self, name, search_all):\n items = []\n for item in self:\n try:\n ret = item._search(name, search_all)\n except:\n continue\n if ret is not None:\n if search_all:\n items.extend(ret)\n else:\n return ret\n if search_all:\n return items\n else:\n return None\n\n def search(self, name):\n return self._search(name, False)\n\n def search_all(self, name):\n return self._search(name, True)\n\n\nclass LazyContainer(object):\n __slots__ = [\"subcon\", \"stream\", \"pos\", \"context\", \"_value\"]\n\n def __init__(self, subcon, stream, pos, context):\n self.subcon = subcon\n self.stream = stream\n self.pos = pos\n self.context = context\n self._value = NotImplemented\n\n def __eq__(self, other):\n try:\n return self._value == other._value\n except AttributeError:\n return False\n\n def __ne__(self, other):\n return not (self == other)\n\n def __str__(self):\n if self._value is NotImplemented:\n text = \"\"\n else:\n text = str(self._value)\n return \"%s: %s\" % (self.__class__.__name__, text)\n\n def read(self):\n self.stream.seek(self.pos)\n return self.subcon._parse(self.stream, self.context)\n\n def dispose(self):\n self.subcon = None\n self.stream = None\n self.context = None\n self.pos = None\n\n def _get_value(self):\n if self._value is NotImplemented:\n self._value = self.read()\n return self._value\n\n value = property(_get_value)\n\n has_value = property(lambda self: self._value is not NotImplemented)\n\n","sub_path":"construct/lib/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":8683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"206822994","text":"import keras\nimport math\nimport time\nimport numpy as np\nfrom keras.datasets import cifar10\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import Conv2D, Dense, Input, add, Activation, GlobalAveragePooling2D, multiply, Reshape\nfrom keras.layers import Lambda, concatenate\nfrom keras.initializers import he_normal\nfrom 
keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint\nfrom keras.models import Model\nfrom keras import optimizers\nfrom keras.utils import multi_gpu_model\nfrom keras import regularizers\nfrom keras.applications.vgg19 import VGG19\n\n\n\nnum_classes = 10\nbatch_size = 120 # 120 \niterations = 782 # 416 # total data / iterations = batch size\nepochs = 250\nepochs1 = 10\n\nmean = [125.307, 122.95, 113.865]\nstd = [62.9932, 62.0887, 66.7048]\n\nfrom keras import backend as K\nif('tensorflow' == K.backend()):\n import tensorflow as tf\n from keras.backend.tensorflow_backend import set_session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):\n # Load the raw CIFAR-10 data\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n x_val = x_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n x_train = x_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n x_test = x_test[mask]\n y_test = y_test[mask]\n\n return x_train, y_train, x_val, y_val, x_test, y_test\n \ndef scheduler(epoch):\n if epoch < 100:\n return 0.1\n if epoch < 225:\n return 0.01\n return 0.001\n\nif __name__ == '__main__':\n\n # load data\n # (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n x_train, y_train, x_val, y_val, x_test, y_test = get_CIFAR10_data()\n \n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_val = keras.utils.to_categorical(y_val, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n x_train = x_train.astype('float32')\n x_val = x_val.astype('float32')\n x_test = x_test.astype('float32')\n \n # - mean / std\n for i in range(3):\n x_train[:,:,:,i] = (x_train[:,:,:,i] - mean[i]) / std[i]\n x_test[:,:,:,i] = (x_test[:,:,:,i] - mean[i]) / std[i]\n x_val[:,:,:,i] = (x_val[:,:,:,i] - mean[i]) / std[i]\n\n\n # print('Train data shape before: ', x_train.shape)\n # print('Validation data shape before: ', x_val.shape)\n # print('Test data shape before: ', x_test.shape)\n # print('Type train: ', type(x_train))\n \n # x_train = tf.image.resize_images(x_train, [96, 96], method=0).eval(session = sess)\n # x_test = tf.image.resize_images(x_test, [96, 96], method=0).eval(session = sess)\n # x_val = tf.image.resize_images(x_val, [96, 96], method=0).eval(session = sess)\n \n # print('Train data shape: ', x_train.shape)\n # print('Validation data shape: ', x_val.shape)\n # print('Test data shape: ', x_test.shape)\n # print('Type train: ', type(x_train))\n # setting input pic\n input_img = Input(shape=(32, 32, 3)) \n # create the base pre-trained model\n base_model = VGG19(input_tensor=input_img, weights='imagenet', include_top=False)\n\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n # let's add a fully-connected layer\n x = Dense(1024, activation='relu')(x)\n # and a logistic layer -- let's say we have 200 classes\n predictions = Dense(10, activation='softmax')(x)\n\n # this is the model we will train\n model = Model(inputs=base_model.input, outputs=predictions)\n\n print(model.summary())\n\n # first: train only the top layers (which were randomly initialized)\n # i.e. 
freeze all convolutional VGG19 layers\n for layer in base_model.layers:\n layer.trainable = False\n\n # set optimizer\n parallel_model = multi_gpu_model(model, gpus=2)\n sgd = optimizers.SGD(lr=0.0001, momentum=0.9, nesterov=True)\n parallel_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n \n\n # set callback\n tb_cb = TensorBoard(log_dir='./VGG19/', histogram_freq=0) # tensorboard log\n change_lr = LearningRateScheduler(scheduler) # learning rate scheduler\n ckpt = ModelCheckpoint('./ckpt_vgg.{epoch:02d}-{val_acc:.4f}.h5', save_best_only=True, mode='auto', period=25) # checkpoint \n cbks = [change_lr, tb_cb] \n\n # set data augmentation\n print('Using real-time data augmentation.')\n\n datagen = ImageDataGenerator(horizontal_flip=True,\n width_shift_range=0.125,height_shift_range=0.125,fill_mode='reflect')\n datagen.fit(x_train)\n\n # start training\n start = time.time()\n # parallel_model.fit(x_train, y_train,\n # epochs=epochs,steps_per_epoch=iterations, callbacks=cbks,\n # validation_data=(x_val, y_val),validation_steps=50)\n parallel_model.fit_generator(datagen.flow(x_train, y_train,batch_size=batch_size), steps_per_epoch=iterations, epochs=epochs, callbacks=cbks,validation_data=(x_val, y_val))\n\n loss, accuracy = parallel_model.evaluate(x_test,y_test)\n print('\\ntest loss',loss)\n print('accuracy',accuracy)\n end = time.time()\n print('transfer learning time',end-start) \n model.save('transfer_VGG19.h5')\n\n\n # let's visualize layer names and layer indices to see how many layers\n # we should freeze:\n for i, layer in enumerate(base_model.layers):\n print(i, layer.name)\n\n # fine-tune from block4 onwards: freeze every layer up to and\n # including 'block3_pool' and unfreeze the rest (look the index\n # up by layer name so it stays correct for this architecture):\n block3_pool = next(i for i, layer in enumerate(model.layers) if layer.name == 'block3_pool') + 1\n for layer in model.layers[:block3_pool]:\n layer.trainable = False\n for layer in model.layers[block3_pool:]:\n layer.trainable = True\n\n # set optimizer\n # we need to recompile the model for these modifications to take effect\n # we use SGD with a low learning rate\n\n sgd = optimizers.SGD(lr=0.0001, momentum=0.9, nesterov=True)\n parallel_model = multi_gpu_model(model, gpus=2)\n parallel_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n \n\n # set callback\n tb_cb = TensorBoard(log_dir='./VGG19_finetune/', histogram_freq=0) # tensorboard log\n # change_lr = LearningRateScheduler(scheduler) # learning rate scheduler\n # ckpt = ModelCheckpoint('./ckpt_inception.h5', save_best_only=True, mode='auto', period=1) # checkpoint \n cbks = [tb_cb] \n\n # set data augmentation\n print('Using real-time data augmentation.')\n\n datagen = ImageDataGenerator(horizontal_flip=True,\n width_shift_range=0.125,height_shift_range=0.125,fill_mode='reflect')\n datagen.fit(x_train)\n\n # start training\n start = time.time()\n # parallel_model.fit(x_train, y_train,\n # epochs=epochs,steps_per_epoch=iterations, callbacks=cbks,\n # validation_data=(x_val, y_val),validation_steps=50)\n parallel_model.fit_generator(datagen.flow(x_train, y_train,batch_size=batch_size), steps_per_epoch=iterations, epochs=epochs1, callbacks=cbks,validation_data=(x_val, y_val))\n\n loss, accuracy = parallel_model.evaluate(x_test,y_test)\n print('\\ntest loss',loss)\n print('accuracy',accuracy)\n end = time.time()\n print('fine tune time',end-start) \n model.save('finetune_VGG19.h5')\n\n\n\n 
\n","sub_path":"4.Transfer_learning/VGG19.py","file_name":"VGG19.py","file_ext":"py","file_size_in_byte":7750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"100108778","text":"import os\nimport random\nimport re\nimport string\n\n\ndef create_file(file_name, directory, size):\n regex = r\"(\\d+)([a-zA-Z]*)\"\n value, suffix = re.findall(regex, size)[0]\n suffix_to_bytes = {\n \"\": 1,\n \"B\": 1,\n \"KB\": 1024,\n \"MB\": 1048576,\n \"GB\": 1073741824,\n \"TB\": 1099511627776\n }\n size_in_bytes = int(value) * suffix_to_bytes[suffix.upper()]\n alphanumeric_characters = string.ascii_letters + string.digits\n token = \"\".join(\n random.choice(alphanumeric_characters) for _ in range(size_in_bytes)\n )\n file_path = os.path.join(directory, file_name)\n file_handle = open(file_path, \"w\")\n file_handle.write(token)\n\n\nif __name__ == \"__main__\":\n create_file(\"test1.txt\", \"/home/victor/\", \"2KB\")\n create_file(\"test2.txt\", \"/home/victor/\", \"10KB\")\n create_file(\"test3.txt\", \"/home/victor/\", \"1024\")\n create_file(\"test4.txt\", \"/home/victor/\", \"2MB\")\n create_file(\"test5.txt\", \"/home/victor/\", \"1B\")\n","sub_path":"lesson_1/homework/pep8_me.py","file_name":"pep8_me.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"239116246","text":"\"\"\"\nScript for reducing Effelsberg data\n\"\"\"\nimport astropy.units as u\nfrom baseband import vdif\nfrom astropy.time import Time, TimeDelta\nimport numpy as np\nimport sys\nimport folding_pipeline as sr\nfrom baseband_tasks.shaping import Reshape\nfrom baseband_tasks.io import hdf5\nimport traceback\nimport time\n\n# OS Things\nfdir = '/mnt/scratch-lustre/fsyed/B1133+16/Analysis2020/gk049e/baseband_data/ef/'\nfname = sys.argv[1]\noutput_name = '/mnt/scratch-lustre/fsyed/B1133+16/Analysis2020/gk049e/numpy_arrays/ef/' + fname[:-5]\nprint(\"Output File Name: {}\".format(output_name))\n\n# Load Data\nfrequency = np.array([[332.00], [332.00]]) * u.MHz\n\n# NOTE: Sideband should be [[-1,-1], [1,1]] according to .vex file. Data did not turn our great however so I am looking into it. \nsideband = np.array([[1, 1], [-1, -1]])\n\npolarization = ['R', 'L'] # Right circular polarization & left circular polarization\ndispersion_measure = 4.84066 * u.pc / u.cm**3\npolyco_file = '/mnt/scratch-lustre/fsyed/B1133+16/Analysis2020/gk049e/polycos/ef/polyco_new.dat'\nfullpol = False\nprint(\"Parameters set\")\n\n# Creating stream reader. 
For other formats, such as vdif, use vdif.open(...)\nfh = vdif.open(fdir + fname, 'rs')\nrh = Reshape(fh, (2, 2))\ndt = TimeDelta(10, format='sec')\n\n# Rounding Time\nstart = Time(rh.time)\nstart_time_str = start.iso.__str__()\nnew_time = Time(start_time_str, precision = -1)\nnew_time_str = new_time.iso.__str__()\nstart_time = Time(new_time_str) + dt\nprint(\"Opened stream reader with sample shape:\", rh.sample_shape)\nprint(\"Starting at time:\", start_time)\n\n# Initial waterfall interpreter\nWF = sr.Fold(rh, dispersion_measure, frequency, sideband, polyco_file, polarization, fullpol,start=start_time, nthreads=1)\nprint(\"Initialized waterfall interpreter with shape:\", WF.integrator.shape)\n\n# EXPERIMENTAL: Create stream writer.\nh5w = hdf5.open(\"/mnt/scratch-lustre/fsyed/B1133+16/Analysis2020/gk049e/hdf5_files/ef/\" + fname[:-5] + \".hdf5\", 'w', template=WF.integrator)\nprint(\"Output File name: \" + \"/mnt/scratch-lustre/fsyed/B1133+16/Analysis2020/gk049e/hdf5_files/ef/\" + fname[:-5] + \".hdf5\")\n\n# Determine how many samples to output at a time. I recommend 1.\nnsamples = WF.integrator.shape[0]\nnsamples_per_output = 1\ntimes = []\n\n# Start the timer\nprint(\"Starting timer\")\nruntime_start = time.time()\n\n# Loop through integrator, creating one time bin at a time\ntry:\n\n while WF.integrator.tell() < nsamples - nsamples_per_output:\n # OUTPUT to hdf5 file\n current_time = WF.integrate_and_save(count=nsamples_per_output, output=h5w)\n print(\"Run-time so far: \", time.time() - runtime_start)\n print()\n\n # Get the run-time\n runtime_end = time.time()\n runtime = runtime_end - runtime_start\n print(\"Run-Time For Program : {}\".format(runtime))\n\n # Save File\n h5w.close()\n\nexcept:\n print(\"Something went wrong. Likely, you inputted noise or too small of a sample set\")\n print(traceback.format_exc())\n print(sys.exc_info()[0])\n\n # Get the run-time\n runtime_end = time.time()\n runtime = runtime_end - runtime_start\n print(\"Run-Time For Program : {}\".format(runtime))\n\n # Save File\n h5w.close()\n","sub_path":"ef_fold.py","file_name":"ef_fold.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"322924485","text":"from Point import Point\nimport math\n\n\nclass ConvexPolygon(object):\n \"\"\"\n This class represents a convex Polygon in 2-d space\n \"\"\"\n def __init__(self, *args):\n \"\"\"\n This function initializes the ConvexPolygon class\n It sets the points in an ordered position from leftmost\n to rightmost.\n :param args: These are the points of the convex polygon\n Assumption: The points will be provided in the counter-clockwise\n direction\n \"\"\"\n self.points = []\n self.upperchain = []\n self.lowerchain = []\n # finding the min and max x values help us to find the\n # leftmost and rightmost points so we can cut the convex polygon\n # into upper and lower parts\n if len(args) < 3:\n raise ValueError('Polygon must have a minimum of 3 points')\n\n self.minxpoint = args[0]\n self.maxxpoint = args[0]\n for point in args:\n if type(point) is not Point:\n raise TypeError('ConvexPolygon: Must be initialized with points')\n else:\n # append point to list of points\n self.points.append(point)\n # also find min and max x points so we can divide the\n # polygon into upper and lower chains.\n if point.x < self.minxpoint.x:\n self.minxpoint = point\n elif point.x == self.minxpoint.x and point.y < self.minxpoint.y:\n self.minxpoint = point\n if point.x > 
self.maxxpoint.x:\n self.maxxpoint = point\n elif point.x == self.maxxpoint.x and point.y > self.maxxpoint.y:\n self.maxxpoint = point\n minindex = self.points.index(self.minxpoint)\n\n orderedpoints = []\n\n for i in range(minindex, minindex + len(self.points)):\n j = i\n if j >= len(self.points):\n j = (j % len(self.points))\n orderedpoints.append(self.points[j])\n self.points = orderedpoints\n minindex = self.points.index(self.minxpoint)\n maxindex = self.points.index(self.maxxpoint)\n\n for j in range(len(orderedpoints)):\n if j > maxindex:\n self.upperchain.append(self.points[j])\n if j == maxindex or j == minindex:\n self.upperchain.append(self.points[j])\n self.lowerchain.append(self.points[j])\n if minindex < j < maxindex:\n self.lowerchain.append(self.points[j])\n print(self.points)\n print(self.upperchain)\n print(self.lowerchain)\n\n def inside(self, p):\n \"\"\"\n determines if point p is in this convexpolygon\n :param p:\n :return:\n \"\"\"\n if self.ispointbelowupperchain(p) and self.ispointabovelowerchain(p):\n return True\n else:\n return False\n\n def ispointbelowupperchain(self, p):\n \"\"\"\n This function checks if the point p in on or below the upper\n chain\n :return: Boolean\n \"\"\"\n foundpoint, foundpointnext = self.findclosestlessthanpoint(self.upperchain, p)\n if self.is_between(foundpoint, p, foundpointnext):\n return True\n isvertical, m, c = self.getlineequation(foundpoint, foundpointnext)\n #y = mx+c\n if p.y - (m*p.x) - c < 0:\n return True\n else:\n return False\n\n def ispointabovelowerchain(self, p):\n \"\"\"\n This function checks if the point p in on or below the upper\n chain\n :return: Boolean\n \"\"\"\n foundpoint, foundpointnext = self.findclosestlessthanpoint(self.lowerchain, p)\n if self.is_between(foundpoint, p, foundpointnext):\n return True\n\n isvertical, m, c = self.getlineequation(foundpoint, foundpointnext)\n #y = mx+c\n if p.y - (m*p.x) - c > 0:\n return True\n else:\n return False\n\n\n def findclosestlessthanpoint(self, chain, p):\n \"\"\"\n\n :param chain:\n :param p:\n :return:\n \"\"\"\n for i in range(len(chain)-1):\n currentpoint, nextpoint = chain[i], chain[i+1]\n if currentpoint.x <= p.x < nextpoint.x and not self.islinevertical(currentpoint,nextpoint):\n foundpoint = currentpoint\n foundpointnext = nextpoint\n break\n else:\n pos = len(chain)-2\n foundpoint = chain[pos]\n foundpointnext = chain[pos+1]\n while self.islinevertical(foundpoint,foundpointnext) and pos >=0:\n pos -= 1\n foundpoint = chain[pos]\n foundpointnext = chain[pos + 1]\n\n return foundpoint, foundpointnext\n\n def islinevertical(self,a,b):\n return b.x - a.x == 0\n\n def getlineequation(self, a, b):\n \"\"\"\n Calculates the equation of the line given 2 points\n y = mx + c\n :param a: one point on the line\n :param b: second point on the line\n :return: m , c\n \"\"\"\n\n # if slope is not defined\n if b.x-a.x == 0:\n m = 'irrelevant'\n c = b.x\n return True, m, c\n m = (b.y- a.y) / (b.x-a.x)\n c = a.y - (m * a.x)\n return False, m, c\n\n def distance(self,a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)\n\n def is_between(self, a, c, b):\n return self.distance(a, c) + self.distance(c, b) == self.distance(a, b)\n\nif __name__==\"__main__\":\n points = [Point(*p) for p in [(0, 0), (2, 0), (2, 2), (0, 2)]]\n polygon = ConvexPolygon(*points)\n\n # test ordering 2\n points = [Point(*p) for p in [(2, 2), (0, 2), (0, 0), (2, 0)]]\n polygon = ConvexPolygon(*points)\n # print(polygon.points[0][0])\n p = Point(3, 0)\n q = Point(0, 0)\n r = Point(1, 1)\n\n 
closest, closestnext = polygon.findclosestlessthanpoint(polygon.lowerchain, p)\n #print(closest, \" \", closestnext)\n print(polygon.inside(p))\n print(polygon.inside(q))\n print(polygon.inside(r))\n\n\n\n\n\n","sub_path":"classexercises/HWPointInside/ConvexPolygon.py","file_name":"ConvexPolygon.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"175135506","text":"# Name: Оформление входящих чисел\n# Author: Panin Stanislav\n# Created: 13.03.2021\n\n\nlist_word = ['в', '5', 'часов', '17', 'минут', 'температура', 'воздуха', 'была', '+5', 'градусов']\n\n\ndef search_int_try(ls):\n list_id = []\n for el in range(0, len(ls)):\n try:\n num = int(ls[el])\n if len(ls[el]) < 2:\n ls[el] = '0' + ls[el]\n if '+' in ls[el]:\n if len(str(num)) < 2:\n ls[el] = int(ls[el])\n ls[el] = '+0' + str(ls[el])\n list_id.append(el)\n except ValueError:\n ls[el] = ls[el] + ' '\n return list_id\n\n\ndef edited_list(lis, id_el):\n id_el.reverse()\n for el in id_el:\n lis.insert(el + 1, '\" ')\n lis.insert(el, '\"')\n lis = ''.join(str(i) for i in lis)\n return lis\n\n\ndef main():\n id_elm = search_int_try(list_word)\n print(edited_list(list_word, id_elm))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"numeral_design.py","file_name":"numeral_design.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"76883099","text":"import scanpy as sc\nimport pandas as pd\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport re\n\n#--------------------variable------------------------\nfmt='png'\n\nfd_h5ad='./out/a00_preprocess_00_pp'\nfd_out='./out/a01_plot-pp_04_batch-diff'\n\nl_pair=['Ctrl-MethFix', 'Ctrl-RNAlater', 'MethFix-RNAlater']\nl_sample=['Ctrl', 'MethFix', 'RNAlater']\ndic_cmap={'Ctrl': '#4287f5', 'MethFix': '#f5a142', 'RNAlater': '#4bf542'}\n\nnn=10\nnpc=30\nn_genes=4000\n\n#-----------------setup----------------------------\nPath(fd_out).mkdir(exist_ok=True, parents=True)\n\n#-------------function------------------------------\ndef plot_batch(adata, f_out, title, dic_cmap=dic_cmap):\n\tl_cell=[i for i in l_sample if i in adata.obs['sample'].unique()]\n\tcmap=[dic_cmap[i] for i in l_cell]\n\t#1. plot\n\tfig, ax=plt.subplots(figsize=(10,10))\n\tax=sc.pl.umap(adata, color=['sample'], show=False, palette=cmap, frameon=False, s=8, alpha=0.8)\n\t#2. adjust\n\tax.set_title(title, fontsize=16, pad=25, weight='medium')\n\tplt.legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=False, prop={'size': 13})\n\n\tplt.tight_layout()\n\tplt.savefig(f_out, dpi=300)\n\tplt.close()\n\treturn\n\t\n##################################################################\nfor pair in l_pair:\n#\t#1. setup\n#\tsample1=pair.split('-')[0]\n#\tsample2=pair.split('-')[1]\n#\t\n#\tad1=sc.read(f'{fd_h5ad}/clean_{sample1}.h5ad')\n#\tad2=sc.read(f'{fd_h5ad}/clean_{sample2}.h5ad')\n#\t\n#\t#2. get common gene\n#\tl_gene=[i for i in ad1.var.index if i in ad2.var.index]\n#\tl_gene=[i for i in ad1.var.index if i in ad2.var.index]\n#\tl_gene=[i for i in l_gene if not re.match('^mt-*', i)]\n#\tl_gene=[i for i in l_gene if not re.match('^[A-Z][A-Z]+', i)]\n#\tl_gene=[i for i in l_gene if not ('Rik' in i)]\n#\tl_gene=[i for i in l_gene if not ('-' in i)]\n#\tl_gene=[i for i in l_gene if len(i)>1]\n#\tl_gene=[i for i in l_gene if not re.match('^Gm\\d+', i)]\n#\tl_gene.sort()\n#\t\n#\t#3. 
concat adata\n#\tad1=ad1[:, l_gene].copy()\n#\tad2=ad2[:, l_gene].copy()\n#\tadata=ad1.concatenate(ad2)\n#\t\n#\t#4. normalization, HVG\n#\tsc.pp.normalize_total(adata, exclude_highly_expressed=True)\n#\tsc.pp.log1p(adata)\n#\tadata.raw=adata\n#\t\n#\tsc.pp.highly_variable_genes(adata, n_top_genes=n_genes)\n#\tadata=adata[:, adata.var['highly_variable']].copy()\n\n#\t#5. PCA\n#\tsc.pp.regress_out(adata, ['n_counts', 'perc_others'])\n#\tsc.tl.pca(adata, svd_solver='arpack')\n\n#\t#6. calculate neighbor\n#\tsc.pp.neighbors(adata, n_neighbors=nn, n_pcs=npc)\n#\t\n#\t#7. embed- umap\n#\tsc.tl.umap(adata, n_components=2, random_state=42)\n#\tadata.write(f'{fd_out}/{pair}.h5ad')\n\t\n\t#8. plot\n\tadata=sc.read(f'{fd_out}/{pair}.h5ad')\n\n\tf_out=f'{fd_out}/{pair}.{fmt}'\n\ttitle=pair\n\tplot_batch(adata, f_out, title)\n\t\n\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"a01_plot-pp_04_batch-diff.py","file_name":"a01_plot-pp_04_batch-diff.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"182439075","text":"'''\nTask\nGiven a string, S, of length N that is indexed from 0 to N-1, print its even-indexed and odd-indexed characters as 2 space-separated strings on a single line (see the Sample below for more detail).\n\nNote: 0 is considered to be an even index.\n\nInput Format\nThe first line contains an integer, T (the number of test cases).\nEach line i of the T subsequent lines contain a String, S.\n\nOutput Format\nFor each String Sj (where 0<=j<=T-1), print Sj's even-indexed characters, followed by a space, followed by 's odd-indexed characters.\n'''\n\n# Enter your code here. Read input from STDIN. Print output to STDOUT\nage = int(input())\n\nfor i in range(age):\n s = str(input())\n s_even = str()\n s_odd = str()\n for j in range(len(s)):\n if j%2 == 0:\n s_even += s[j]\n else:\n s_odd += s[j]\n print(s_even, s_odd)\n","sub_path":"30 Days of Code/day06_lets_review.py","file_name":"day06_lets_review.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"89910118","text":"import hmac\nimport hashlib\nimport secrets\nimport json\nimport datetime\nimport traceback\n\nimport threading\nfrom login import login\n\nK_MANUAL_JSON_PATH = 'manualRestream.json'\nK_CONFIG_JSON_PATH = 'config.json'\nk_LOG_PATH = 'mainLog.log'\n\n\n_g_existed_cookies = {}\n\ndef myLogger(logStr):\n resStr = str(datetime.datetime.now()) + \" [MyLOGGER] \" + str(logStr)\n try:\n print(resStr)\n except Exception as e:\n print(e)\n with open(k_LOG_PATH, 'a+', encoding='utf-8') as tmpFile:\n tmpFile.write(resStr + '\\n')\n\ndef verifySecert(verifyMsg, i_msg):\n i_msg = str.encode(i_msg) if isinstance(i_msg, str) else i_msg\n key = configJson().get('subSecert', '')\n key = str.encode(key)\n hexdig = hmac.new(key, msg=i_msg, digestmod=hashlib.sha1).hexdigest()\n\n print(verifyMsg, hexdig)\n return verifyMsg == hexdig\n\ndef configJson():\n with open(K_CONFIG_JSON_PATH, 'r', encoding='utf-8') as f:\n configDict = json.loads(f.read())\n\n # greate the secerts key\n if configDict.get('subSecert') == \"\":\n configDict['subSecert'] = secrets.token_hex(16)\n saveConfigJson(configDict)\n return configDict\n\n\ndef getSubInfoWithSubChannelId(channelId):\n ret = None\n for subscribe in configJson().get('subscribeList', []):\n if subscribe.get('youtubeChannelId') == channelId:\n ret = subscribe\n break\n if ret['login_type'] == 'account':\n if 
channelId in _g_existed_cookies:\n ret['bilibili_cookiesStr'] = _g_existed_cookies[channelId]\n else:\n ret['bilibili_cookiesStr'] = login(ret['username'], ret['password'])\n if ret['bilibili_cookiesStr'] != '':\n _g_existed_cookies[channelId] = ret['bilibili_cookiesStr']\n return ret\n\n\ndef saveConfigJson(config_dict):\n with open(K_CONFIG_JSON_PATH, 'w', encoding='utf-8') as wf:\n json.dump(config_dict, wf, indent=4, sort_keys=True)\n\n\ndef addManualSrc(srcNote, srcLink):\n tmp_dict = manualJson()\n src_dict = tmp_dict.get('src_dict', {})\n src_dict[srcNote] = srcLink\n tmp_dict['src_dict'] = src_dict\n saveManualJson(tmp_dict)\n\ndef addManualDes(desNote, desLink):\n tmp_dict = manualJson()\n des_dict = tmp_dict.get('des_dict', {})\n des_dict[desNote] = desLink\n tmp_dict['des_dict'] = des_dict\n saveManualJson(tmp_dict)\n\n\ndef manualJson():\n manualDict = {\"src_dict\":{}, \"des_dict\":{}}\n try:\n with open(K_MANUAL_JSON_PATH, 'r', encoding='utf-8') as f:\n manualDict = json.loads(f.read())\n except FileNotFoundError:\n saveManualJson(manualDict)\n return manualDict\n\ndef saveManualJson(manualDict):\n with open(K_MANUAL_JSON_PATH, 'w', encoding='utf-8') as wf:\n json.dump(manualDict, wf, indent=4, sort_keys=True)\n\n\ndef runFuncAsyncThread(target_func, args):\n try:\n t = threading.Thread(target=target_func, args=args)\n t.start()\n except Exception as e:\n myLogger(traceback.format_exc())\n myLogger(str(e))\n","sub_path":"utitls.py","file_name":"utitls.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"307821950","text":"from django.db.models import Sum, Q\nfrom rest_framework import serializers\n\nfrom .models import Project, ProjectApproval, Purchase\nimport authenticate as auth\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n sum_budget = serializers.SerializerMethodField()\n sum_purchase_price = serializers.SerializerMethodField()\n leader = serializers.SerializerMethodField()\n\n class Meta:\n model = Project\n fields = ('id', 'title', 'description', 'accounting_type', 'leader', 'closed', 'sum_budget', 'date_created', 'date_updated', 'sum_purchase_price')\n\n def get_sum_budget(self, obj):\n query_result = ProjectApproval.objects.filter(\n project_id=obj.id, approved=True).aggregate(\n Sum('budget_amount'))['budget_amount__sum']\n return query_result or 0\n\n def get_sum_purchase_price(self, obj):\n query_result = Purchase.objects.filter(Q(approver__isnull=True) | Q(approved=True), project_id=obj.id).aggregate(\n Sum('price'))['price__sum']\n return query_result or 0\n\n def get_leader(self, obj):\n return auth.serializer.UserSerializer(obj.leader).data\n\n\nclass ProjectDetailSerializer(ProjectSerializer):\n purchases = serializers.SerializerMethodField()\n approvals = serializers.SerializerMethodField()\n\n sum_req_budget = serializers.SerializerMethodField()\n\n class Meta:\n model = Project\n fields = ('id', 'title', 'accounting_type', 'leader', 'closed',\n 'sum_budget', 'sum_req_budget', 'sum_purchase_price',\n 'approvals', 'purchases', 'description', 'date_created', 'date_updated')\n\n def get_purchases(self, obj):\n try:\n return PurchaseSerializer(\n Purchase.objects.filter(project_id=obj.id), many=True).data\n except:\n return []\n\n def get_approvals(self, obj):\n try:\n return ProjectApprovalSerializer(\n ProjectApproval.objects.filter(project_id=obj.id),\n many=True).data\n except:\n return []\n\n def get_sum_req_budget(self, obj):\n query_result = 
ProjectApproval.objects.filter(\n project_id=obj.id, approver__isnull=True).aggregate(\n Sum('budget_amount'))['budget_amount__sum']\n return query_result or 0\n\n\nclass CreateProjectSerializer(serializers.ModelSerializer):\n class Meta:\n model = Project\n fields = ('id', 'title', 'description', 'accounting_type', 'leader')\n\nclass ProjectApprovalSerializer(serializers.ModelSerializer):\n project = ProjectSerializer()\n\n class Meta:\n model = ProjectApproval\n fields = ('id', 'project', 'comment', 'approver', 'budget_amount', 'approved', 'date_created')\n\n\nclass CreateProjectApprovalSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProjectApproval\n fields = ('id', 'project', 'budget_amount')\n\n\nclass PurchaseSerializer(serializers.ModelSerializer):\n project = ProjectSerializer()\n\n class Meta:\n model = Purchase\n fields = ('id', 'title', 'project', 'comment', 'price', 'approver', 'approved', 'date_created')\n\n\nclass CreatePurchaseSerializer(serializers.ModelSerializer):\n class Meta:\n model = Purchase\n fields = ('id', 'title', 'project', 'price')\n\n","sub_path":"api/accounting/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"544828958","text":"def read_sites(path):\n container = []\n with open(path) as file:\n for line in file:\n container.append(line.strip().split('\\t')[-1])\n return(container)\n\n\ndef write_sites(sites, out_path):\n with open(out_path, 'w') as file:\n for line in sites:\n file.write(line + '\\n')\n file.close()\n\n\ndef extract_sites(peaks_path, out_path):\n sites = read_sites(peaks_path)\n write_sites(sites, out_path)\n return(0)","sub_path":"tools/extract_sites.py","file_name":"extract_sites.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"459282346","text":"import streamlit as st\r\nimport numpy as np\r\nimport pandas as pd\r\nimport plotly.express as ex\r\nfrom plotly.subplots import make_subplots\r\nimport plotly.graph_objects as go\r\nimport datetime\r\n\r\nst.set_page_config(layout=\"wide\") \r\n\r\ncol_names = ['Pid', 'Sex', 'Race', 'Ref_Date', 'Paper_Date', 'Referral_Date', 'Stat', 'Category', 'Offense',\r\n 'General_Category', 'OffenseDescription', 'Referral_Type']\r\n\r\nrefs = pd.read_csv('https://raw.githubusercontent.com/jlo-dcjd/ref-off-corr-st-app/main/Referrals%202010-2021%2009-21-2021%2017.06.csv', \r\n names=col_names, skiprows=1)\r\n\r\nrefs['Referral_Date'] = pd.to_datetime(refs['Referral_Date'])\r\n\r\ngeneral_2010 = refs.groupby(pd.Grouper(key='Referral_Date', freq='M'))['General_Category'].value_counts().unstack().fillna(0)\r\n\r\ngeneral_2015 = general_2010.loc[datetime.date(year=2015,month=10,day=1): ].copy()\r\ngeneral_2016 = general_2010.loc[datetime.date(year=2016,month=1,day=1): ].copy()\r\ngeneral_2015.drop(['Contempt'], axis=1, inplace=True) # remove other category\r\ngeneral_2016.drop(['Contempt'], axis=1, inplace=True) # remove other category\r\n\r\nst.title('Referral Offense Correlation')\r\n\r\n\r\nst.subheader('Select Referral Offenses')\r\noption = st.selectbox(\r\n 'Offense 1', general_2016.columns)\r\n \r\noption2 = st.selectbox(\r\n 'Offense 2', general_2016.columns)\r\n\r\n\r\n\r\n\r\n# ----------- scatter plots CY ---------------\r\nfig = make_subplots(rows=1, cols=6, shared_yaxes=True, subplot_titles=(\"2016\", \"2017\", \"2018\", '2019', '2020', 
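The SerializerMethodField sums in the serializer record above issue one aggregate query per serialized project. A sketch of computing both totals in a single queryset annotation instead; the reverse lookup names `projectapproval` and `purchase` are the Django defaults and are assumed here:

```python
from django.db.models import Q, Sum

# Hypothetical view-side queryset; reuses the Project model defined above.
projects = Project.objects.annotate(
    total_budget=Sum('projectapproval__budget_amount',
                     filter=Q(projectapproval__approved=True)),
    total_purchases=Sum('purchase__price',
                        filter=Q(purchase__approver__isnull=True) |
                               Q(purchase__approved=True)),
)
```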
'2021'))\r\n\r\nfig.add_trace(\r\n go.Scatter(\r\n x=general_2016[option][:12],\r\n y=general_2016[option2][:12],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=1\r\n)\r\nfig.add_trace(\r\n go.Scatter(\r\n x=general_2016[option][12:24],\r\n y=general_2016[option2][12:24],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=2\r\n)\r\nfig.add_trace(\r\n go.Scatter(\r\n x=general_2016[option][24:36],\r\n y=general_2016[option2][24:36],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=3\r\n)\r\nfig.add_trace(\r\n go.Scatter(\r\n x=general_2016[option][36:48],\r\n y=general_2016[option2][36:48],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=4\r\n)\r\nfig.add_trace(\r\n go.Scatter(\r\n x=general_2016[option][48:60],\r\n y=general_2016[option2][48:60],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=5\r\n)\r\nfig.add_trace(\r\n go.Scatter(\r\n x=general_2016[option][60:],\r\n y=general_2016[option2][60:],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=6\r\n)\r\n\r\nfig.update_xaxes(title_text=option, row=1, col=1)\r\nfig.update_yaxes(title_text=option2, row=1, col=1)\r\n\r\nfig.update_xaxes(tick0=0, dtick=10, row=1, col=1)\r\nfig.update_xaxes(tick0=0, dtick=10, row=1, col=3)\r\n\r\n\r\nfig.update_layout(width=1200, height=350, title_text=\"{} vs. {}\".format(option, option2))\r\n# -------------------------\r\n# ----------- scatter plots FY ---------------\r\nfig3 = make_subplots(rows=1, cols=6, shared_yaxes=False, subplot_titles=(\"2016\", \"2017\", \"2018\", '2019', '2020', '2021'))\r\n\r\nfig3.add_trace(\r\n go.Scatter(\r\n x=general_2015[option][:12],\r\n y=general_2015[option2][:12],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=1\r\n)\r\nfig3.add_trace(\r\n go.Scatter(\r\n x=general_2015[option][12:24],\r\n y=general_2015[option2][12:24],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=2\r\n)\r\nfig3.add_trace(\r\n go.Scatter(\r\n x=general_2015[option][24:36],\r\n y=general_2015[option2][24:36],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=3\r\n)\r\nfig3.add_trace(\r\n go.Scatter(\r\n x=general_2015[option][36:48],\r\n y=general_2015[option2][36:48],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=4\r\n)\r\nfig3.add_trace(\r\n go.Scatter(\r\n x=general_2015[option][48:60],\r\n y=general_2015[option2][48:60],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=5\r\n)\r\nfig3.add_trace(\r\n go.Scatter(\r\n x=general_2015[option][60:],\r\n y=general_2015[option2][60:],\r\n mode=\"markers\",\r\n ),\r\n row=1, col=6\r\n)\r\n\r\nfig3.update_xaxes(title_text=option, row=1, col=1)\r\nfig.update_yaxes(title_text=option2, row=1, col=1)\r\n\r\nfig3.update_xaxes(tick0=0, dtick=10, row=1, col=1)\r\nfig3.update_xaxes(tick0=0, dtick=10, row=1, col=3)\r\n\r\n\r\nfig3.update_layout(width=1200, height=350, title_text=\"{} vs. 
{}\".format(option, option2))\r\n# -------------------------\r\n\r\n# ------ Sub plots (line chart + r correlation) CY\r\nfig1 = make_subplots(rows=2, cols=1, shared_yaxes=False)\r\ny_list = ['2016', '2017', '2018', '2019', '2020', '2021']\r\n\r\nfig1.add_trace(\r\ngo.Scatter(x=general_2016.index, y=general_2016[option], name=option),\r\nrow=1, col=1\r\n)\r\nfig1.add_trace(\r\n go.Scatter(x=general_2016.index, y=general_2016[option2], name=option2),\r\n row=1, col=1\r\n)\r\n\r\nc16 = round(general_2016[option][:12].corr(general_2016[option2][:12], method='pearson'), 2)\r\nc17 = round(general_2016[option][12:24].corr(general_2016[option2][12:24], method='pearson'), 2)\r\nc18 = round(general_2016[option][24:36].corr(general_2016[option2][24:36], method='pearson'), 2)\r\nc19 = round(general_2016[option][36:48].corr(general_2016[option2][36:48], method='pearson'), 2)\r\nc20 = round(general_2016[option][48:60].corr(general_2016[option2][48:60], method='pearson'), 2)\r\nc21 = round(general_2016[option][60:].corr(general_2016[option2][60:], method='pearson'), 2)\r\nc_list = [c16, c17, c18, c19, c20, c21]\r\n\r\n\r\nfig1.add_trace(\r\n go.Scatter(x=y_list, y=c_list, name='Pearson Correlation'),\r\n row=2, col=1\r\n)\r\n\r\nfig1.update_yaxes(tick0=0, dtick=.3, row=2, col=1)\r\nfig1.update_layout(width=1200, title_text=\"{} vs. {}\".format(option, option2))\r\n# ----------------------------\r\n\r\n# ------ Sub plots (line chart + r correlation) FY\r\nfig2 = make_subplots(rows=2, cols=1, shared_yaxes=False)\r\ny_list = ['2016', '2017', '2018', '2019', '2020', '2021']\r\n\r\nfig2.add_trace(\r\ngo.Scatter(x=general_2015.index, y=general_2015[option], name=option),\r\nrow=1, col=1\r\n)\r\nfig2.add_trace(\r\n go.Scatter(x=general_2015.index, y=general_2015[option2], name=option2),\r\n row=1, col=1\r\n)\r\n\r\nc16 = round(general_2015[option][:12].corr(general_2015[option2][:12], method='pearson'), 2)\r\nc17 = round(general_2015[option][12:24].corr(general_2015[option2][12:24], method='pearson'), 2)\r\nc18 = round(general_2015[option][24:36].corr(general_2015[option2][24:36], method='pearson'), 2)\r\nc19 = round(general_2015[option][36:48].corr(general_2015[option2][36:48], method='pearson'), 2)\r\nc20 = round(general_2015[option][48:60].corr(general_2015[option2][48:60], method='pearson'), 2)\r\nc21 = round(general_2015[option][60:].corr(general_2015[option2][60:], method='pearson'), 2)\r\nc_list = [c16, c17, c18, c19, c20, c21]\r\n\r\n\r\nfig2.add_trace(\r\n go.Scatter(x=y_list, y=c_list, name='Pearson Correlation'),\r\n row=2, col=1\r\n)\r\n\r\nfig2.update_yaxes(tick0=0, dtick=.3, row=2, col=1)\r\nfig2.update_layout(width=1200, title_text=\"{} vs. 
{}\".format(option, option2))\r\n# ----------------------------\r\n\r\n# Radio Button\r\nstatus = st.radio(\"Select Type: \", ('Calendar Year', 'Fiscal Year'))\r\nif (status == 'Calendar Year'):\r\n st.success(\"Calendar Year\")\r\n st.dataframe(general_2016) \r\n st.plotly_chart(fig1)\r\n st.plotly_chart(fig)\r\nelse:\r\n st.success(\"Fiscal Year\")\r\n st.dataframe(general_2015) \r\n st.plotly_chart(fig2)\r\n st.plotly_chart(fig3)\r\n","sub_path":"ref-off-corr-app.py","file_name":"ref-off-corr-app.py","file_ext":"py","file_size_in_byte":7319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"309234963","text":"# MUSA MAHMOOD - Copyright 2018\n# Python 3.6.3\n# TF 1.8.0\n\n# Imports:\nimport os\nimport datetime\nimport numpy as np\nimport tf_shared_k as tfs\n\nfrom keras.layers import Dropout\nfrom keras.optimizers import Adam\nfrom scipy.io import savemat, loadmat\nfrom keras.models import Model, load_model\nfrom keras.utils.generic_utils import Progbar\nfrom keras.layers.convolutional import UpSampling1D\nfrom sklearn.model_selection import train_test_split\nfrom keras.layers import Conv1D, LeakyReLU, Input, Concatenate\nfrom keras_contrib.layers.normalization import InstanceNormalization\n\n# Sources: (Ctrl-LMB in Pycharm)\n# Instance Norm: https://arxiv.org/abs/1701.02096\n# Cycle GAN: https://arxiv.org/abs/1703.10593\n# GAN (orig paper): http://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf\n\n# Setup:\nTRAIN = True # TRAIN ANYWAY FOR # epochs, or just evaluate\nbatch_size = 128\nepochs = 100\nnum_channels = 1\nnum_classes = 1\nlearn_rate = 0.0002\nlambda_cycle = 10.0 # Cycle-consistency loss\nlambda_id = 0.1 * lambda_cycle # Identity loss\nlabel = 'ptb_ecg_cycle_gan_leadv2_lr' + str(learn_rate) + '_r0'\nmodel_dir = \"model_exports/\" + label + '/'\noutput_folder = 'outputs/' + label + '/'\ndescription = label\nseq_length = 2000\ninput_length = seq_length\nx_shape = [seq_length, 1]\ny_shape = [seq_length, num_classes]\n\nx_lead_v2 = tfs.load_mat('data/lead_v2_all/all_x.mat', key='X', shape=[seq_length, 1])\nx_lead_ii = tfs.load_mat('data/lead_ii_all/all_y.mat', key='Y', shape=[seq_length, 1])\nx_train, x_test, y_train, y_test = train_test_split(x_lead_v2, x_lead_ii, train_size=0.75, random_state=1)\n\n\ndef build_generator():\n def conv_layer(layer_input, filters, kernel_size=5, strides=2):\n d = Conv1D(filters, kernel_size, strides=strides, padding='same')(layer_input)\n d = LeakyReLU(alpha=0.20)(d)\n d = InstanceNormalization()(d)\n return d\n\n def deconv_layer(layer_input, skip_input, filters, f_size=4, dropout_rate=0):\n u = UpSampling1D(size=2)(layer_input)\n u = Conv1D(filters, f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = InstanceNormalization()(u)\n u = Concatenate()([u, skip_input])\n return u\n\n # Input samples\n input_samples = Input(shape=(input_length, 1))\n\n # Downsampling:\n d1 = conv_layer(input_samples, 32, 8, 2)\n d2 = conv_layer(d1, 64, 8, 2)\n d3 = conv_layer(d2, 128, 8, 2)\n d4 = conv_layer(d3, 256, 8, 2)\n\n # Now Upsample:\n u1 = deconv_layer(d4, d3, 128, f_size=8)\n u2 = deconv_layer(u1, d2, 64, f_size=8)\n u3 = deconv_layer(u2, d1, 32, f_size=8)\n u4 = UpSampling1D(size=2)(u3)\n output_samples = Conv1D(1, kernel_size=8, strides=1, padding='same', activation='tanh')(u4) #\n return Model(input_samples, output_samples)\n\n\ndef build_discriminator():\n def discriminator_layer(layer_input, filters, f_size=5, strides=2, 
normalization=True):\n d = Conv1D(filters, kernel_size=f_size, strides=strides, padding='same')(layer_input)\n d = LeakyReLU(alpha=0.2)(d)\n if normalization:\n d = InstanceNormalization()(d)\n return d\n\n input_samples = Input(shape=(input_length, 1))\n d1 = discriminator_layer(input_samples, 64, 8, 2, normalization=False)\n d2 = discriminator_layer(d1, 128, 8, 2)\n d3 = discriminator_layer(d2, 256, 8, 2)\n d4 = discriminator_layer(d3, 512, 8, 2)\n validity = Conv1D(1, kernel_size=8, strides=1, padding='same')(d4)\n return Model(input_samples, validity)\n\n\n# Restore Model if Present:\nkeras_training_file = tfs.prep_dir(model_dir) + description + 'training.mat'\nkeras_training_epochs_key = 'training_epochs'\nkeras_training_batch_size_key = 'training_batch_size'\nkeras_combined_model_location = tfs.prep_dir(model_dir) + description + 'combined_model.h5'\nkeras_d_A_location = tfs.prep_dir(model_dir) + description + 'd_A.h5'\nkeras_d_B_location = tfs.prep_dir(model_dir) + description + 'd_B.h5'\nkeras_g_BA_location = tfs.prep_dir(model_dir) + description + 'g_BA.h5'\nkeras_g_AB_location = tfs.prep_dir(model_dir) + description + 'g_AB.h5'\n\nkeras_g_AB_opt_location = tfs.prep_dir(model_dir) + '/opt_ptb_cycle_gan'\n# Load if it exists:\nif os.path.isfile(keras_d_A_location) and os.path.isfile(keras_d_B_location) and os.path.isfile(\n keras_g_AB_location) and os.path.isfile(keras_g_BA_location) and os.path.isfile(keras_combined_model_location):\n print('Loading existing models from directory: ', model_dir, description)\n # Load discriminators\n d_A = load_model(keras_d_A_location)\n d_B = load_model(keras_d_B_location)\n print('Discriminator: ')\n print(d_A.summary())\n optimizer = Adam(learn_rate, beta_1=0.50)\n d_A.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])\n d_B.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])\n # Load Generators:\n g_AB = load_model(keras_g_AB_location)\n g_BA = load_model(keras_g_BA_location)\n print('Generator: ')\n print(g_AB.summary())\n input_A = Input(shape=(input_length, 1))\n input_B = Input(shape=(input_length, 1))\n # Translate images to other domain\n fake_B = g_AB(input_A)\n fake_A = g_BA(input_B)\n # Translate Images back to original domain\n reconstr_A = g_BA(input_B)\n reconstr_B = g_AB(input_A)\n # Identity mapping of images\n input_A_id = g_BA(input_A)\n input_B_id = g_AB(input_B)\n # For the combined model we only train the generators:\n d_A.trainable = False\n d_B.trainable = False\n # Discriminators determines validity of translated data\n valid_A = d_A(fake_A)\n valid_B = d_B(fake_B)\n\n combined_model = load_model(keras_combined_model_location)\n print(combined_model.summary())\n combined_model.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],\n loss_weights=[1, 1, lambda_cycle, lambda_cycle, lambda_id, lambda_id], optimizer=optimizer)\nelse:\n # Set train = 1\n print('Existing model of description [', description, '] not found!')\n TRAIN = True\n print('Training automatically enabled! 
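The discriminator above halves the sequence length four times, which is where the `(125, 1)` adversarial target shape used later in the training loop comes from; a quick arithmetic check:

```python
# Four stride-2 Conv1D layers shrink the sequence by 2**4 = 16,
# so the 2000-sample input yields 2000 // 16 = 125 patch outputs.
input_length = 2000
patch_length = input_length // (2 ** 4)
assert patch_length == 125
```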
- Running ', epochs, ' epochs!')\n # Manually create models:\n optimizer = Adam(learn_rate, beta_1=0.50)\n # Build and compile the discriminators\n d_A = build_discriminator()\n d_B = build_discriminator()\n print('Discriminator: ')\n print(d_A.summary())\n\n d_A.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])\n d_B.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])\n\n # Build and compile the generators\n g_AB = build_generator()\n g_BA = build_generator()\n print('Generator: ')\n print(g_AB.summary())\n\n input_A = Input(shape=(input_length, 1))\n input_B = Input(shape=(input_length, 1))\n\n # Translate images to other domain\n fake_B = g_AB(input_A)\n fake_A = g_BA(input_B)\n\n # Translate Images back to original domain\n reconstr_A = g_BA(input_B)\n reconstr_B = g_AB(input_A)\n\n # Identity mapping of images\n input_A_id = g_BA(input_A)\n input_B_id = g_AB(input_B)\n\n # For the combined model we only train the generators:\n d_A.trainable = False\n d_B.trainable = False\n # Discriminators determines validity of translated data\n valid_A = d_A(fake_A)\n valid_B = d_B(fake_B)\n\n combined_model = Model(inputs=[input_A, input_B],\n outputs=[valid_A, valid_B, reconstr_A, reconstr_B, input_A_id, input_B_id])\n print(combined_model.summary())\n combined_model.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],\n loss_weights=[1, 1, lambda_cycle, lambda_cycle, lambda_id, lambda_id], optimizer=optimizer)\n\n# If mat file doesn't exist, make one and set to zero:\nif os.path.isfile(keras_training_file):\n prev_training_epochs = loadmat(keras_training_file).get(keras_training_epochs_key)\n prev_batch_size = loadmat(keras_training_file).get(keras_training_batch_size_key)\nelse:\n prev_training_epochs = 0\n prev_batch_size = 0\n savemat(keras_training_file, mdict={keras_training_epochs_key: 0, keras_training_batch_size_key: batch_size})\n\n# Train:\nif TRAIN:\n print('This model has been trained to ', prev_training_epochs, 'epochs, with batch size:', prev_batch_size)\n print('Starting Training! 
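Note that the canonical CycleGAN reconstructs each domain from the *translated* samples, while the code above feeds the raw inputs back through the opposite generator; combined with the paired targets passed to `train_on_batch`, that term behaves as direct supervised translation rather than a cycle loss. A fragment of the usual wiring, reusing `g_AB`, `g_BA`, `input_A`, and `input_B` defined above:

```python
fake_B = g_AB(input_A)        # A -> B
fake_A = g_BA(input_B)        # B -> A
reconstr_A = g_BA(fake_B)     # A -> B -> A (cycle)
reconstr_B = g_AB(fake_A)     # B -> A -> B (cycle)
```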
- batch_size:', batch_size, ' epochs: ', epochs)\n # Adversarial loss ground truths\n valid = np.ones((batch_size,) + (125, 1))\n fake = np.zeros((batch_size,) + (125, 1))\n # Training Routine:\n start_time = datetime.datetime.now()\n last_epoch_update = 0\n for epoch in range(epochs + 1):\n print('Epoch {} of {}'.format(epoch, epochs))\n number_batches = int(x_train.shape[0] / batch_size) - 1\n progress_bar = Progbar(target=number_batches)\n index = 0\n while index < number_batches:\n progress_bar.update(index)\n index += 1\n inputs_A = x_train[index * batch_size:(index + 1) * batch_size]\n inputs_B = y_train[index * batch_size:(index + 1) * batch_size]\n\n # Convert inputs using generator.\n fake_B = g_AB.predict(inputs_A)\n fake_A = g_BA.predict(inputs_B)\n\n # # # Train Discriminators:\n dA_loss_real = d_A.train_on_batch(inputs_A, valid)\n dA_loss_fake = d_A.train_on_batch(fake_A, fake)\n dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake)\n\n dB_loss_real = d_B.train_on_batch(inputs_B, valid)\n dB_loss_fake = d_B.train_on_batch(fake_B, fake)\n dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake)\n\n # Total Disc Loss:\n d_loss = 0.5 * np.add(dA_loss, dB_loss)\n\n # # # Train Generators:\n g_loss = combined_model.train_on_batch([inputs_A, inputs_B],\n [valid, valid, inputs_A, inputs_B, inputs_A, inputs_B])\n elapsed_time = datetime.datetime.now() - start_time\n\n # Plot the progress\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] \"\n \"[G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s \"\n % (epoch, epochs,\n index, number_batches,\n d_loss[0], 100 * d_loss[1],\n g_loss[0],\n float(np.mean(g_loss[1:3])),\n float(np.mean(g_loss[3:5])),\n float(np.mean(g_loss[5:6])),\n elapsed_time))\n if epoch % 10 == 0 and epoch != 0:\n # Translate inputs to other domain:\n fake_B = g_AB.predict(x_train)\n fake_A = g_BA.predict(y_train)\n # translate back to original domain:\n reconstr_A = g_BA.predict(fake_B)\n reconstr_B = g_AB.predict(fake_A)\n\n # Save Model & Update Stats\n combined_model.save(keras_combined_model_location)\n g_AB.save(keras_g_AB_location)\n g_BA.save(keras_g_BA_location)\n d_A.save(keras_d_A_location)\n d_B.save(keras_d_B_location)\n # Update Number of Epochs saved\n prev_training_epochs = loadmat(keras_training_file).get(keras_training_epochs_key)\n savemat(keras_training_file, mdict={keras_training_epochs_key: prev_training_epochs + 10,\n keras_training_batch_size_key: batch_size})\n # gen_imgs = generator.predict(x_train)\n mdict = {'x_val': x_train, 'y_true': y_train, 'fake_A': fake_A, 'fake_B': fake_B, 'reconstr_A': reconstr_A,\n 'reconstr_B': reconstr_B}\n total_epochs = prev_training_epochs + 10\n savemat(tfs.prep_dir(output_folder) + description + \"_%d.mat\" % total_epochs, mdict=mdict)\n last_epoch_update = epoch\n\n # Save Models After Training Completed\n combined_model.save(keras_combined_model_location)\n g_AB.save(keras_g_AB_location)\n g_BA.save(keras_g_BA_location)\n d_A.save(keras_d_A_location)\n d_B.save(keras_d_B_location)\n update = epochs - last_epoch_update\n prev_training_epochs = loadmat(keras_training_file).get(keras_training_epochs_key)\n savemat(keras_training_file, mdict={keras_training_epochs_key: prev_training_epochs + update,\n keras_training_batch_size_key: batch_size})\n\n# print('Evaluating & Saving Test Set:')\n# Generate Fake Images:\nfake_B = g_AB.predict(x_test)\nfake_A = g_BA.predict(y_test)\n\n# Translate back to original domain:\nreconstr_A = g_BA.predict(fake_B)\nreconstr_B = g_AB.predict(fake_A)\ntotal_epochs = 
loadmat(keras_training_file).get(keras_training_epochs_key)\nmdict = {'x_val': x_test, 'y_true': y_test, 'fake_A': fake_A, 'fake_B': fake_B, 'reconstr_A': reconstr_A,\n 'reconstr_B': reconstr_B}\nsavemat(tfs.prep_dir(output_folder) + 'test_' + description + '_' + str(total_epochs) + 'epochs.mat', mdict=mdict)\nprint('Test Data Saved: ', output_folder + 'test_' + description + '_' + str(total_epochs) + 'epochs.mat')\n\n# Export Generator g_AB:\nmodel = tfs.export_model_keras(keras_g_AB_location, export_dir=tfs.prep_dir(keras_g_AB_opt_location),\n model_name=description + 'g_AB', sequential=False)\n","sub_path":"_adversarial/resample_cyclegan_ptb.py","file_name":"resample_cyclegan_ptb.py","file_ext":"py","file_size_in_byte":13200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"12176683","text":"#!/usr/bin/env python3\nimport sys\nimport operator\nimport re\nimport numpy as np\n\n\ndef read_commands():\n\twith open('input_data') as input_file:\n\t\treturn input_file.read().split('\\n')\n\n\ndef get_letters(line):\n\tinput_letters = np.array(list(line))\n\tcount_letters = {}\n\tfor letter in [ chr(v) for v in range(97, 123) ]:\n\t\tcount = (input_letters == letter).sum()\n\t\tif count > 0:\n\t\t\tif count not in count_letters:\n\t\t\t\tcount_letters[count] = letter\n\t\t\telse:\n\t\t\t\tcount_letters[count] += letter\n\n\treturn ''.join([ ''.join(np.sort(list(v[1]))) for v in sorted(count_letters.items(), key=operator.itemgetter(0))[::-1] ])[0:5]\n\n\ndef is_valid(line):\n\tre_result = re.match(r'([a-z\\-]*)-([0-9]{3})\\[([a-z]{5})\\]', line)\n\tcode = re_result.group(1)\n\tindex = re_result.group(2)\n\thash_code = re_result.group(3)\n\tif get_letters(code) == hash_code:\n\t\treturn int(index)\n\telse:\n\t\treturn 0\n\ndef main(argv):\n\tsector_sum = 0\n\n\tfor line in read_commands():\n\t\tsector_sum += is_valid(line)\n\tprint(\"Sum of valid rooms: {}\".format(sector_sum))\n\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n","sub_path":"day04/solve_part_1.py","file_name":"solve_part_1.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"555250066","text":"import os\nfrom flask import jsonify\nfrom contextlib import suppress\n\nimport requests\n\nadd_header_endpoint = os.getenv(\n 'HEADER_ENDPOINT',\n 'https://us-central1-panoptes-survey.cloudfunctions.net/header-to-db'\n)\n\n\ndef image_received(request):\n \"\"\"Look for uploaded files and process according to the file type.\n\n Triggered when file is uploaded to bucket.\n\n FITS: Set header variables and then forward to endpoint for adding headers\n to the metadatabase. 
The header is looked up from the file id, including the\n storage bucket file generation id, which are stored into the headers.\n\n CR2: Trigger creation of timelapse and jpg images.\n\n Example file id:\n\n panoptes-survey/PAN001/M42/14d3bd/20181011T134202/20181011T134333.fits.fz/1539272833023747\n\n Args:\n data (dict): The Cloud Functions event payload.\n context (google.cloud.functions.Context): Metadata of triggering event.\n Returns:\n None; the output is written to Stackdriver Logging\n \"\"\"\n request_json = request.get_json()\n\n bucket_path = request_json.get('bucket_path')\n object_id = request_json.get('object_id')\n\n if bucket_path is None:\n return f'No file requested'\n\n _, file_ext = os.path.splitext(bucket_path)\n\n process_lookup = {\n '.fits': process_fits,\n '.fz': process_fits,\n '.cr2': process_cr2,\n }\n\n print(f\"Processing {bucket_path}\")\n\n with suppress(KeyError):\n process_lookup[file_ext](bucket_path, object_id)\n\n return jsonify(success=True, msg=f\"Image processed: {bucket_path}\")\n\n\ndef process_fits(bucket_path, object_id):\n \"\"\" Forward the headers to the -add-header-to-db Cloud Function.\n\n Args:\n bucket_path (str): The relative (to the bucket) path of the file in the storage bucket.\n \"\"\"\n # Get some of the fields from the path.\n unit_id, field, camera_id, seq_time, filename = bucket_path.split('/')\n\n # Get the image time from the filename\n image_time = filename.split('.')[0]\n\n # Build the sequence and image ids\n sequence_id = f'{unit_id}_{camera_id}_{seq_time}'\n image_id = f'{unit_id}_{camera_id}_{image_time}'\n\n headers = {\n 'PANID': unit_id,\n 'FIELD': field,\n 'INSTRUME': camera_id,\n 'SEQTIME': seq_time,\n 'IMGTIME': image_time,\n 'SEQID': sequence_id,\n 'IMAGEID': image_id,\n 'FILENAME': bucket_path,\n 'FILEID': object_id,\n 'PSTATE': 'fits_received'\n }\n\n # Send to add-header-to-db\n print(f\"Forwarding to add-header-to-db: {headers!r}\")\n requests.post(add_header_endpoint, json={\n 'headers': headers,\n 'bucket_path': bucket_path,\n 'object_id': object_id,\n })\n\n\ndef process_cr2(bucket_path, object_id):\n print('TODO: ADD CR2 PROCESSING')\n","sub_path":"cf-image-received/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"243523569","text":"# Plot the ``\"viridis\"`` colormap with the below and above colors.\n#\nimport pyvista as pv\nlut = pv.LookupTable('viridis', n_values=8)\nlut.below_range_color = 'black'\nlut.above_range_color = 'grey'\nlut.nan_color = 'r'\nlut.plot()\n#\n# Plot only ``\"blues\"`` colormap.\n#\nimport pyvista as pv\nlut = pv.LookupTable('blues', n_values=1024)\nlut.plot()\n","sub_path":"version/dev/api/plotting/_autosummary/pyvista-LookupTable-plot-1.py","file_name":"pyvista-LookupTable-plot-1.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"605966761","text":"from PyQt5.QtWidgets import QWidget, QApplication\nfrom PyQt5.QtGui import QPainter, QBrush, QPixmap\nfrom PyQt5.QtCore import Qt\nimport sys\n\nclass Example(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n\n def initUI(self):\n\n self.setGeometry(300, 300, 355, 280)\n self.setWindowTitle('Brushes')\n self.show()\n\n\n def paintEvent(self, e):\n\n qp = QPainter()\n qp.begin(self)\n qp.drawPixmap(300, 300, QPixmap('icon.png'))\n # self.drawBrushes(qp)\n qp.end()\n\n\n def 
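The image_received handler above dispatches on file extension through a dict and silences unknown extensions with `contextlib.suppress`; a self-contained sketch of that pattern:

```python
from contextlib import suppress

def process_fits(path):
    print(f"fits: {path}")

def process_cr2(path):
    print(f"cr2: {path}")

HANDLERS = {'.fits': process_fits, '.fz': process_fits, '.cr2': process_cr2}

def dispatch(path, ext):
    # Unknown extensions simply fall through without raising.
    with suppress(KeyError):
        HANDLERS[ext](path)
```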
drawBrushes(self, qp):\n pic = QPixmap().load('icon.png')\n brush = QBrush(Qt.SolidPattern)\n qp.setBrush(brush)\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n","sub_path":"third-party/pyqt/paint.py","file_name":"paint.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"171194997","text":"\"\"\"\nMichael S. Emanuel\nWed May 2 12:09:03 2018\n\nPandigital Fibonacci ends\nProblem 104\n\nThe Fibonacci sequence is defined by the recurrence relation:\n\nFn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.\nIt turns out that F541, which contains 113 digits, is the first Fibonacci number for which the last\nnine digits are 1-9 pandigital (contain all the digits 1 to 9, but not necessarily in order).\nAnd F2749, which contains 575 digits, is the first Fibonacci number for which the first nine digits\nare 1-9 pandigital.\n\nGiven that Fk is the first Fibonacci number for which the first nine digits AND the last nine\ndigits are 1-9 pandigital, find k.\n\"\"\"\n\nfrom math import log\nfrom Euler.Fibonacci import FibonacciGenerator\nfrom Euler.Pandigital import isPandigital9\n\n\ndef main() -> int:\n # Numerical constants\n billion: int = 10**9\n logRatio: float = log(2) / log(10)\n # k will track the kth Fibonacci number using the convention f1 = 1, f2 = 1, ...\n # so the first time k appears, k = 2\n k: int = 1\n # Keep testing successive Fibonacci numbers until one is found with these requirements\n for fn in FibonacciGenerator():\n # Need to increment k before we have the first continue statement!\n k += 1\n # Get the last 9 digits; this is fast b/c it's one modular division\n last9: int = fn % billion\n # Most of the time the last 9 digits aren't pandigital, so we can continue to save time\n if not isPandigital9(last9):\n continue\n else:\n # Status update\n print(f'Fibonacci number with k = {k} is pandigital at the end.')\n # If the last 9 digits are pandigital, convert n to a string and test the first 9 digits\n # approximate log10 of n\n log10_est: int = int(round(fn.bit_length() * logRatio))\n # Power of 10 used to divide fn to quickly get the first 9 digits\n power: int = log10_est - 15\n # The first 9 digits of fn\n first9 = int(str((fn // (10**power)))[:9])\n # Sanity check: test that above method works...\n # first9_slow: int = int(str(fn)[:9])\n # assert first9_fast == first9\n if isPandigital9(first9):\n print(f'The answer is k = {k}.')\n digitCount: int = len(str(fn))\n print(f'Fibonacci number F{k} is too long to print with {digitCount} digits.')\n break\n return k\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Prob104_PandigitalFibonacciEnds.py","file_name":"Prob104_PandigitalFibonacciEnds.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"542507584","text":"import vk\nimport time\nimport eventlet\nimport requests\nimport logging\nimport telebot\nfrom time import sleep\n\n#получаю текстовую информацию о 10 записях с ключевым словом \"английский\"\nsession = vk.Session(access_token='59b7d17c59b7d17c59b7d17c2959eb6e2e559b759b7d17c00f32b264fe21e4e99241029')\nvk_api = vk.API(session)\n\nURL_VK = vk_api.wall.search(domain='guidesspb', count=10, query='английский')\n\n#отсюда начина�� вставлять код из блога groosha\nFILENAME_VK = 'last_known_id.txt'\nBASE_POST_URL = 'https://vk.com/wall-8854_'\n\nBOT_TOKEN = 
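The Project Euler 104 record above imports `isPandigital9` from an external `Euler.Pandigital` module; a plausible stand-in consistent with how it is used there (not the module's actual implementation):

```python
def is_pandigital9(n):
    # 1-9 pandigital: exactly nine digits covering 1..9 once each.
    s = str(n)
    return len(s) == 9 and set(s) == set('123456789')

assert is_pandigital9(123456789)
assert not is_pandigital9(112345678)
```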
'403467541:AAET4g7JSwMk-x8urPnzy_diTOTPyWdlm2s'\nCHANNEL_NAME = '@megaguideparsing'\n\nbot = telebot.TeleBot(BOT_TOKEN)\n\n#что-то там для контроля времени и таймаутов\ndef get_data():\n timeout = eventlet.Timeout(10)\n try:\n feed = requests.get(URL_VK)\n return feed.json()\n except eventlet.timeout.Timeout:\n logging.warning('Got Timeout while retrieving VK JSON data. Cancelling...')\n return None\n finally:\n timeout.cancel()\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"500291642","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n# from google.colab import drive\n# drive.mount('/content/drive')\n\n\n# In[1]:\n\n\nimport os\n\n\n# In[2]:\n\n\nHOME_PATH = 'server'\nDATA_PATH = './'\n\nWEIGHT_PATH = os.path.join(HOME_PATH, 'weight')\nif not os.path.isdir(WEIGHT_PATH):\n os.makedirs(WEIGHT_PATH)\n\n\n# ## Preprocess data\n\n# ## Load data\n\n# In[ ]:\n\n\nwith open(os.path.join(DATA_PATH, 'wordlist-full.txt')) as f:\n data = f.read().split('\\n')\n\nprint(len(data))\n\n\n# ## Tokenize\n\n# In[4]:\n\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\n\n# In[5]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[6]:\n\n\ntokenizer = Tokenizer(char_level=True, filters='', split='', oov_token='*')\n\n\n# In[7]:\n\n\npad_token = 0\nunknow_token = 1\n\n\n# In[8]:\n\n\ntokenizer.fit_on_texts(data)\n\n\n# In[9]:\n\n\nseq_all = tokenizer.texts_to_sequences(data)\n\n\n# In[10]:\n\n\nlen_all = [len(x) for x in seq_all]\nmax_len = max(len_all)\n\n\n# In[11]:\n\n\nseq_all = pad_sequences(seq_all, maxlen=max_len)\n\n\n# In[12]:\n\n\nX_all = seq_all[:,:-1]\ny_all = seq_all[:,1:]\n\n\n# In[13]:\n\n\nX_train, X_val, y_train, y_val = train_test_split(X_all, y_all, test_size=0.2, random_state=42)\n\n\n# In[14]:\n\n\nimport torch\ntorch.save(tokenizer, os.path.join(DATA_PATH, 'viet_word_tokenizer.h5'))\n\n\n# ## Data loader\n\n# In[15]:\n\n\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset, ConcatDataset\n\n\n# In[16]:\n\n\ndef build_dataset_from_tensors(X, y):\n ds = TensorDataset(X, y)\n return ds\n\n\n# In[17]:\n\n\nX_train = torch.tensor(X_train).long()\ny_train = torch.tensor(y_train).long()\nX_val = torch.tensor(X_val).long()\ny_val = torch.tensor(y_val).long()\n\n\n# In[18]:\n\n\ntrain_ds = build_dataset_from_tensors(X_train, y_train)\nval_ds = build_dataset_from_tensors(X_val, y_val)\n\n\n# In[19]:\n\n\nbatch_size = 256\nshuffle = True\n\n\n# In[20]:\n\n\ntrain_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=shuffle)\nval_dl = DataLoader(val_ds, batch_size=batch_size, shuffle=shuffle)\n\n\n# ## Model\n\n# In[21]:\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n# In[22]:\n\n\nfrom tqdm import tqdm\n\n\n# In[23]:\n\n\nclass Model(nn.Module):\n def __init__(self, vocab_size, embeding_size, hidden_size):\n super(Model, self).__init__()\n self.embeding = nn.Embedding(vocab_size, embeding_size)\n self.lstm = nn.LSTM(embeding_size, hidden_size, num_layers=2, batch_first=True, dropout=0.1)\n self.linear = nn.Linear(hidden_size, vocab_size)\n\n def forward(self, x, hidden_state=None):\n # x: BxS\n x = self.embeding(x) # BxSxE\n if hidden_state is None:\n x, hidden_state = self.lstm(x) # BxSx2H\n else:\n x, hidden_state = self.lstm(x, hidden_state) # BxSx2H\n x = F.relu(x)\n x = self.linear(x) # BxSxV\n return x, 
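The get_data helper in the VK parser record above wraps `requests.get` in an eventlet green-thread timeout (and passes it the result of `vk_api.wall.search`, a parsed response rather than the URL string that `requests.get` expects). A sketch using the timeout that requests supports natively:

```python
import requests

def get_json(url, timeout_s=10):
    # requests raises Timeout if the server is too slow to respond.
    try:
        return requests.get(url, timeout=timeout_s).json()
    except requests.exceptions.Timeout:
        return None
```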
hidden_state\n\n def predict(self, x, hidden_state=None):\n x, hidden_state = self.forward(x, hidden_state)\n x = F.softmax(x, dim=-1) # BxSxV\n return x, hidden_state\n\n\n# In[24]:\n\n\ndef forward_and_loss(model, x, y, loss_fn, pad_token):\n out, hidden_state = model(x)\n loss = loss_fn(out.view(-1, out.size(-1)), y.view(-1), ignore_index=pad_token)\n return out, loss\n\n\n# In[25]:\n\n\ndef train_model(model, optim, train_iter, loss_fn, pad_token, weight_path=None, device=None):\n total_loss = 0.0\n\n model.train()\n\n with tqdm(total=len(train_iter)) as pbar:\n for x, y in train_iter:\n if device is not None and device.type=='cuda':\n x = x.cuda()\n y = y.cuda()\n\n optimizer.zero_grad()\n _, loss = forward_and_loss(model, x, y, loss_fn, pad_token=pad_token)\n\n loss.backward()\n optimizer.step()\n\n total_loss += loss.item()\n\n pbar.update(1)\n pbar.set_description(\"%-10s = %.6f \" % ('loss', total_loss))\n\n # Save model\n if weight_path is not None:\n state = {\n \"model\": model.state_dict(),\n \"optim\": optimizer.state_dict()\n }\n\n torch.save(state, weight_path)\n\n return total_loss\n\n\n# In[26]:\n\n\ndef evaluate_model(model, val_iter, pad_token, device=None):\n model.eval()\n with torch.no_grad(), tqdm(total=len(val_iter)) as pbar:\n total_loss = 0.0\n\n for x, y in val_iter:\n if device is not None and device.type=='cuda':\n x = x.cuda()\n y = y.cuda()\n\n _, loss = forward_and_loss(model, x, y, F.cross_entropy, pad_token=pad_token)\n\n total_loss += loss.item()\n\n pbar.update(1)\n pbar.set_description(\"%-10s = %.6f \" % ('val_loss', total_loss))\n\n return total_loss\n\n\n# ## Training\n\n# In[27]:\n\n\nvocab_size = len(tokenizer.word_index) + 1\nembedding_size = 200\nhidden_size = 256\nlearning_rate = 0.0001\nloss_fn = F.cross_entropy\n\n\n# In[28]:\n\n\ndevice = torch.torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n# In[29]:\n\n\nmodel = Model(vocab_size, embedding_size, hidden_size)\n\n\n# In[30]:\n\n\nif device.type=='cuda':\n model = model.cuda()\n\n\n# In[31]:\n\n\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.98), eps=1e-9)\n\n\n# In[32]:\n\n\nWEIGHT_PATH = os.path.join(HOME_PATH, 'ln_weight')\nif not os.path.isdir(WEIGHT_PATH):\n os.makedirs(WEIGHT_PATH)\n\n\n# In[33]:\n\n\n# logging.info('\\n' + '*'*50 + '\\n' + \"Start logging\\nvocab_size=%d\\nembedding_size=%d\\nhidden_size=%d\\nlearning_rate=%d\\nbatch_size=%d\\n\" \\\n# % (vocab_size, embedding_size, hidden_size, learning_rate, batch_size))\n\n\n# In[34]:\n\n\nnum_epoch = 500\n\n\n# In[35]:\n\n\nfor i in range(1, num_epoch+1):\n weight_path = None\n if i%10==0:\n weight_path = os.path.join(WEIGHT_PATH, 'epoch_%02d.h5' % i)\n print(\"\\nEpoch %02d\" % i, flush=True)\n train_loss = train_model(model, optimizer, train_dl, loss_fn, pad_token, weight_path, device)\n val_loss = evaluate_model(model, val_dl, pad_token, device)\n\n\n# In[ ]:\n","sub_path":"backup/viet_word_model.py","file_name":"viet_word_model.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"513332082","text":"import unittest\nimport sys\nfrom os import path\nsys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )\n\nfrom backend import add_account\n\nclass AddAccountTest(unittest.TestCase):\n \"\"\"Unit tests for accountcount.\"\"\"\n\n def test_add_account(self):\n \"\"\"Test accountcount function.\"\"\"\n event = {\n \"accountNum\":\"123456789012\"\n }\n resp = 
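The character model above exposes `predict(x, hidden_state)` returning softmax probabilities; a greedy generation sketch built on it, where `model` is a trained instance from the script and stopping on the pad token 0 is an assumption:

```python
import torch

def generate(model, seed_ids, max_len=20):
    model.eval()
    ids = list(seed_ids)
    x = torch.tensor([ids]).long()
    hidden = None
    with torch.no_grad():
        for _ in range(max_len):
            probs, hidden = model.predict(x, hidden)
            next_id = int(probs[0, -1].argmax())   # greedy pick at last step
            if next_id == 0:                        # assumed pad/stop token
                break
            ids.append(next_id)
            x = torch.tensor([[next_id]]).long()   # feed only the new char
    return ids
```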
add_account.add_account(event)\n self.assertTrue(resp.get('Message'), 'Account added')\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"test/test_add_account.py","file_name":"test_add_account.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"61912636","text":"#!/usr/bin/python\n\nimport argparse\nimport sys\nimport glob\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-b',help='output from Blast_16SMicrobial.py')\nparser.add_argument('-k',help=\"keyword to search for in blast results\")\nargs=parser.parse_args()\nblast=args.b\nkeyword=args.k\n\ncurr_dir = os.getcwd()\nnew_file = curr_dir+\"/\"+keyword+\"_blast.txt\"\n#os.chdir(d)\n#now_dir = os.getcwd()\n#print(\"cwd\")\n#print(now_dir)\n\npos_list = []\n\n#for file in glob.iglob('*.fna'):\n# num_records = 0\nwith open(blast,'r') as blast_hits:\n lines = [line for line in blast_hits]\n for line in lines:\n if line[0] == '*':\n id = line[71:85]\n else: \n if keyword in line:\n pos_list.append(id)\n \nuni_pos_list = set(pos_list) \n\nfor id in uni_pos_list:\n with open(new_file,'a') as positives:\n positives.write(\"/home/katiephd/cdiff/2016-cdiff/2016-08-24-ksnp/2016-08-25-cdif_root_csor/639_cdif_1_csor_genomes/\")\n positives.write(id)\n positives.write(\".fna\")\n positives.write(\"\\t\")\n positives.write(id)\n positives.write(\"\\n\")\n \n \n# if num_records < int(contig):\n# with(open(new_file,'a')) as threshold:\n# threshold.write(str(now_dir))\n# threshold.write(\"/\")\n# threshold.write(str(file))\n# threshold.write(\"\\t\")\n# threshold.write(str(file)[:-4])\n# threshold.write(\"\\n\")\n","sub_path":"2016-cdiff/2016-08-24-ksnp/2016-08-29-blast-cdiff-genomes-16S/grab_blast_hits.py","file_name":"grab_blast_hits.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"203765203","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom builtins import str\nfrom builtins import object\nfrom .anova_drilldown import AnovaDrilldownNarratives\nfrom bi.common import NormalCard, NarrativesTree, C3ChartData,HtmlData\nfrom bi.common import NormalChartData, ChartJson\nfrom bi.common import utils as CommonUtils\nfrom bi.narratives import utils as NarrativesUtils\nfrom bi.narratives.anova.anova import OneWayAnovaNarratives\nfrom bi.settings import setting as GLOBALSETTINGS\n\n\nclass AnovaNarratives(object):\n ALPHA = 0.05\n\n KEY_SUMMARY = 'summary'\n KEY_NARRATIVES = 'narratives'\n KEY_TAKEAWAY = 'key_takeaway'\n DRILL_DOWN = 'drill_down_narrative'\n KEY_CARD = 'card'\n KEY_HEADING = 'heading'\n KEY_SUBHEADING = 'header'\n KEY_CHART = 'charts'\n KEY_PARAGRAPH = 'paragraphs'\n KEY_PARA_HEADER = 'header'\n KEY_PARA_CONTENT = 'content'\n KEY_BUBBLE = 'bubble_data'\n\n # @accepts(object, DFAnovaResult, DataFrameHelper)\n def __init__(self, df_anova_result, df_helper, df_context, result_setter,story_narrative,scriptWeight=None, analysisName=None):\n self._story_narrative = story_narrative\n self._result_setter = result_setter\n self._dataframe_context = df_context\n self._df_anova_result = df_anova_result\n self._df_helper = df_helper\n self.narratives = {}\n self.narratives['variables'] = ''\n self._blockSplitter = GLOBALSETTINGS.BLOCKSPLITTER\n self._base_dir = \"/anova/\"\n\n self._analysisName = self._dataframe_context.get_analysis_name()\n self._analysisDict = 
self._dataframe_context.get_analysis_dict()\n\n self._completionStatus = self._dataframe_context.get_completion_status()\n self._messageURL = self._dataframe_context.get_message_url()\n if analysisName == None:\n self._analysisName = self._dataframe_context.get_analysis_name()\n else:\n self._analysisName = analysisName\n if scriptWeight == None:\n self._scriptWeightDict = self._dataframe_context.get_measure_analysis_weight()\n else:\n self._scriptWeightDict = scriptWeight\n self._scriptStages = {\n \"anovaNarrativeStart\":{\n \"summary\":\"Started The Anova Narratives\",\n \"weight\":0\n },\n \"anovaNarrativeEnd\":{\n \"summary\":\"Narratives For Anova Finished\",\n \"weight\":10\n },\n }\n # self._completionStatus += self._scriptWeightDict[self._analysisName][\"narratives\"]*self._scriptStages[\"anovaNarrativeStart\"][\"weight\"]/10\n # progressMessage = CommonUtils.create_progress_message_object(self._analysisName,\\\n # \"anovaNarrativeStart\",\\\n # \"info\",\\\n # self._scriptStages[\"anovaNarrativeStart\"][\"summary\"],\\\n # self._completionStatus,\\\n # self._completionStatus)\n # CommonUtils.save_progress_message(self._messageURL,progressMessage)\n # self._dataframe_context.update_completion_status(self._completionStatus)\n CommonUtils.create_update_and_save_progress_message(self._dataframe_context,self._scriptWeightDict,self._scriptStages,self._analysisName,\"anovaNarrativeStart\",\"info\",display=False,emptyBin=False,customMsg=None,weightKey=\"narratives\")\n\n\n\n self._generate_narratives()\n\n # self._completionStatus += self._scriptWeightDict[self._analysisName][\"narratives\"]*self._scriptStages[\"anovaNarrativeEnd\"][\"weight\"]/10\n # progressMessage = CommonUtils.create_progress_message_object(self._analysisName,\\\n # \"anovaNarrativeEnd\",\\\n # \"info\",\\\n # self._scriptStages[\"anovaNarrativeEnd\"][\"summary\"],\\\n # self._completionStatus,\\\n # self._completionStatus)\n # CommonUtils.save_progress_message(self._messageURL,progressMessage)\n # self._dataframe_context.update_completion_status(self._completionStatus)\n CommonUtils.create_update_and_save_progress_message(self._dataframe_context,self._scriptWeightDict,self._scriptStages,self._analysisName,\"anovaNarrativeEnd\",\"info\",display=False,emptyBin=False,customMsg=None,weightKey=\"narratives\")\n\n\n if self._anovaNodes.get_card_count() > 0:\n self._story_narrative.add_a_node(self._anovaNodes)\n #self._generate_take_away()\n self._result_setter.set_anova_node(self._anovaNodes)\n\n def _generate_narratives(self):\n try:\n nColsToUse = self._analysisDict[self._analysisName][\"noOfColumnsToUse\"]\n except:\n nColsToUse = None\n self._anovaNodes = NarrativesTree()\n self._anovaNodes.set_name(\"Performance\")\n for measure_column in self._df_anova_result.get_measure_columns():\n measure_anova_result = self._df_anova_result.get_measure_result(measure_column)\n significant_dimensions_dict, insignificant_dimensions = measure_anova_result.get_OneWayAnovaSignificantDimensions()\n num_dimensions = len(list(significant_dimensions_dict.items())) + len(insignificant_dimensions)\n significant_dimensions = [k for k,v in sorted(list(significant_dimensions_dict.items()), key=lambda x: -x[1])]\n if nColsToUse != None:\n significant_dimensions = significant_dimensions[:nColsToUse]\n num_significant_dimensions = len(significant_dimensions)\n num_insignificant_dimensions = len(insignificant_dimensions)\n print(\"num_significant_dimensions\",num_significant_dimensions)\n if num_significant_dimensions > 0:\n mainCard = 
NormalCard(name = \"Overview of Key Factors\")\n data_c3 = []\n for sig_dim in significant_dimensions:\n data_c3.append({'dimension':sig_dim, 'effect_size':float(significant_dimensions_dict[sig_dim])})\n self.narratives = {}\n self.narratives[AnovaNarratives.KEY_HEADING] = \"%s Performance Analysis\" % (measure_column,)\n self.narratives['main_card'] = {}\n self.narratives['cards'] = []\n self.narratives['main_card'][AnovaNarratives.KEY_SUBHEADING] = \"Relationship between %s and other Dimensions\" % (measure_column)\n self.narratives['main_card'][AnovaNarratives.KEY_PARAGRAPH] = []\n data_dict = { \\\n 'significant_dimensions' : significant_dimensions,\n 'insignificant_dimensions' : insignificant_dimensions,\n 'num_significant_dimensions' : num_significant_dimensions,\n 'num_insignificant_dimensions' : num_insignificant_dimensions,\n 'num_dimensions' : num_significant_dimensions+num_insignificant_dimensions,\n 'target' : measure_column \\\n }\n output = {'header' : ''}\n output['content'] = NarrativesUtils.get_template_output(self._base_dir,'anova_template_1.html',data_dict)\n self.narratives['main_card'][AnovaNarratives.KEY_PARAGRAPH].append(output)\n output1 = {'header' : ''}\n output1['content'] = NarrativesUtils.get_template_output(self._base_dir,'anova_template_2.html',data_dict)\n lines = []\n lines += NarrativesUtils.block_splitter(output['content'],self._blockSplitter)\n data_c3 = NormalChartData(data_c3)\n chart_data = data_c3.get_data()\n chartDataValues = []\n effect_size_values = []\n for obj in chart_data:\n effect_size_values.append(obj[\"effect_size\"])\n chart_data_min = min(effect_size_values)\n if chart_data_min < 0.00001:\n for obj in chart_data:\n chartDataValues.append(str(obj[\"effect_size\"]))\n else:\n for obj in chart_data:\n chartDataValues.append(obj[\"effect_size\"])\n chart_json = ChartJson(data = chart_data,axes={'x':'dimension','y':'effect_size'},\n label_text={'x':'','y':'Effect Size (scaled exp values)'},chart_type='bar')\n chart_json.set_axis_rotation(True)\n # chart_json.set_yaxis_number_format(\".4f\")\n chart_json.set_yaxis_number_format(NarrativesUtils.select_y_axis_format(chartDataValues))\n # st_info = [\"Test : ANOVA\", \"Threshold for p-value : 0.05\", \"Effect Size : Tukey's HSD\"]\n statistical_info_array=[\n (\"Test Type\",\"ANOVA\"),\n (\"Effect Size\",\"ETA squared\"),\n (\"Max Effect Size\",chart_data[0][\"dimension\"]),\n (\"Min Effect Size\",chart_data[-1][\"dimension\"]),\n ]\n statistical_inferenc = \"\"\n if len(chart_data) == 1:\n statistical_inference = \"{} is the only variable that have significant association with the {} (Target) having an \\\n Effect size of {}\".format(chart_data[0][\"dimension\"],self._dataframe_context.get_result_column(),round(chart_data[0][\"effect_size\"],4))\n elif len(chart_data) == 2:\n statistical_inference = \"There are two variables ({} and {}) that have significant association with the {} (Target) and the \\\n Effect size ranges are {} and {} respectively\".format(chart_data[0][\"dimension\"],chart_data[1][\"dimension\"],self._dataframe_context.get_result_column(),round(chart_data[0][\"effect_size\"],4),round(chart_data[1][\"effect_size\"],4))\n else:\n statistical_inference = \"There are {} variables that have significant association with the {} (Target) and the \\\n Effect size ranges from {} to {}\".format(len(chart_data),self._dataframe_context.get_result_column(),round(chart_data[0][\"effect_size\"],4),round(chart_data[-1][\"effect_size\"],4))\n if statistical_inference != \"\":\n 
statistical_info_array.append((\"Inference\",statistical_inference))\n statistical_info_array = NarrativesUtils.statistical_info_array_formatter(statistical_info_array)\n lines += [C3ChartData(data=chart_json,info=statistical_info_array)]\n lines += NarrativesUtils.block_splitter(output1['content'],self._blockSplitter)\n mainCard.set_card_data(lines)\n self._anovaNodes.add_a_card(mainCard)\n self.narratives['main_card'][AnovaNarratives.KEY_PARAGRAPH].append(output1)\n self.narratives['main_card'][AnovaNarratives.KEY_CHART] = {}\n effect_size_chart = { 'heading' : '',\n 'labels' : {'Dimension':'Effect Size'},\n 'data' : significant_dimensions_dict}\n print(significant_dimensions_dict)\n self.narratives['main_card'][AnovaNarratives.KEY_CHART]['effect_size'] = effect_size_chart\n progressMessage = CommonUtils.create_progress_message_object(self._analysisName,\"custom\",\"info\",\"Analyzing Key Drivers\",self._completionStatus,self._completionStatus,display=True)\n CommonUtils.save_progress_message(self._messageURL,progressMessage,ignore=False)\n self._generate_dimension_narratives(significant_dimensions, measure_anova_result, measure_column)\n else:\n mainCard = NormalCard(name = \"Overview of Key Factors\")\n cardText=HtmlData(\"There are no dimensions in the dataset that have significant influence on {}\".format(measure_column))\n mainCard.set_card_data([cardText])\n self._anovaNodes.add_a_card(mainCard)\n\n\n def _generate_dimension_narratives(self,significant_dimensions, measure_anova_result, measure):\n self.narratives['cards'] = []\n anova_trend_result = measure_anova_result.get_trend_data()\n if len(significant_dimensions) == 0:\n self.narratives['cards'].append({'card1':'', 'card2':'', 'card3':''})\n self.narratives['variables'] = significant_dimensions\n for dimension in significant_dimensions:\n dimensionNode = NarrativesTree(name = dimension)\n narratives = OneWayAnovaNarratives(self._dataframe_context,measure, dimension, measure_anova_result, anova_trend_result,self._result_setter,dimensionNode,self._base_dir)\n self._anovaNodes.add_a_node(dimensionNode)\n self.narratives['cards'].append(narratives)\n","sub_path":"bi/narratives/anova/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"11918230","text":"from os import path\nimport re\nfrom setuptools import setup, find_packages\n\nhere = path.abspath(path.dirname(__file__))\n\ndef get_version():\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(path.join(here, 'openeats', '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\nversion = get_version()\nlong_description = \"\"\"Django webappto manage your recipes.\n\"\"\"\n\nsetup(\n name='openeats',\n version=version,\n description='Django webapp to manage recipes',\n long_description=long_description,\n author='Simon Hanna',\n url='https://github.com/simhnna/openeats',\n license='MIT',\n packages=['openeats'],\n include_package_data=True,\n install_requires=[\n 'django>=1.11',\n 'django-extensions',\n 'reportlab',\n ],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python 
:: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='django cooking recipes',\n python_requires='>=3.4',\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"287375795","text":"# %%\nimport pandas as pd\nimport matplotlib.pyplot as plt, numpy as np\n\nplt.rc('text', usetex=True)\ndf = pd.read_csv('tests/results.csv')\n\n# %%\nfig = plt.figure()\nax = fig.add_subplot(111)\nmethods = ('noisy', 'cerman', 'mst_out', 'quick')\nlabels = ('Corrupted', 'BTF [3]', 'MST $\\mathbf{\\hat{e}}_\\mathrm{WLS}$ (ours)', 'Tiled $\\mathbf{\\hat{e}}_\\mathrm{WLS}$ (ours)')\ndata = np.empty((len(methods), len(df.gain.unique()), 2))\n\nfor i, g in enumerate(df.gain.unique()):\n subset = df[df.gain == g]\n for j, m in enumerate(methods):\n \tdata[j,i] = subset[m].mean(), subset[m].var()\n\nfor i in range(len(data)):\n plt.errorbar(range(data.shape[1]), data[i,:,0], yerr=1.96*np.sqrt(data[i,:,1]/subset.shape[0]), capsize=4, fmt='x-')\n plt.xticks(ticks=range(data.shape[1]), labels=[str(g*100) for g in df.gain.unique()])\n for x, y in zip(np.arange(data.shape[1]), data[i,:,0]):\n plt.text(x-0.05, y, f'{y:.2f}', ha='right', va='top')\n\nplt.minorticks_on()\nplt.xlim(left=-0.5)\nax.set_xticks([], minor = True)\nplt.xlabel('ISO')\nplt.ylabel('Relative RMSE × 100')\nplt.grid(axis='y', which='major')\nplt.grid(axis='y', which='minor', linestyle=(0, (5, 8)))\nplt.legend(labels)\nplt.savefig('comparison.pdf', bbox_inches='tight')\nplt.show()\n","sub_path":"tests/exposure_estimation/plot_comparison.py","file_name":"plot_comparison.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"359862300","text":"import asyncio\n\nfrom aiokafka import AIOKafkaProducer\nimport functools\nimport time\n\n\ndef on_done(i):\n print('Send msg n°', i)\n\n\nasync def produce(mloop):\n producer = AIOKafkaProducer(\n loop=mloop, bootstrap_servers='infra-cp-kafka')\n\n await producer.start()\n try:\n # Produce message\n for i in range(0, 100):\n coro = await producer.send_and_wait(\"my_favorite_topic\", b\"Super message\")\n print(coro)\n time.sleep(0.4)\n finally:\n # Wait for all pending messages to be delivered or expire.\n await producer.stop()\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n\n loop.run_until_complete(produce(loop))\n","sub_path":"recipes/2019-07-24-test-producer-dns-fix.py","file_name":"2019-07-24-test-producer-dns-fix.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"175211071","text":"from dbmodel import Base, Person, Address\nfrom resource import PersonResource, AddressSubResource\nfrom data_access_layer import *\nfrom mapper.object_mapper import ObjectMapper\nfrom mapper.object_mapper import ObjectMapperException\n\n# define mappings\nmapper = ObjectMapper()\nmapper.create_map(Person, PersonResource, {'address_list': None})\nmapper.create_map(Address, AddressSubResource)\n\ndef get_person_resource_list():\n\tpeople = get_all_person()\n\n\tpeople_resource_list = []\n\t\n\tfor p in people:\n\t\tperson_resource = mapper.map(p, PersonResource)\n\t\taddr_list = get_person_address(p.id)\n\t\t\n\t\tfor a in addr_list:\n\t\t\taddr_resource = mapper.map(a, 
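The get_version helper in the setup.py record above pulls the version string out of `__init__.py` with a regex; the same pattern run on an inline sample:

```python
import re

init_py = '__version__ = "0.1.0"\n'
version = re.search(r"__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
assert version == "0.1.0"
```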
AddressSubResource)\n\t\t\tperson_resource.address_list.append(addr_resource)\n\n\t\tpeople_resource_list.append(person_resource)\n\n\treturn people_resource_list\n","sub_path":"backend/mapping_layer.py","file_name":"mapping_layer.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"556997386","text":"\"\"\"\r\n\r\nImplement RestAPI for collection object\r\n\r\n\"\"\"\r\n\r\nimport json\r\nimport falcon\r\nfrom mongo.mongo_db_config import CLIENT\r\nimport mongo.mongo_exceptions as custexp\r\n\r\n\r\nclass Collection(object):\r\n \"\"\"\r\n Class to implement get,post,put and delete\r\n\r\n \"\"\"\r\n @classmethod\r\n def checkDb(cls, database):\r\n if database not in CLIENT.list_database_names():\r\n raise custexp.DbError(\"Database doesn't exist\")\r\n else:\r\n return True\r\n\r\n def on_get(self, req, res, database):\r\n \"\"\" GET functionality\"\"\"\r\n try:\r\n if self.checkDb(database):\r\n #GO feth the collections in Db and respond 200OK\r\n dbs = CLIENT[database]\r\n body = dbs.collection_names()\r\n res.body = json.dumps(body, ensure_ascii=False)\r\n res.status = falcon.HTTP_200\r\n\r\n except custexp.DbError as exp:\r\n body = {\"Error\":str(exp)}\r\n res.body = json.dumps(body, ensure_ascii=False)\r\n res.status = falcon.HTTP_400\r\n\r\n def on_post(self, req, res, database):\r\n \"\"\" POST functionality\"\"\"\r\n try:\r\n if self.checkDb(database):\r\n if req.content_type == \"application/json\":\r\n try:\r\n request = json.load(req.stream)\r\n if ((not isinstance(request['collection'], str)) or (not isinstance(request['capped'], bool))):\r\n # JSON field validation\r\n raise custexp.IllegalArgumentException()\r\n dbs = CLIENT[database]\r\n if request['capped']:\r\n if not isinstance(request['size'], int):\r\n raise ValueError('Expecting integer for size')\r\n #create capped collection based on maxPoolSize\r\n dbs.create_collection(request['collection'], capped=True, size=request['size'])\r\n body = {\"CreatedCollection\":request['collection'], \"cappedsize\": request['size']}\r\n\r\n else:\r\n dbs.create_collection(request['collection'])\r\n body = {\"CreatedCollection\":request['collection'], \"capped\": False}\r\n\r\n res.body = json.dumps(body,ensure_ascii=False)\r\n res.status = falcon.HTTP_200\r\n\r\n except (KeyError, ValueError):\r\n raise custexp.IllegalArgumentException()\r\n else:\r\n raise custexp.ContentTypeUnsupported()\r\n except Exception as exp:\r\n body = {\"Error\":str(exp)}\r\n res.body = json.dumps(body,ensure_ascii=False)\r\n res.status = falcon.HTTP_400\r\n\r\n def on_delete(self, req, res, database):\r\n \"\"\"Delete functionality\"\"\"\r\n try:\r\n if self.checkDb(database):\r\n if req.content_type == \"application/json\":\r\n request = json.load(req.stream)\r\n if not isinstance(request['collection'],str):\r\n raise custexp.IllegalArgumentException()\r\n dbs = CLIENT[database]\r\n if request['collection'] not in dbs.collection_names():\r\n raise custexp.CollectionError(\"Collection does not exist\")\r\n dbs.drop_collection(request['collection'])\r\n body = {\"DroppedCollection\":request['collection']}\r\n res.body = json.dumps(body,ensure_ascii=False)\r\n res.status = falcon.HTTP_200\r\n\r\n else:\r\n raise custexp.ContentTypeUnsupported()\r\n except Exception as exp:\r\n body = {\"Error\":str(exp)}\r\n res.body = json.dumps(body,ensure_ascii=False)\r\n res.status = 
falcon.HTTP_400\r\n","sub_path":"api/mongo/actions/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"277874448","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 21 18:01:37 2018\n\n@author: kay\n\"\"\"\n\nimport graph\nimport copy\nfrom simulator import Simulator\nfrom node import Node\n\ndef mlpt():\n    ''' Multi-power Topology Control Algorithm '''\n    \n    G_mlpt = copy.deepcopy(Node.interconnect_matrix) # interconnect matrix of MIA\n    nodes = copy.deepcopy(Node.nodes)\n    \n    ##############################################################\n    M = 0\n    H = graph.flow(G_mlpt, Node.n)\n    for i in range(Node.n):\n        nei = nodes[i].double_neighbor\n        n = [x[0] for x in nei] # ids of the neighbors of node i \n        d = [x[1] for x in nei]\n        tempM = 0\n        e = [None] * len(d)\n        for j in range(len(d)):\n            e[j] = H[i][ n[j] ] * Simulator.dis_to_power(d[j])\n            tempM += e[j]    \n        V = []\n        for k in range(len(d)):\n            V.append([ n[k], d[k], e[k] ])\n        nodes[i].neighbor = V\n        nodes[i].neighbor.sort(key=lambda x:x[2],reverse=True)\n        \n        if tempM > M:\n            M = tempM    \n    ################################################################    \n    flag = True # is the power of each node (the game theory result) the NE?\n    cnt = 0 # number of games\n    while flag:\n        flag = False\n        cnt += 1\n        for i in range(Node.n): # N->0\n            (u, G_mlpt) = mlpt_utility(i, nodes, G_mlpt, M)\n            if u != nodes[i].power:\n                flag = True\n                nodes[i].power = u  \n    print(\"Number of iterations of MLPT:\", cnt)\n#    print(nodes[0].neighbor)\n#    print(nodes[0].power)  \n#    print(nodes[1].power)   \n#    print(nodes[Node.n-1].power)  \n    return G_mlpt, nodes\n\n\ndef mlpt_utility(cid, nodes, G_mlpt, M): # cid: current node id\n    '''return the optimal power for node cid and the updated topology'''\n    nei = nodes[cid].neighbor\n    n = [x[0] for x in nei] # neighbor node id\n    d = [x[1] for x in nei] # the distance between i and j\n    E = [x[2] for x in nei] # energy by link\n    \n    utility_max = 0  \n#    index = 0\n    for i in range(len(E)):\n        MG = copy.deepcopy(G_mlpt)\n        if i != 0:\n            MG[cid][ n[i-1] ] = 0\n            MG[n[i-1]][ cid ] = 0   \n        e = 0\n        for j in range(len(E)):\n            if MG[cid][ n[j] ] == 1:\n                e += E[j]\n        k = graph.Connect(MG, Node.n, cid)\n        u = M * k - e\n        \n        if k > 0 and u > utility_max:\n            utility_max = u\n            G_mlpt = MG\n#            index = i # the graph may disconnect, so the index cannot be used\n    ##################################################\n    # Determine power\n    ##################################################\n#    power = d[index]\n    power = 0\n#    print(\"++++++++++++++++++++++++++++++++++++++++++++\")\n    for i in range(len(d)):\n        if G_mlpt[cid][ n[i] ] == 1 and d[i] > power:\n            power = d[i]\n    return power, G_mlpt","sub_path":"gt/mlpt.py","file_name":"mlpt.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"51374655","text":"#!/usr/bin/env python3\nimport socket, time, sys\nfrom multiprocessing import Process\n\n#define address & buffer size\nHOST = \"\"\nextern_host = 'www.google.com'\nextern_port = 80\nPORT = 8001\nBUFFER_SIZE = 1024\n\n\n# get host information\ndef get_remote_ip(host):\n    print(f'Getting IP for {host}')\n    try:\n        remote_ip = socket.gethostbyname( host )\n    except socket.gaierror:\n        print('Hostname could not be resolved. 
Exiting')\n        sys.exit()\n\n    print(f'IP address of {host} is {remote_ip}')\n    return remote_ip\n\n\n# echo connections back to the client\ndef handle_request(conn, proxy_end):\n    # send data\n    send_full_data = conn.recv(BUFFER_SIZE)\n    print(f\"Sending received data {send_full_data} to google\")\n    proxy_end.sendall(send_full_data)\n\n    # remember to shut down!!\n    proxy_end.shutdown(socket.SHUT_WR)\n\n    data = proxy_end.recv(BUFFER_SIZE)\n    print(f\"Sending received data {data} to client\")\n\n    # send data back\n    conn.send(data)\n\n\ndef main():\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as proxy_start: #establish \"start\" of proxy (connects to localhost)\n        # bind, and set to listening mode\n        proxy_start.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        proxy_start.bind((HOST, PORT))\n        proxy_start.listen(1)\n\n        while True:\n            # accept incoming connections from proxy_start, print information about connection\n            conn, addr = proxy_start.accept()\n            print(\"Connected by\", addr)\n\n            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as proxy_end: # establish \"end\" of proxy (connects to google)\n                # get remote IP of google\n                remote_ip = get_remote_ip(extern_host)\n\n                # connect proxy_end to it\n                proxy_end.connect((remote_ip, extern_port))\n\n                # now for the multiprocessing...\n\n                # allow for multiple connections with a Process daemon\n                p = Process(target=handle_request, args=(conn, proxy_end))\n                p.daemon = True\n                p.start()\n                # make sure to set target = handle_request when creating the process.\n\n                # close the connection!\n                conn.close()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"multi_proxy_server.py","file_name":"multi_proxy_server.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"70909910","text":"import os\n\nfrom contextlib import contextmanager\nfrom random import randint\nfrom PIL import Image\n\nfrom tests.util.params import LOGO_BASE64\n\n\n@contextmanager\ndef get_temporary_logo():\n    path_to_logo = os.path.join(os.path.abspath('.'),\n                                'tests/util/temp_logo.png')\n    try:\n        image = Image.new(\"RGBA\", (16, 16), (randint(1, 100),\n                                             randint(1, 100),\n                                             randint(1, 100),\n                                             randint(1, 100)))\n        image.save(path_to_logo)\n        yield path_to_logo\n    finally:\n        os.remove(path_to_logo)\n\n\n@contextmanager\ndef get_temporary_text_file():\n    path_to_file = os.path.join(os.path.abspath('.'),\n                                'tests/util/temp_txt.txt')\n    try:\n        with open(path_to_file, \"w\") as b64_file:\n            b64_file.write(LOGO_BASE64)\n        yield path_to_file\n    finally:\n        os.remove(path_to_file)\n","sub_path":"tests/util/temp_files.py","file_name":"temp_files.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"26311089","text":"#\n# Copyright (c) 2014 TrendMicro. Data Center Services Research and Development. 
(dcsrd@dl.trendmicro.com)\n#\nimport os\nimport ConfigParser\n\n\nclass GeneralConfigParser():\n\n    def __init__(self):\n        pass\n\n    def _config_init(self, root_path, name):\n        config = ConfigParser.SafeConfigParser()\n        filename = os.path.join(root_path, name) + \".ini\"\n        if len(config.read(filename)) == 0:\n            raise Exception(\"config file not found: %s\" % filename)\n        return config\n\n    def get_omelet_config_dir(self):\n        return os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"config\")\n\n\nclass OmeletConfigParser(GeneralConfigParser):\n\n    def __init__(self, root_path=None):\n        if not root_path:\n            root_path = self.get_omelet_config_dir()\n        self.config = self._config_init(root_path, \"config\")\n        GeneralConfigParser.__init__(self)\n\n    def get_packer_regions(self):\n        try:\n            regions = map(str.strip, self.config.get('ami_region', 'region').split(','))\n        except:\n            regions = list()\n        return regions\n\n    def get_packer_path(self):\n        try:\n            path = self.config.get('packer', 'path').strip()\n        except:\n            path = \"\"\n        return path\n\n    def get_packer_retry_cnt(self):\n        try:\n            cnt = self.config.get('packer', 'max_retry_cnt').strip()\n        except:\n            cnt = 0\n        return int(cnt)\n\n    def get_sns_region(self):\n        try:\n            region = self.config.get('notification', 'region').strip()\n        except:\n            region = None\n        return region\n\n    def get_sns_topic(self, name):\n        topic_name = \"%s_topic\" % name\n        try:\n            topic = self.config.get('notification', topic_name).strip()\n        except:\n            topic = None\n        return topic\n\n    def get_sns_reporter(self):\n        try:\n            reporter = self.config.get('notification', 'reporter').strip()\n        except:\n            reporter = None\n        return reporter\n\n    def get_git_folder(self):\n        path = dict()\n        try:\n            path['image_folder'] = self.config.get('git', 'image_folder').strip()\n            path['cookbook_folder'] = self.config.get('git', 'cookbook_folder').strip()\n            path['shell_folder'] = self.config.get('git', 'shell_folder').strip()\n        except:\n            raise ValueError(\"Could not get git folder path setting in config file\")\n        return path\n\n    def get_git_branch(self):\n        try:\n            branch = self.config.get('git', 'branch').strip()\n        except:\n            branch = 'master'\n        \n        return branch\n\n    def get_glacier_region(self):\n        try:\n            region = self.config.get('glacier', 'region').strip()\n        except:\n            raise ValueError(\"Could not get Glacier region\")\n        return region\n    \nclass DBConfigParser(GeneralConfigParser):\n\n    def __init__(self, root_path=None):\n        if not root_path:\n            root_path = self.get_omelet_config_dir()\n        self.db = self._config_init(root_path, \"database\")\n        GeneralConfigParser.__init__(self)\n\n    def get_dynamodb_region(self):\n        try:\n            region = self.db.get('dynamodb', 'region').strip()\n        except:\n            raise ValueError(\"Could not get DynamoDB region in config file\")\n        return region\n\n    def get_dynamodb_tables(self):\n        tables = dict()\n        try:\n            tables['image_table'] = self.db.get('dynamodb', 'image_table').strip()\n            tables['tree_table'] = self.db.get('dynamodb', 'tree_table').strip()\n            tables['history_table'] = self.db.get('dynamodb', 'history_table').strip()\n        except:\n            raise ValueError(\"Could not get DynamoDB setting in config file\")\n        return tables\n\n","sub_path":"omelet/utils/configparser.py","file_name":"configparser.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"64491880","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom collections import OrderedDict\n\nimport 
json\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.externals import joblib\nfrom urllib.parse import quote_plus\nimport os\nfrom pymongo import MongoClient\n\nuri = \"mongodb://%s:%s@%s\" % (quote_plus(\"admin123\"), quote_plus(\"1234\"), \"wolfwatch.dlinkddns.com:27017/admin\")\nclient = MongoClient(uri)\ndb = client.crawler_db\nlist_cursor = db.word_table.find({},{\"_id\": 0,\"word\": 1})\n\n\n# feed per-word results into kmeans_statemap.py\ndef main(word):\n    plt.rc('font', family='Malgun Gothic')\n    # read the csv\n    if not os.path.isfile(\"./sum2/\" + word + \"_sum.csv\"):\n        print(word + \" file does not exist\")\n        return\n    if os.path.isfile(\"./seq2/\" + word + \"_seq.json\"):\n        print(word + \" already trained\")\n        return\n\n    df = pd.read_csv('./sum/'+word+'_sum.csv', parse_dates=[0], dtype=np.float64)\n\n    max_bound = 0\n\n    premax = 0\n    variance = 0\n    pre_var = 0\n    max_variance = 0\n    for coln in df.columns[1:]:\n        col = df[coln] # column read from the csv\n        ncol = [] # normalized column\n        maxv = col[len(col) - 1] # maximum value of each column\n        variance = np.var(col.values) # added: compute the variance\n        if premax > maxv:\n            maxv = premax\n            max_variance = pre_var\n        else:\n            premax = maxv\n            pre_var = variance\n\n\n    # dictionary holding the normalized data\n    # { 'col1': [[0, 0], [0.2, 0.2], ...], 'col2': [[], [], ...], ... }\n    normalized_data = {}\n    # dictionary holding the tensors to run for each column (site)\n    # { 'col1': [update_centroids, centroids, points, assignments], 'col2': [], ... }\n    col_tensors = {}\n\n    # load the saved model and test it\n    k = 10 # number of clusters used for k-means\n    step = float(1 / (k - 1))\n    x = []\n    x_temp = 0\n    for i in range(k):\n        temp = []\n        temp.append(step * i)\n        x.append(temp)\n        x_temp += step\n    # compute the raw centroids using the poly_regression model.\n    polynomial_features = PolynomialFeatures(degree=3)\n    x_poly = polynomial_features.fit_transform(x)\n    clf_from_joblib = joblib.load('centroid.pkl')\n    y = clf_from_joblib.predict(x_poly)\n\n    raw_centroids = [] # used for k-means\n    raw_centroids2 = [] # used for drawing the plot\n    for i in range(k):\n        y_temp = y[i][0]/y[k-1][0]\n        raw_centroids.append([x[i][0], y_temp])\n        raw_centroids2.append([x[i][0], y_temp * np.log(max_variance + 1) / 2])\n\n    # for each column except the date column\n    for coln in df.columns[1:]:\n        col = df[coln] # column read from the csv\n        ncol = [] # normalized column\n        maxv = col[len(col) - 1] # maximum value of each column (TODO: may be wrong)\n        variance = np.var(col.values)\n        ncol_centroids = []\n\n        # normalization\n        if maxv == 0:\n            # if the column maximum is 0, set it to 1 to avoid division by zero\n            maxv = 1\n        for i in range(len(col)):\n            # append the normalized usage data to ncol;\n            # normalize so the maximum becomes 1, then multiply by the variance and\n            # divide by 2 so differences in the accumulated totals are reflected.\n            ncol.append([i / (len(col) - 1), (col[i] / maxv) * np.log(variance + 1)/2])\n\n        # each col runs k-means against its own raw_centroids.\n        for i in range(len(raw_centroids)): # == K\n            ncol_centroids.append([raw_centroids[i][0], raw_centroids[i][1]* ncol[len(col)-1][1]])\n\n        if max_bound < ncol[len(col)-1][1]:\n            max_bound = ncol[len(col)-1][1]\n        # store the resulting ncol in normalized_data\n        normalized_data[coln] = ncol\n\n        # build the tensors\n        points = tf.constant(ncol, dtype=np.float64)\n        centroids = tf.Variable(ncol_centroids, dtype=np.float64)\n\n        points_expanded = tf.expand_dims(points, 0)\n        centroids_expanded = tf.expand_dims(centroids, 1)\n\n        distances = tf.reduce_sum(tf.square(tf.subtract(points_expanded, centroids_expanded)), 2)\n        assignments = tf.argmin(distances, 0)\n\n        means = []\n        for c in range(k):\n            means.append(tf.reduce_mean(\n                tf.gather(points,\n                          
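# select the points currently assigned to cluster c and average them into its new centroid
                          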
tf.reshape(\n                            tf.where(\n                                tf.equal(assignments, c)\n                            ),[1,-1])\n                          ),reduction_indices=[1]))\n\n        new_centroids = tf.concat(means, 0)\n        update_centroids = tf.assign(centroids, new_centroids)\n\n        # store the finished column tensors in col_tensors\n        col_tensors[coln] = [update_centroids, centroids, points, assignments]\n\n    # dictionary holding the final centroid values after the session runs\n    # { 'col1': [[0, 0], [0, 0], ...], 'col2': [[0, 0], [0, 0], ...], ... }\n    final_centroids = {}\n\n    with tf.Session() as sess:\n        # initialize the session\n        sess.run(tf.global_variables_initializer())\n        for coln, col_ten in col_tensors.items(): # run the clustering column by column\n            for step in range(30): # number of iterations\n                # run the session\n                [_, col_centroid_values, col_points_values, col_assignment_values] = sess.run(col_ten)\n\n            state = {}\n            for i in range(len(raw_centroids)):\n                state[i] = []\n            for i in range(len(col_points_values)):\n                state[col_assignment_values[i]].append(col_points_values[i][0])\n\n            state_invalid = False\n            # assume that values which come out too small were barely affected.\n            if col_centroid_values[len(col_centroid_values)-1][1] < 1:\n                state_invalid = True\n            else: # check whether the clustering ran correctly for the current column\n                for k, v in state.items():\n                    if len(v) == 0:\n                        state_invalid = True\n                        break\n\n\n            # if the clustering did not run correctly, do not store the current column\n            if not state_invalid:\n                # create the array holding the final centroid values of the current column\n                final_centroids[coln] = []\n\n                # store the final centroid values\n                for i in col_centroid_values:\n                    final_centroids[coln].append(i)\n\n                # plot the data\n                plt.scatter(col_points_values[:, 0], col_points_values[:, 1], c=col_assignment_values, s=50, alpha=0.5)\n                plt.plot(col_centroid_values[:, 0], col_centroid_values[:, 1], 'kx', markersize=10)\n\n            # BEGIN view the graph column by column\n            #for i in raw_centroids:\n            #    plt.plot(i[0], i[1], 'rx', markersize=10)\n            #plt.title(coln)\n            #plt.show()\n            #plt.clf()\n            # END view the graph column by column\n\n    # BEGIN view the whole graph\n    for i in raw_centroids2:\n        plt.plot(i[0], i[1], 'rx', markersize=10)\n    plt.show()\n    # END view the whole graph\n\n    n_centroids = {}\n    # drop the first and last centroid (inaccurate)\n    for k, v in final_centroids.items():\n        n_centroids[k] = v[1:-1]\n    final_centroids = n_centroids\n\n    def generate_rank(centroids, by):\n        if by != \"x\" and by != \"y\": return None\n        centroids_by_cluster = []\n        for i in range(len(raw_centroids) - 2):\n            this_cent = {}\n            for site, cents in centroids.items():\n                this_cent[site] = cents[i]\n            centroids_by_cluster.append(this_cent)\n\n        score = {}\n\n        for cluster in centroids_by_cluster:\n            ca = []\n            for k, v in cluster.items():\n                ca.append([k, v[0], v[1]])\n            if by == \"x\": ca.sort(key=lambda x: x[1])\n            elif by == \"y\": ca.sort(key=lambda x: x[2])\n            for i in range(len(ca)):\n                if ca[i][0] not in score: score[ca[i][0]] = 0\n                score[ca[i][0]] += len(ca) - i + 1\n\n        sa = []\n        for k, v in score.items():\n            sa.append([k, v])\n\n        sa.sort(key=lambda x: x[1], reverse=True)\n\n        rank = []\n        for v in sa:\n            rank.append(v[0])\n\n        return rank\n\n    def generate_spread_matrix(centroids):\n        # centroids: { 'col1': [[0, 0], [0, 0], ...], 'col2': [[0, 0], [0, 0], ...], ... 
}\n\n        # sort the centroids by cluster\n\n        centroids_by_cluster = []\n        for i in range(len(raw_centroids) - 2):\n            this_cent = {}\n            for site, cents in centroids.items():\n                this_cent[site] = cents[i]\n            centroids_by_cluster.append(this_cent)\n\n        # find the y-min, y-max, x-min and x-max values for each cluster\n\n        clusters_bound = [] # (bottom, top, left, right)\n\n        for cluster in centroids_by_cluster:\n            cluster_bound = [max_bound, 0, max_bound, 0]\n            for site, cent in cluster.items():\n                if cluster_bound[0] > cent[1]: cluster_bound[0] = cent[1]\n                if cluster_bound[1] < cent[1]: cluster_bound[1] = cent[1]\n                if cluster_bound[2] > cent[0]: cluster_bound[2] = cent[0]\n                if cluster_bound[3] < cent[0]: cluster_bound[3] = cent[0]\n            clusters_bound.append(cluster_bound)\n\n        # influence calculation\n        # with the cluster bound scaled to (0, 0)~(1, 1), the a → b influence for sites a(ax, ay) and b(bx, by) inside the bound is:\n        # ((1 - (bx - ax)) * ay) when ax <= bx\n        # 0 when ax > bx\n\n        spread_matrix = OrderedDict()\n\n        for i in range(len(raw_centroids) - 2):\n            this_cluster = centroids_by_cluster[i]\n            cluster_bound = clusters_bound[i]\n            for site_current, cent_current in this_cluster.items():\n                for site_against, cent_against in this_cluster.items():\n                    if site_current == site_against: continue\n                    if cent_current[0] >= cent_against[0]: continue\n                    ax = (cent_current[0] - cluster_bound[2]) / (cluster_bound[3] - cluster_bound[2])\n                    bx = (cent_against[0] - cluster_bound[2]) / (cluster_bound[3] - cluster_bound[2])\n                    ay = (cent_current[1] - cluster_bound[0]) / (cluster_bound[1] - cluster_bound[0])\n                    influence = ((1 - (bx - ax)) * ay)\n                    if site_current not in spread_matrix: spread_matrix[site_current] = OrderedDict()\n                    if site_against not in spread_matrix[site_current]:\n                        spread_matrix[site_current][site_against] = influence\n                    else:\n                        spread_matrix[site_current][site_against] += influence\n\n        dv = len(raw_centroids) - 2\n\n        for site_current, sites_against in spread_matrix.items():\n            for s, v in sites_against.items():\n                spread_matrix[site_current][s] = v / dv\n\n        return spread_matrix\n\n    # compute the rankings\n    time_rank = generate_rank(final_centroids, \"x\")\n    influence_rank = generate_rank(final_centroids, \"y\")\n\n    print(\"time_rank: \", end=\"\")\n    print(time_rank, end=\"\\n\\n\")\n    print(\"influence_rank: \", end=\"\")\n    print(influence_rank)\n\n    # build the spread matrix\n    spread_matrix = generate_spread_matrix(final_centroids)\n\n    # generate the data for D3.js\n    d3v = {}\n    d3v['nodes'] = []\n    d3v['links'] = []\n\n    i = 1\n    for coln in df.columns[1:]:\n        n = {}\n        n['id'] = coln\n        d3v['nodes'].append(n)\n\n    sites_with_node = []\n\n    for site_current, sites_against in spread_matrix.items():\n        for s, v in sites_against.items():\n            if v == 0: continue\n            l = {}\n            l['source'] = site_current\n            l['target'] = s\n            l['value'] = v\n            if site_current not in sites_with_node: sites_with_node.append(site_current)\n            if s not in sites_with_node: sites_with_node.append(s)\n            d3v['links'].append(l)\n\n    final_nodes = []\n    for site in sites_with_node:\n        n = {}\n        n['id'] = site\n        final_nodes.append(n)\n\n    d3v['nodes'] = final_nodes\n    with open('./seq2/'+word+'_seq.json', 'w', encoding=\"utf-8\") as fp:\n        json.dump(d3v, fp, ensure_ascii=False, indent=4)\n    print(d3v)\n\nfor item in list_cursor:\n    word = item['word']\n    print(word)\n    main(word)","sub_path":"kmeans_statemap2.py","file_name":"kmeans_statemap2.py","file_ext":"py","file_size_in_byte":12939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"164868721","text":"import requests # pip install requests\nimport json\nimport hashlib\nimport hmac\nimport time #for 
nonce\nimport datetime\n\nclass BitfinexClient(object):\n KEY=\"xxx\"\n SECRET=\"xxx\"\n\n BASE_URL = \"https://api.bitfinex.com/\"\n\n def _nonce(self):\n return str(int(round(time.time() * 100000)))\n\n def _headers(self, path, nonce, body):\n\n signature = \"/api/\" + path + nonce + body\n h = hmac.new(self.SECRET.encode('utf8'), signature.encode('utf8'), hashlib.sha384)\n signature = h.hexdigest()\n\n return {\n \"bfx-nonce\": nonce,\n \"bfx-apikey\": self.KEY,\n \"bfx-signature\": signature,\n \"content-type\": \"application/json\"\n }\n\n def get_funding_wallet_balance(self):\n nonce = self._nonce()\n body = {}\n rawBody = json.dumps(body)\n path = \"v2/auth/r/wallets\"\n\n headers = self._headers(path, nonce, rawBody)\n\n r = requests.post(self.BASE_URL + path, headers=headers, data=rawBody, verify=True)\n\n return r\n\n def get_funding_offers(self, currency):\n nonce = self._nonce()\n body = {}\n rawBody = json.dumps(body)\n path = \"v2/auth/r/funding/credits/f\" + currency.upper()\n\n headers = self._headers(path, nonce, rawBody)\n\n r = requests.post(self.BASE_URL + path, headers=headers, data=rawBody, verify=True)\n\n return r\n\n\ndef get():\n\n r = BitfinexClient().get_funding_wallet_balance()\n\n if r.status_code != 200:\n print(\"Couldn't get data from bitfinex\")\n import sys\n sys.exit(1)\n\n\n funding_balances = ['USD', 'EUR']\n funding_data = {}\n\n\n for wallet in json.loads(r.content):\n\n if wallet[0] == 'funding':\n\n currency = wallet[1].upper()\n balance = wallet[2]\n\n if balance < 1:\n continue\n\n r = BitfinexClient().get_funding_offers(currency)\n used_amount = 0\n\n for offer in json.loads(r.content):\n used_amount += offer[5]\n\n funding_data[currency] = { 'balance': balance, 'used': used_amount, 'free': balance - used_amount}\n\n funding_data['date'] = str(datetime.datetime.now())\n return json.dumps(funding_data)\n\n\nif __name__ == \"__main__\":\n r = get()\n print(r)\n\n","sub_path":"ohaiLogs/src/utils/Bitfinex.py","file_name":"Bitfinex.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"142982104","text":"import numpy as np\n\nfrom aspics.buffers import Buffers\nfrom aspics.params import Params\n\n\nclass Snapshot:\n \"\"\"\n Thin wrapper around the .npz file format for saving/loading snapshots.\n This enables loading existing snapshots from file, or generating new snapshots full of random data or zeros.\n It also has a function for seeding initial infections in the population.\n Each snapshot consists of the data buffers used by OpenCL, as well as additional static data about the population\n which is not used in the runtime simulation but may be used for seeding infections at the snapshot stage.\n \"\"\"\n\n def __init__(\n self,\n nplaces,\n npeople,\n nslots,\n time,\n area_codes,\n not_home_probs,\n lockdown_multipliers,\n buffers,\n name=\"cache\",\n ):\n self.name = name\n self.nplaces = nplaces\n self.npeople = npeople\n self.nslots = nslots\n self.time = time\n self.area_codes = area_codes\n self.not_home_probs = not_home_probs\n self.lockdown_multipliers = lockdown_multipliers\n self.buffers = buffers\n\n def update_params(self, new_params):\n try:\n self.buffers.params[:] = new_params.asarray()\n except ValueError as e:\n print(\n f\"Snapshot.py caused an exception '{str(e)}'. This can happen if the parameters in the model \"\n f\"have changed after a snapshot has been created. 
Try deleting the snapshot file \"\n f\"'data/config/snapshots/{self.name}.npz' and re-running the model.\"\n )\n raise e\n\n def seed_prngs(self, seed):\n \"\"\"\n Recomputes the random states of the PRNGs passed to the kernels.\n The simulator runs deterministically for the same snapshot state, so calling this function gives new\n PRNG values to get enable stochastic results for different runs.\n \"\"\"\n np.random.seed(seed)\n self.buffers.people_prngs[:] = np.random.randint(\n np.uint32((1 << 32) - 1), size=self.npeople * 4, dtype=np.uint32\n )\n\n @classmethod\n def load_full_snapshot(cls, path):\n \"\"\"Creates a snapshot by reading the .npz file from the provided path.\"\"\"\n with np.load(path, allow_pickle=True) as file_data:\n nplaces = file_data[\"nplaces\"]\n npeople = file_data[\"npeople\"]\n nslots = file_data[\"nslots\"]\n time = file_data[\"time\"]\n area_codes = file_data[\"area_codes\"]\n not_home_probs = file_data[\"not_home_probs\"]\n lockdown_multipliers = file_data[\"lockdown_multipliers\"]\n buffers = Buffers(**{name: file_data[name] for name in Buffers._fields})\n return cls(\n nplaces,\n npeople,\n nslots,\n time,\n area_codes,\n not_home_probs,\n lockdown_multipliers,\n buffers,\n )\n\n def num_bytes(self):\n \"\"\"Returns size in bytes of this snapshot.\"\"\"\n total = 0\n for name in self.buffers._fields:\n total += getattr(self.buffers, name).nbytes\n return total\n","sub_path":"aspics/snapshot.py","file_name":"snapshot.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"169107188","text":"import pandas as pd \nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plot\nimport seaborn as sns\n\n\n# get csv from https://d396qusza40orc.cloudfront.net/phoenixassets/home_data.csv\ndef predict_house_price():\n home = pd.read_csv(\"course/week2/data/home_data.csv\") \n features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode'] \n \n print(home[features].head()) \n\n X = home[features]\n y = home['price']\n \n \n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n lm = LinearRegression()\n lm.fit(X_train, y_train)\n y_predict = lm.predict(X_test)\n # error vs sqft house\n plot.scatter(X_test['sqft_living'], y_predict-y_test)\n\n plot.show()\n\n\nif __name__ == \"__main__\":\n predict_house_price()","sub_path":"course/week2/house_price_all_attributes.py","file_name":"house_price_all_attributes.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"70330375","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Markup\nimport flask.ext.wtf as wtf\nfrom baseframe.forms import Form, RichTextField\nfrom hasweb.models.workspace import funnel_status\n\n\n__all__ = ['ProposalForm', 'FunnelSpaceForm', 'FunnelSectionForm',\n 'ConfirmSessionForm']\n\nDEFAULT_PROPOSAL_TEMPLATE = Markup(\"\"\"\nObjective\n\nDescription\n\nRequirements\n\nSlides\n\nLinks\n\n\"\"\")\n\n\nclass FunnelSpaceForm(Form):\n title = wtf.TextField(u\"Title\", validators=[wtf.Required()],\n description=u\"The name of the Workspace\")\n description = RichTextField(u\"Description\")\n proposal_template = wtf.TextAreaField(u\"Proposal Template\", default=DEFAULT_PROPOSAL_TEMPLATE, validators=[wtf.Required()])\n status = wtf.SelectField(u\"Status\", coerce=int,\n choices=[(type, 
funnel_status[type]) for type in funnel_status], validators=[wtf.Required()])\n\n\nclass FunnelSectionForm(Form):\n title = wtf.TextField('Title', validators=[wtf.Required()])\n description = wtf.TextAreaField('Description', validators=[wtf.Required()])\n public = wtf.BooleanField('Public?')\n\n\nclass ProposalForm(Form):\n title = wtf.TextField('Title', validators=[wtf.Required()])\n description = wtf.TextAreaField(u\"Description\", default=u\"\",\n validators=[wtf.Required()])\n session_type = wtf.RadioField('Session type', validators=[wtf.Required()], choices=[\n ('Lecture', 'Lecture'),\n ('Demo', 'Demo'),\n ('Tutorial', 'Tutorial'),\n ('Workshop', 'Workshop'),\n ('Discussion', 'Discussion'),\n ('Panel', 'Panel'),\n ])\n technical_level = wtf.RadioField('Technical level', validators=[wtf.Required()], choices=[\n ('Beginner', 'Beginner'),\n ('Intermediate', 'Intermediate'),\n ('Advanced', 'Advanced'),\n ])\n is_speaking = wtf.RadioField(\"Are you speaking?\", coerce=int,\n choices=[(1, u\"I will be speaking\"),\n (0, u\"I’m proposing a topic for someone to speak on\")])\n bio = wtf.TextAreaField(u\"Bio\", default=u\"\", validators=[wtf.Required()])\n phone = wtf.TextField(u'Phone number', validators=[wtf.Required()],\n description=u\"A phone number we can call you at to discuss your proposal, if required. \"\n \"Will not be displayed\")\n section_id = wtf.SelectField(u'Section', coerce=int)\n\n\nclass ConfirmSessionForm(wtf.Form):\n \"\"\"\n Dummy form for CSRF\n \"\"\"\n pass\n","sub_path":"hasweb/forms/workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"527407992","text":"import functools\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django.utils.translation import ugettext as _\n\n\n@functools.lru_cache(maxsize=None)\ndef login_context():\n helper = FormHelper()\n helper.form_class = 'narrow-form'\n helper.add_input(Submit('submit', _('Login')))\n\n return {\n 'extra_context': {\n 'form_helper': helper\n }\n }","sub_path":"ticket_distro/system/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"573122328","text":"class BumbleBeeError(Exception):\n\n errmsgs = {\n 1001: '_GET: resp not ok.',\n 1002: '_GET: cannot decode JSON.',\n 1003: '_POST: resp not ok.',\n 1004: '_POST: cannot decode JSON.'\n }\n\n def __init__(self, err_code=None):\n if not err_code:\n print('no err_code')\n else:\n msg = self.errmsgs[err_code]\n print(f'Error: {msg}')\n\n\nclass BumbleBeePersonError(BumbleBeeError):\n errmsgs = {\n 2001: 'Nothing here yet',\n }\n\n\nclass BumbleBeeAnswerError(BumbleBeeError):\n errmsgs = {\n 3001: 'Answer.__init__: must contain \"answer_id\" or \"doc\"',\n 3002: 'Answer.aloha: this id is not in MongoDB',\n }\n\n\nclass BumbleBeeQuestionError(BumbleBeeError):\n errmsgs = {\n 4001: 'Nothing here yet',\n }\n","sub_path":"bees/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"41022362","text":"import bge\n\nUVSPEED = 0.005\n\nfirst = None\n\ndef init(cont):\n global first\n \n if first == None:\n first = \"Done\"\n cont.owner['First'] = True\n else :\n cont.owner['First'] = False\n \n cont.script = 'water.run'\n \ndef run(cont):\n\n if 
cont.owner['First']:\n        own = cont.owner\n        mesh = own.meshes[0]\n        array = mesh.getVertexArrayLength(0)\n        for v in range(0,array):\n            vert = mesh.getVertex(0,v) \n            vert.v += UVSPEED\n","sub_path":"BGMC18 CaveFly V4/BGMC18 CaveFly/scripts/water.py","file_name":"water.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"234754940","text":"import sys\nimport argparse\nimport os\nimport json\nimport re\nimport spacy\nimport html\n\n\nnlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])\nsentencizer = nlp.create_pipe(\"sentencizer\")\nnlp.add_pipe(sentencizer)\n\n\ndef replace_newlines(comment):\n    \"\"\"\n    Replace newlines with spaces\n    :param comment: (str)\n    :return: modified comment\n    \"\"\"\n    return re.sub(r\"\\n{1,}\", \" \", comment)\n\n\ndef replace_tabs(comment):\n    \"\"\"\n    Replace tabs with spaces\n    :param comment: (str)\n    :return: modified comment\n    \"\"\"\n    return re.sub(r\"\\t{1,}\", \" \", comment)\n\n\ndef replace_carriage_returns(comment):\n    \"\"\"\n    Replace carriage returns with spaces\n    :param comment: (str)\n    :return: modified comment\n    \"\"\"\n    return re.sub(r\"\\r{1,}\", \" \", comment)\n\n\ndef unescape_html(comment):\n    \"\"\"\n    Unescape HTML from comment\n    :param comment: (str)\n    :return: modified comment\n    \"\"\"\n    return html.unescape(comment)\n\n\ndef remove_urls(comment):\n    \"\"\"\n    Remove URLs from comment\n    :param comment: (str)\n    :return: modified comment\n    \"\"\"\n    return re.sub(r\"(http|www)\\S+\", \"\", comment)\n\n\ndef remove_duplicate_spaces(comment):\n    \"\"\"\n    Remove duplicate spaces from comment\n    :param comment: (str)\n    :return: modified comment\n    \"\"\"\n    return re.sub(r\" {1,}\", \" \", comment)\n\n\ndef lemmatize_tag(comment):\n    \"\"\"\n    Tokenize, lemmatize and add POS tags to the tokens in comment\n    :param comment: (str)\n    :return: tokens (after lemmatization and POS tagging) joined with spaces within each sentence, with sentences separated by newlines\n    \"\"\"\n    # get Spacy document for modComm\n    nlpComm = nlp(comment)\n    # use Spacy document for modComm to create a string.\n    # * Insert \"\\n\" between sentences.\n    # * Split tokens with spaces.\n    # * Write \"/POS\" after each token.\n    modComm = \"\"\n    for sent in nlpComm.sents:\n        for token in sent:\n            # lemmatize\n            if re.match(r\"-\\S+\", token.lemma_) and not re.match(r\"-\\S+\", token.text):\n                modComm += token.text\n            else:\n                modComm += token.lemma_\n            # add the tag\n            modComm += \"/\" + token.tag_\n            # add a space between the tokens\n            modComm += \" \"\n\n        # separate the sentences with newlines\n        modComm += '\\n'\n    return modComm\n\n\ndef remove_duplicate_new_lines(comment):\n    \"\"\"\n    Replace multiple duplicate newlines with a single newline\n    :param comment: (str)\n    :return: modified string\n    \"\"\"\n    return re.sub(r\"\\n{1,}\", \"\\n\", comment)\n\n\ndef remove_space_before_new_lines(comment):\n    \"\"\"\n    Remove the space before the newline\n    :param comment: (str)\n    :return: modified string\n    \"\"\"\n    return re.sub(r\" \\n\", \"\\n\", comment)\n\n\ndef remove_leading_spaces(comment):\n    \"\"\"\n    Remove leading spaces\n    :param comment: str\n    :return: mod string with leading spaces removed\n    \"\"\"\n    return comment.lstrip(' ')\n\n\ndef replace_middot(comment):\n    \"\"\"\n    Replace the middot character with a space\n    :param comment:\n    :return:\n    \"\"\"\n    return re.sub(r\"·{1,}\", \" \", comment)\n\n\ndef preproc1(comment, steps=range(1, 5)):\n    ''' This function pre-processes a single comment\n\n    
Parameters: \n comment : string, the body of a comment\n steps : list of ints, each entry in this list corresponds to a preprocessing step \n\n Returns:\n modComm : string, the modified comment \n '''\n modComm = comment\n if 1 in steps:\n modComm = replace_newlines(modComm)\n modComm = replace_tabs(modComm)\n modComm = replace_carriage_returns(modComm)\n if 2 in steps:\n modComm = unescape_html(modComm)\n if 3 in steps:\n modComm = remove_urls(modComm)\n if 4 in steps:\n modComm = remove_duplicate_spaces(modComm)\n\n modComm = replace_middot(modComm)\n modComm = remove_leading_spaces(modComm)\n modComm = lemmatize_tag(modComm)\n modComm = remove_duplicate_new_lines(modComm)\n modComm = remove_space_before_new_lines(modComm)\n\n return modComm\n\n\ndef get_subsample(data, maxCount, ID):\n '''\n Get a subsample with maxCount number of items to process from the file data, starting sampling at index [ID%len(X)]\n :param data: the file data\n :param maxCount: size of the subsample\n :param ID: student ID\n :return: subsample\n '''\n length = len(data)\n start_index = ID[0] % length #inclusive\n end_index = (start_index + maxCount)% length #exclusive\n if end_index <= start_index :\n subsample = data[start_index:]\n subsample += data[:end_index]\n else:\n subsample = data[start_index:end_index]\n return subsample\n\n\ndef main(args):\n allOutput = []\n for subdir, dirs, files in os.walk(indir):\n for file in files:\n fullFile = os.path.join(subdir, file)\n print( \"Processing \" + fullFile)\n\n data = json.load(open(fullFile))\n\n # select appropriate args.max lines\n subsample = get_subsample(data, args.max, args.ID)\n del data\n\n # read those lines with something like `j = json.loads(line)`\n json_subsample = [json.loads(line) for line in subsample]\n\n # choose to retain fields from those lines that are relevant to you\n # add a field to each selected line called 'cat' with the value of 'file' (e.g., 'Alt', 'Right', ...)\n filtered_subsample = [{'id': obj[\"id\"], 'body': obj[\"body\"], 'cat': file} for obj in json_subsample]\n del json_subsample\n\n # process the body field (j['body']) with preproc1(...) using default for `steps` argument\n # replace the 'body' field with the processed text\n for index, item in enumerate(filtered_subsample):\n filtered_subsample[index]['body'] = preproc1(item['body'])\n\n # append the result to 'allOutput'\n allOutput += filtered_subsample\n\n fout = open(args.output, 'w')\n fout.write(json.dumps(allOutput))\n fout.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Process each .')\n parser.add_argument('ID', metavar='N', type=int, nargs=1,\n help='your student ID')\n parser.add_argument(\"-o\", \"--output\", help=\"Directs the output to a filename of your choice\", required=True)\n parser.add_argument(\"--max\", type=int, help=\"The maximum number of comments to read from each file\", default=10000)\n parser.add_argument(\"--a1_dir\", help=\"The directory for A1. Should contain subdir data. 
Defaults to the directory for A1 on cdf.\", default='/u/cs401/A1')\n    \n    args = parser.parse_args()\n\n    if (args.max > 200272):\n        print( \"Error: If you want to read more than 200,272 comments per file, you have to read them all.\" )\n        sys.exit(1)\n    \n    indir = os.path.join(args.a1_dir, 'data')\n    main(args)\n","sub_path":"IdentifyingPoliticalPersuationOnReddit/a1_preproc.py","file_name":"a1_preproc.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"482783509","text":"# coding: utf-8\n\nimport os\nimport random\nimport numpy as np\n\n######## read all boxes\ntrain = open('train.txt', 'r')\ntrain_read = train.readlines()\nbox_wh = []\n\nfor train_line in train_read:\n    txt_name = train_line.rstrip('\\n') + '.txt' #annotations_txt: width height channel \\n xmin ymin xmax ymax\n    #print(txt_name)\n    txt = open('annotations_txt/'+txt_name, 'r')\n    txt_read = txt.readlines()\n    flag = 0\n    for line in txt_read:\n        if flag == 0:\n            flag = 1\n            continue\n        line = line.split(' ')\n        xmin = float(line[0])\n        ymin = float(line[1])\n        xmax = float(line[2])\n        ymax = float(line[3])\n        box_wh.append([xmax-xmin, ymax-ymin])\n\ndef distance(box, centroid):\n    return (abs(box[0]-centroid[0])+abs(box[1]-centroid[1]))\n\ndef cluster_aspect_ratio(box, anchor_num):\n    ########## cluster\n    print('clustering aspect ratios ******************************************************')\n    box_cnt = len(box)\n    boxes = []\n    for p in range(box_cnt):\n        boxes.append([box[p][0]/box[p][1], box[p][1]/box[p][0]])\n    centroids = []\n    centroids_last = []\n    boxes_c_a = []\n    boxes_c_d = []\n    boxes_c_cnt = np.zeros(anchor_num)\n    rand_k = random.sample(range(0, box_cnt), anchor_num)\n    for i in range(0, anchor_num):\n        width = boxes[rand_k[i]][0]\n        height = boxes[rand_k[i]][1]\n        centroids.append([width, height])\n        centroids_last.append([0, 0])\n\n    flag = 0\n    cluster_cnt = 1\n    while not flag:\n        #print('clustering', cluster_cnt)\n        flag_cnt = 0\n        boxes_c_cnt = np.zeros(anchor_num)\n        boxes_c_a = [] # create the per-cluster lists\n        boxes_c_d = []\n        for n in range(0, anchor_num):\n            boxes_c_a.append([])\n            boxes_c_d.append([])\n        #print(boxes_c_d) \n        for i in range(0, box_cnt):\n            dist = []\n            for j in range(0, anchor_num):\n                dist.append(distance(boxes[i], centroids[j]))\n            min_dist_idx = dist.index(min(dist))\n            boxes_c_a[min_dist_idx].append(boxes[i])\n            boxes_c_cnt[min_dist_idx] += 1\n            boxes_c_d[min_dist_idx].append(min(dist))\n        for k in range(0, anchor_num):\n            #print(centroids[k])\n            centroids[k] = np.sum(boxes_c_a[k], axis=0)/(float(boxes_c_cnt[k]))\n            if centroids[k][0] == centroids_last[k][0] and centroids[k][1] == centroids_last[k][1]:\n                flag_cnt += 1\n            centroids_last[k] = centroids[k]\n        if flag_cnt == anchor_num:\n            flag = 1\n            #print('********************min max************************')\n            #for m in range(0, anchor_num):\n            #    print('min', boxes_c_a[m][boxes_c_d[m].index(min(boxes_c_d[m]))])\n            #    print('max', boxes_c_a[m][boxes_c_d[m].index(max(boxes_c_d[m]))])\n            #print('********************min max************************')\n        cluster_cnt += 1\n    print('********************centroids************************')\n    write_flag = 0\n    for l in range(0, anchor_num):\n        print(centroids[l], boxes_c_cnt[l])\n        if write_flag:\n            txt_name = str(int(centroids[l][0])) + '_' + str(int(centroids[l][1])) + '.txt'\n            txt = open(txt_name, 'w')\n            for p in range(int(boxes_c_cnt[l])):\n                txt.write(str(boxes_c_a[l][p][0]) + ' ' + str(boxes_c_a[l][p][1]) + '\\n')\n            txt.close()\n    print('clustering aspect ratios 
******************************************************')\n\ncluster_aspect_ratio(box_wh, 6)\n","sub_path":"assets/python_scripts/cluster_ratio.py","file_name":"cluster_ratio.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"277783793","text":"import nltk\nnltk.download('punkt')\nimport random\nfrom statemachine import StateMachine, State\nimport sys\nfrom get_recipes import *\nfrom irc_socket import *\nimport webscrape as top5_scraper\n\n\ninitial_outreaches = [\"Hi\", \"Hello\", \"Hey there\", \"Howdy\", \"Yoooo\", \"Yo\", \"Hey\", \"Welcome\"]\nsecondary_outreaches = [\"Hello???\", \"Anyone there???\", \"Hiii\", \"Hellooo\", \"I said hi\", \"excuse me???\"]\nfrustrated_phrases = [\"Screw you!\", \"Well, bye then\", \"Whatever, fine. Don't answer\", \"Ugh ok, bye\",\n                      \"Forget youuuuuu (in CeeLo Green voice)\", \"I'm leaving...\"]\nconfused_phrases = [\"I don't understand\", \"Can you say that again?\", \"Nani?!?\", \"What did you say?\",\n                    \"Literally idk dude\", \"Bro, what???\", \"Huh???\"]\n\n# when chatbot is speaker one\npairs_outreach = {\"intro\": [\"What's up?\", \"How's it going\", \"What's happening?\"]}\n\n# when chatbot is speaker two\npairs_response = {\n    \"intro\": [\"Hello\", \"Hey there\", \"Howdy\", \"Yoooo\", \"Hey\", \"Welcome\"],\n    \"inquiry\": [(\"I'm good\", \"How are you?\"), (\"I'm fine, thanks\", \"And you?\"),\n                (\"Nothing much\", \"You?\"), (\"Great\", \"What about you?\")]\n}\n\nget_next_outreach = lambda utterance: random.choice(pairs_outreach[utterance])\nget_next_response = lambda utterance: random.choice(pairs_response[utterance])\n\n# travel rec stuff\nimport travelRecs as trav\nimport pandas as pd\n\ntravel_time_questions = [\"When are you leaving?\",\n                         \"When would you like to travel?\",\n                         \"What time are you thinking of going?\",\n                         \"When are you free to fly?\"]\n\ntravel_temp_questions = [\"How hot do you like it?\",\n                         \"What temperature are you looking for?\",\n                         \"What's the weather you're looking for?\"]\n\ntravel_guess = [\"OK, I'll take a guess then.\",\n                \"Well that isn't helpful, but I can take a shot in the dark.\",\n                \"Alright, let me choose some of my favorites.\"]\n\ntravel_recs = [\"I've heard {place} is great in {month}!\",\n               \"If you're traveling in {month}, head to {place}!\",\n               \"When {month} comes around, visit {place}!\",\n               \"I'd recommend {place}, during the month of {month}.\"]\n\ntemp_words = {\n    \"hot\": 85, \"cold\": 50, \"warm\": 75, \"cool\": 65, \"chilly\": 55, \"toasty\": 85,\n}\n\n# Music chart \ngenres_supported = [\"alternative\", \"country\", \"pop\", \"rap\", \"all music\"]\n\nguess_genre = (\"I guess I'll just give you top 5 for all music then...\", \"all music\")\n\nask_for_genre_script = \"Please specify what genre you would like to see top 5 songs for. \" \\\n                       \"You can choose between: `alternative`, `country`, `pop`, or `rap`. \" \\\n                       \"Or you can say `all music`, to get top 5 irrespective of genre\"\n\nask_for_artist_check = \"Do you want to check if a particular artist is in this top 5 list? 
\" \\\n \"Please answer in the form: `artist {artist_first_name}` or \" \\\n \"`artist {artist_first_name} {artist_last_name}`\"\n\n\n\ndef main():\n if len(sys.argv) == 4:\n server = sys.argv[1]\n channel = sys.argv[2].strip(\"\\\"\")\n botnick = sys.argv[3]\n bot = ChatBot(server=server, channel=channel, botnick=botnick)\n else:\n bot = ChatBot()\n\n bot.init_bot()\n bot.run_bot()\n\n\n# state keeps track of where in the discourse we are\nclass ChatState(StateMachine):\n start = State('START', initial=True)\n initial_outreach = State('INITIAL_OUTREACH')\n secondary_outreach = State('SECONDARY_OUTREACH')\n outreach_reply = State('OUTREACH_REPLY')\n inquiry = State('INQUIRY')\n # this is a super state that encapsulates inquiry reply and inquiry of speaker two\n inquiry_super = State('INQUIRY_SUPER')\n inquiry_reply = State('INQUIRY_REPLY')\n giveup_frustrated = State('GIVEUP_FRUSTRATED')\n end = State('END')\n\n reach_out = start.to(initial_outreach)\n response = initial_outreach.to(outreach_reply) | start.to(outreach_reply)\n no_reply_one = initial_outreach.to(secondary_outreach)\n no_reply_after_second = secondary_outreach.to(giveup_frustrated)\n retry_secondary = secondary_outreach.to(secondary_outreach)\n second_response = secondary_outreach.to(outreach_reply)\n retry_outreach = outreach_reply.to(outreach_reply)\n no_inquiry = outreach_reply.to(giveup_frustrated)\n inquiry_given = outreach_reply.to(inquiry)\n inquiry_response = inquiry.to(inquiry_super)\n retry_inquiry = inquiry.to(inquiry)\n to_next_inquiry = inquiry_super.to(inquiry_reply)\n ignore_after_inquiry = inquiry.to(giveup_frustrated)\n ignore_after_inquiry_two = inquiry_super.to(giveup_frustrated)\n happy_end = inquiry_reply.to(end)\n giveup_end = giveup_frustrated.to(end)\n restart = start.from_(start, initial_outreach, secondary_outreach, outreach_reply, inquiry,\n inquiry_reply, inquiry_super, giveup_frustrated, end)\n\n\nclass ChatBot: # init here\n def __init__(self, server=\"irc.freenode.net\", channel=\"#CSC482\", botnick=\"Default-bot\"):\n self.bot_state = ChatState()\n self.bot_response = \"\"\n self.awaiting_response = False\n self.users = []\n self.target = \"\"\n self.server = server\n self.channel = channel\n self.botnick = botnick if \"-bot\" in botnick[-4:] else botnick + \"-bot\"\n self.irc = IRCSocket()\n self.irc.connect(server, channel, self.botnick)\n self.user_text = \"\"\n self.retries = 0\n self.spoke_first = None\n self.sent_forget = False\n self.seconds_passed = 0\n self.travel_month = \"\"\n self.travel_temp = 0\n self.travel_df = None\n self.music_genre = \"\"\n self.music_artist = \"\"\n self.top5_music_df = None\n\n def init_bot(self):\n while True:\n text = self.irc.get_response()\n if \"JOIN\" in text:\n break\n time.sleep(1)\n self.get_names()\n\n def get_names(self):\n names = self.irc.get_names(self.channel)\n names_no_bot = [name for name in names if self.botnick not in name]\n self.users = names_no_bot\n\n def check_msg(self, _text):\n return \"PRIVMSG\" in _text and self.channel in _text and self.botnick + \":\" in _text\n\n def get_user_text(self, _text):\n exp_index = _text.find(\"!\")\n who_sent = _text[1:exp_index] if exp_index > 0 else \"\"\n if not self.target or (self.bot_state.is_secondary_outreach and self.seconds_passed > 10):\n self.target = who_sent\n\n name_index = _text.find(self.botnick)\n self.user_text = _text[name_index + len(self.botnick) + 1:].strip().lower() # +1 to get rid of colon\n\n # print(f\"{who_sent} said `{self.user_text}`\")\n self.check_for_commands()\n 
if who_sent != self.target:\n return False\n return True\n\n def check_for_commands(self):\n if \"die\" == self.user_text:\n self.irc.kill_self(self.channel)\n exit()\n elif \"forget\" == self.user_text:\n self.sent_forget = True\n self.bot_state.restart()\n\n def wait_for_text(self, no_message_func, has_message_func):\n text = self.get_timed_response() # first 30 seconds\n if self.sent_forget:\n self.sent_forget = False\n self.awaiting_response = True # treat like no response in pipeline\n return False\n if text:\n self.awaiting_response = False\n has_message_func()\n else:\n self.awaiting_response = True\n no_message_func()\n return self.awaiting_response\n\n def get_timed_response(self):\n self.seconds_passed = 0\n text = None\n while self.seconds_passed != 30 and not self.sent_forget:\n # if self.seconds_passed % 5 == 0:\n # print(f\"{self.seconds_passed} tries\")\n text = self.get_response()\n if text:\n return text\n self.seconds_passed += 1\n return text\n\n def get_response(self):\n text = None\n if self.irc.poll_read_response():\n text = self.irc.get_response()\n if text:\n for line in text.split(\"\\n\"):\n if self.check_msg(line) and self.get_user_text(line):\n return line\n text = None\n return text\n\n def send_question_answer_pair(self, resp, send_question=True):\n if isinstance(resp, tuple):\n answer = resp[0]\n question = resp[1] # question\n self.irc.send_dm(self.channel, self.target, answer)\n if send_question:\n self.irc.send_dm(self.channel, self.target, question)\n\n @staticmethod\n def remove_conjunctions(_text):\n conjunctions = {\"'s\": \"is\", \"'re\": \"are\", \"'t\": \"not\", \"'d\": \"did\"}\n tokenized_text = nltk.word_tokenize(_text)\n for i in range(len(tokenized_text)):\n if tokenized_text[i] in conjunctions.keys():\n tokenized_text[i] = conjunctions[tokenized_text[i]]\n return ' '.join(tokenized_text)\n\n @staticmethod\n def normalize_response(_text):\n intro_words = [\"hey\", \"hello\", \"hi\", \"yo\", \"welcome\", \"howdy\"]\n one_word_inquiry = [\"you?\"]\n inquiry_start = [\"how\", \"what\", \"and\"]\n inquiry_next = [\"you\", \"going\", \"happening\", \"good\", \"popping\", \"cracking\", \"everything\", \"things\", \"life\",\n \"up\"]\n slang_phrases = [\"wassup\", \"sup\", \"wazzup\", \"poppin\", \"crackin\", \"whaddup\", \"it do\"]\n if _text.lower() == one_word_inquiry:\n return \"inquiry\"\n processed_text = ChatBot.remove_conjunctions(_text).lower()\n for start in inquiry_start:\n for nxt in inquiry_next:\n if start and nxt in processed_text:\n return \"inquiry\"\n for slang in slang_phrases:\n if slang in processed_text:\n return \"inquiry\"\n for word in intro_words:\n if word in processed_text:\n return \"intro\"\n return \"unknown\"\n\n @staticmethod\n def check_unique_question_hari(_text):\n # asks top X artists -> ask for genre -> user gives genre -> bot sends top 10 songs for genre\n # -> user asks if artist in top 10 -> bot says yes or no and prints song\n start_word = [\"what\", \"which\", \"who\", \"did\", \"is\", \"was\"]\n domain_phrases = [\"top 5\", \"songs\", \"records\", \"music\"]\n clean_text = ChatBot.remove_conjunctions(_text).lower()\n for sw in start_word:\n for phrase in domain_phrases:\n if sw and phrase in clean_text:\n return True\n return False\n\n def check_unique_question_clay(self, _text):\n # asks travel recommendation -> bot asks when you want to visit -> user gives time -> bot asks what temperature\n # -> users sends temperature -> bot sends final recommendation\n travel_words = [\"travel\", \"go\", \"fly\", 
'explore', \"visit\"]\n question_start = [\"where\", \"what\"]\n processed_text = ChatBot.remove_conjunctions(_text).lower()\n for start in question_start:\n for nxt in travel_words:\n if start and nxt in processed_text:\n return True\n\n return False\n\n def check_unique_question_archit(self, _text):\n # asks recipe or ingredients in food -> bot gives recipe -> user asked for ingredients\n # -> bot returns ingredients\n recipe = False\n ingredients = False\n food_item = get_food_item(_text)\n if food_item is None:\n return True\n # print(\"Text:\", _text)\n if \"recipe\" in _text or \"make\" in _text:\n recipe = True\n if \"ingredients\" in _text or \"materials\" in _text:\n ingredients = True\n # print(\"Inredients:\", ingredients)\n # print(\"Recipe:\", recipe)\n food_item.replace(\"?\",\"\")\n links = get_3_links(food_item)\n if len(links) == 0:\n self.irc.send_dm(self.channel, self.target, \"I don't know that food item sorry!\")\n return True\n\n search = None\n root_links = []\n for link in links:\n temp = link\n root_links.append(get_root_website(temp))\n self.irc.send_dm(self.channel, self.target, \"Which link do you want information from? (Enter 1, 2, or 3)\")\n i = 1\n while i < 4:\n # print(\"Link:\", links[i-1], \"Root:\", root_links[i-1])\n # index = root_links[i-1].find(\".\")+1 # find index of .\n msg = str(i)+\": \"+root_links[i-1]\n self.irc.send_dm(self.channel, self.target, msg)\n i += 1\n text = self.get_timed_response()\n\n if not text:\n self.irc.send_dm(self.channel, self.target, \"Fine I'll just take a guess on what you like\")\n search = random.choice(links)\n else: \n if \"1\" in text:\n search = links[0]\n elif \"2\" in text:\n search = links[1]\n elif \"3\" in text:\n search = links[2] \n\n data = get_recipe(search)\n if ingredients:\n if len(data[0]) < 1:\n self.irc.send_dm(self.channel, self.target, \"Sorry this site didn't have ingredients\")\n return True\n for ingred in data[0]:\n self.irc.send_dm(self.channel, self.target, ingred)\n self.irc.send_dm(self.channel, self.target, \"Would you like to know the recipe? (Yes or No)\")\n self.get_more_info(data,True)\n else:\n if data[1] == \"\":\n self.irc.send_dm(self.channel, self.target, \"Sorry this site didn't have a recipe\")\n return True\n recipe_list = data[1].split(\"\\n\")\n\n for step in recipe_list:\n self.irc.send_dm(self.channel, self.target, step)\n self.irc.send_dm(self.channel, self.target, \"Would you like to know the ingredients? (Yes or No)\")\n self.get_more_info(data,False)\n return True\n \n def get_more_info(self,data,data_type):\n text = self.get_timed_response()\n # print(\"DATA\",data)\n # print(\"Ingredients\",data[0])\n ingredients = data[0]\n if not text:\n self.irc.send_dm(self.channel, self.target, \"Guess not. Cya!\")\n return\n if \"n\" in text.lower():\n self.irc.send_dm(self.channel, self.target, \"Alright. Have fun cooking!\")\n return \n if \"y\" in text.lower() or \"s\" in text.lower():\n if data_type == True: # data_type True = get recipe\n \n if data[1] == \"\":\n self.irc.send_dm(self.channel, self.target, \"Sorry this site didn't have a recipe\")\n return\n recipe_list = data[1].split(\"\\n\")\n for step in recipe_list:\n self.irc.send_dm(self.channel, self.target, step)\n self.irc.send_dm(self.channel, self.target, \"Alright. 
Have fun cooking!\")\n return\n else: # data_type False = get ingredients\n if len(data[0]) < 1:\n self.irc.send_dm(self.channel, self.target, \"Sorry this site didn't have ingredients\")\n return\n for ingred in ingredients:\n self.irc.send_dm(self.channel, self.target, ingred)\n self.irc.send_dm(self.channel, self.target, \"Alright. Have fun cooking!\")\n return\n \n\n def answer_top5_music_query(self, _text):\n self.set_music_genre()\n self.set_music_artist()\n if not self.music_genre:\n self.irc.send_dm(self.channel, self.target, ask_for_genre_script)\n self.wait_for_text(self.guess_genre, self.set_music_genre)\n if self.bot_state.is_start:\n self.music_genre = None\n self.music_artist = None\n return\n if self.music_genre:\n self.top5_music_df = top5_scraper.get_top5_dataframe(self.music_genre)\n if not self.music_artist:\n formatted_strs = []\n for result in self.top5_music_df.values.tolist():\n formatted_str = f\"The song ranked {result[0]} is: {result[1]}\"\n formatted_strs.append(formatted_str)\n for string in formatted_strs:\n self.irc.send_dm(self.channel, self.target, string)\n self.irc.send_dm(self.channel, self.target, ask_for_artist_check)\n self.wait_for_text(self.missing_artist, self.set_music_artist)\n if self.bot_state.is_start:\n self.music_genre = None\n self.music_artist = None\n return\n if self.music_artist:\n if not self.music_genre:\n self.guess_genre()\n resp = \"\"\n for result in self.top5_music_df.values.tolist():\n if self.music_artist.lower() in result[2].lower():\n resp = f'Artist {result[2]} sang {result[1]}, ' \\\n f'which ranked {result[0]} on the top 5 list for {self.music_genre}'\n if resp:\n self.irc.send_dm(self.channel, self.target, resp)\n self.music_artist = None\n return\n neg_resp = f\"Artist not found in top 5 for {self.music_genre}\"\n self.irc.send_dm(self.channel, self.target, neg_resp)\n self.music_artist = None\n return\n self.irc.send_dm(self.channel, self.target, \"I'm leaving...\")\n\n def set_music_genre(self):\n for genre in genres_supported:\n if genre in self.user_text:\n self.music_genre = genre\n\n def set_music_artist(self):\n split_text = self.user_text.split(\" \")\n for i in range(len(split_text)):\n if split_text[i] == \"artist\":\n if i + 1 == len(split_text) - 1:\n artist_fname = split_text[i + 1]\n self.music_artist = artist_fname\n elif i + 2 < len(split_text):\n artist_fname = split_text[i + 1]\n artist_lname = split_text[i + 2] if split_text[i + 2][0].isupper() else None\n if artist_lname:\n self.music_artist = artist_fname + \" \" + artist_lname\n return\n self.music_artist = artist_fname\n\n def guess_genre(self):\n self.irc.send_dm(self.channel, self.target, guess_genre[0])\n self.music_genre = guess_genre[1]\n\n def missing_artist(self):\n resp = \"That's not an artist / you didn't format correctly... 
I'm leaving\"\n self.irc.send_dm(self.channel, self.channel, resp)\n self.bot_state.restart()\n\n def recommend_travel(self, _text):\n if not isinstance(self.travel_df, pd.DataFrame):\n # this takes a bit maybe warn user?\n # self.irc.send_dm(self.channel, self.target, 'Let me think a bit.')\n self.travel_df = trav.get_travel_df()\n\n self.get_travel_time(_text)\n self.get_travel_temp(_text)\n\n if not self.travel_month:\n self.prompt_for_travel_time()\n if self.bot_state.is_start:\n self.travel_month = None\n self.travel_temp = None\n return\n\n if not self.travel_temp:\n self.prompt_for_travel_temp()\n if self.bot_state.is_start:\n self.travel_month = None\n self.travel_temp = None\n return\n\n # make recommendations\n month_options = self.travel_df.loc[self.travel_df.loc[:, \"Month\"] == self.travel_month, :]\n temp_index = (month_options['Low Temp'] <= self.travel_temp) &\\\n (month_options['High Temp'] >= self.travel_temp)\n final_options = list(month_options.loc[temp_index, \"Town\"])\n # print(f\"options: {final_options}\")\n if len(final_options) == 0:\n final_options = list(month_options.loc[:, \"Town\"])\n\n travel_place = random.choice(final_options)\n recommendation = random.choice(travel_recs).format(place=travel_place.strip(),\n month=self.travel_month.title())\n self.irc.send_dm(self.channel, self.target, recommendation)\n\n def prompt_for_travel_time(self):\n self.irc.send_dm(self.channel, self.target, random.choice(travel_time_questions))\n self.wait_for_text(self.random_travel_time, self.get_travel_time_user_text)\n\n def prompt_for_travel_temp(self):\n self.irc.send_dm(self.channel, self.target, random.choice(travel_temp_questions))\n self.wait_for_text(self.random_travel_temp, self.get_travel_temp_user_text)\n\n def get_travel_time_user_text(self):\n self.get_travel_time(self.user_text, take_guess=True)\n\n def get_travel_temp_user_text(self):\n self.get_travel_temp(self.user_text, take_guess=True)\n\n def get_travel_time(self, _text, take_guess=False):\n for month in trav.months.keys():\n if month in _text:\n self.travel_month = month\n for season in trav.seasons.keys():\n if season in _text:\n self.travel_month = random.choice(trav.seasons[season])\n\n if not self.travel_month and take_guess:\n self.random_travel_time()\n\n def get_travel_temp(self, _text, take_guess=False):\n words = nltk.word_tokenize(_text)\n for word in words:\n if word in temp_words.keys():\n self.travel_temp = temp_words[word]\n if word.isdigit():\n self.travel_temp = int(word)\n\n if not self.travel_temp and take_guess:\n self.random_travel_temp()\n\n def random_travel_time(self):\n self.irc.send_dm(self.channel, self.target, random.choice(travel_guess))\n self.travel_month = random.choice(trav.months.keys())\n\n def random_travel_temp(self):\n self.irc.send_dm(self.channel, self.target, random.choice(travel_guess))\n self.travel_temp = random.choice(list(temp_words.values()))\n\n\n def run_bot(self):\n while True:\n # print(f\"state: {self.bot_state}\")\n if self.bot_state.is_start:\n self.start_state()\n elif self.bot_state.is_initial_outreach:\n self.initial_outreach_state()\n elif self.bot_state.is_secondary_outreach:\n self.secondary_outreach_state()\n elif self.bot_state.is_outreach_reply:\n self.outreach_reply_state()\n elif self.bot_state.is_inquiry:\n self.inquiry_state()\n elif self.bot_state.is_inquiry_super:\n self.inquiry_state()\n elif self.bot_state.is_inquiry_reply:\n self.inquiry_reply_state()\n elif self.bot_state.is_giveup_frustrated:\n self.giveup_state()\n elif 
self.bot_state.is_end:\n self.end_state()\n else:\n print(\"State error\")\n\n def start_state(self):\n self.bot_state.reach_out()\n\n def initial_outreach_state(self):\n # we are always speaker one\n self.target = \"\"\n # we are speaker one, we are speaker two\n self.spoke_first = self.wait_for_text(self.bot_state.no_reply_one, self.bot_state.response)\n if self.spoke_first:\n # append name list\n self.get_names()\n self.target = random.choice(list(self.users))\n # print(f\"reaching out to {self.target}\")\n self.irc.send_dm(self.channel, self.target, random.choice(initial_outreaches))\n return\n # code here (check for unique question and then branch to new state machine)\n if self.check_unique_question_hari(self.user_text):\n # fill in with logic to try unique functionality\n self.answer_top5_music_query(self.user_text)\n self.bot_state.restart()\n elif self.check_unique_question_clay(self.user_text):\n # fill in with logic to try unique functionality\n self.recommend_travel(self.user_text)\n self.bot_state.restart()\n elif self.check_unique_question_archit(self.user_text):\n # fill in with logic to try unique functionality\n self.bot_state.restart()\n\n def secondary_outreach_state(self):\n if self.wait_for_text(self.bot_state.retry_secondary, self.bot_state.second_response):\n self.irc.send_dm(self.channel, self.target, random.choice(secondary_outreaches))\n self.wait_for_text(self.bot_state.no_reply_after_second, self.bot_state.second_response)\n\n def outreach_reply_state(self):\n if self.spoke_first: # we are speaker one\n max_retries = 3\n normalized_text = self.normalize_response(self.user_text)\n if normalized_text in pairs_outreach.keys():\n resp = get_next_outreach(normalized_text) # should look like a question\n self.bot_state.inquiry_given()\n self.retries = 0\n self.irc.send_dm(self.channel, self.target, resp)\n elif self.retries <= max_retries:\n resp = random.choice(confused_phrases)\n self.irc.send_dm(self.channel, self.target, resp)\n self.wait_for_text(self.bot_state.retry_outreach, self.bot_state.retry_outreach)\n if self.awaiting_response:\n return\n self.retries += 1\n else:\n self.bot_state.no_inquiry()\n else: # we are speaker two\n normalized_text = self.normalize_response(self.user_text)\n if normalized_text in pairs_response.keys(): # intro or intro and question if they asked a question\n resp = get_next_response(normalized_text)\n self.bot_state.inquiry_given()\n self.bot_response = resp if isinstance(resp, tuple) \\\n else self.irc.send_dm(self.channel, self.target, resp)\n return\n resp = random.choice(confused_phrases)\n self.irc.send_dm(self.channel, self.target, resp)\n self.wait_for_text(self.bot_state.retry_outreach, self.bot_state.retry_outreach)\n\n def inquiry_state(self):\n if self.spoke_first:\n # bot just asked a question\n self.wait_for_text(self.bot_state.ignore_after_inquiry, self.bot_state.inquiry_response)\n if self.awaiting_response:\n return\n self.wait_for_text(self.bot_state.ignore_after_inquiry_two, self.bot_state.to_next_inquiry)\n normalized_text = self.normalize_response(self.user_text)\n if normalized_text in pairs_response.keys():\n resp = get_next_response(normalized_text)\n self.send_question_answer_pair(resp, False) # send just answer here\n else:\n resp = random.choice(confused_phrases)\n self.irc.send_dm(self.channel, self.target, resp)\n elif self.bot_response:\n self.send_question_answer_pair(self.bot_response)\n self.bot_response = \"\"\n else:\n self.wait_for_text(self.bot_state.ignore_after_inquiry, 
self.bot_state.inquiry_response)\n            if self.awaiting_response:\n                return\n            normalized_text = self.normalize_response(self.user_text)\n            if normalized_text in pairs_response.keys():\n                # question if they asked a question\n                resp = get_next_response(normalized_text)\n                self.bot_state.to_next_inquiry()\n                self.send_question_answer_pair(resp)\n            else:\n                self.bot_state.ignore_after_inquiry_two()\n\n    def inquiry_reply_state(self):\n        if self.spoke_first:\n            self.bot_state.happy_end()\n        else:\n            self.wait_for_text(self.bot_state.happy_end, self.bot_state.happy_end)\n\n    def giveup_state(self):\n        self.irc.send_dm(self.channel, self.target, random.choice(frustrated_phrases))\n        self.bot_state.giveup_end()\n\n    def end_state(self):\n        self.bot_state.restart()\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"src/chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":28349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"244584575","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport json\nimport base64\nfrom optparse import OptionParser\n\nfrom twisted.internet import defer\nfrom twisted.internet import reactor\n\nfrom twisted.web import server\nfrom twisted.web.server import Site\nfrom twisted.web.static import File\nfrom twisted.web.client import getPage\nfrom twisted.web.resource import Resource\n\nimport settings\n\nSTATUS_FAILED = 'red'\nSTATUS_SUCCESS = 'blue'\nSTATUS_FAILED_PROCESSING = 'red_anime'\nSTATUS_SUCCESS_PROCESSING = 'blue_anime'\n\nJENKINS_API_URL = settings.JENKINS_URL + '/api/json'\n\n\ndef jenkins_get_build_id(data):\n    return data.get('name', '')\n\n\ndef jenkins_get_build_status(data):\n    status = data.get('color')\n\n    return {\n        STATUS_FAILED: 'error',\n        STATUS_SUCCESS: 'success',\n        STATUS_FAILED_PROCESSING: 'processing',\n        STATUS_SUCCESS_PROCESSING: 'processing'\n    }.get(status, '')\n\n\ndef jenkins_get_author_name(data):\n    try:\n        return data['changeSet']['items'][0]['author']['fullName']\n    except:\n        try:\n            return data['actions'][1]['causes'][0]['userName']\n        except:\n            return 'unknown'\n\n\ndef download_page(url):\n    basic_auth = base64.encodestring('%s:%s' % (settings.JENKINS_USER,\n                                                settings.JENKINS_API_TOKEN))\n    basic_auth = basic_auth.strip()\n    request_headers = {\n        'Accept': 'application/json',\n        'Authorization': 'Basic %s' % basic_auth,\n    }\n\n    return getPage(url=url, headers=request_headers)\n\n\nclass BaseResource(Resource):\n    isLeaf = True\n\n    def process_response(self, response):\n        try:\n            return json.loads(response)\n        except:\n            return {}\n\n    def reply(self, response, request):\n        request.setResponseCode(200)\n        request.setHeader('Content-Type', 'application/json')\n        request.write(self.generate_response_data(response))\n\n        if not request.finished:\n            request.finish()\n\n        return request\n\n    def generate_response_data(self, response):\n        raise NotImplementedError\n\n\nclass BuildsInfoResource(BaseResource):\n    def generate_response_data(self, response):\n        response_data = {}\n        for success, data in response:\n            if not success:\n                continue\n\n            build_data = data['builds'][0]\n            response_data[data['name']] = {\n                'status': jenkins_get_build_status(data),\n                'name': data['displayName'],\n                'duration': build_data['duration'],\n                'author': jenkins_get_author_name(build_data)\n            }\n\n        return json.dumps(response_data)\n\n    def get_all_builds(self):\n        for row in settings.BUILDS:\n            for build in row:\n                if build:\n                    yield build\n\n    def generate_request_url(self, build_id):\n        return '%s/job/%s/api/json?depth=1&pretty=true' 
% \\\n (settings.JENKINS_URL, build_id)\n\n def render_GET(self, request):\n deferreds = []\n for build_id in self.get_all_builds():\n deferred = download_page(self.generate_request_url(build_id))\n deferred.addCallback(self.process_response)\n deferreds.append(deferred)\n\n deferred_list = defer.DeferredList(deferreds)\n deferred_list.addCallback(self.reply, request)\n\n return server.NOT_DONE_YET\n\n\nclass BuildsListResource(BaseResource):\n def get_jobs(self, jobs):\n return dict((job['name'], job) for job in jobs)\n\n def generate_response_data(self, response):\n response_data = []\n jobs = self.get_jobs(response.get('jobs', []))\n\n for row in settings.BUILDS:\n row_response_data = []\n for build in row:\n build_data = jobs.get(build, {})\n row_response_data.append({\n 'id': jenkins_get_build_id(build_data),\n })\n response_data.append(row_response_data)\n\n return json.dumps(response_data)\n\n def render_GET(self, request):\n deferred = download_page(JENKINS_API_URL)\n deferred.addCallback(self.process_response)\n deferred.addCallback(self.reply, request)\n\n return server.NOT_DONE_YET\n\n\nclass MonitorResource(Resource):\n isLeaf = False\n\n def render_GET(self, request):\n return open('index.html').read()\n\n def getChild(self, name, request):\n if name == '':\n return self\n\n return Resource.getChild(self, name, request)\n\n\ndef make_factory():\n root = MonitorResource()\n root.putChild('list', BuildsListResource())\n root.putChild('info', BuildsInfoResource())\n root.putChild('static', File('static'))\n\n factory = Site(root)\n\n return factory\n\n\ndef main(args):\n if not all((settings.JENKINS_URL,\n settings.JENKINS_USER,\n settings.JENKINS_API_TOKEN)):\n sys.exit(u'Jenkins params not set!')\n\n parser = OptionParser(usage='%prog ARGUMENTS')\n parser.add_option('-p', '--port',\n action='store',\n help='TCP port to listen to')\n options, args = parser.parse_args()\n if not options.port:\n parser.error('Specify TCP port to listen to')\n\n factory = make_factory()\n reactor.listenTCP(int(options.port), factory)\n reactor.run()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"585687737","text":"#!/usr/bin/env python3\n\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport equations\n\ndata_output = 'data/simulations/single_ligand.json'\n\ntspan = np.array([0, 120 * 60]) # 2 hour window\n\nunits = 1e9 # 1e9 for nM, 1e6 for μM, etc\n\nL1 = 30e-9\nR = 800e-9\nalpha = 0.06\nm = np.array([R, L1, 0]) * units\n\nk = np.array([1e-5, 2.2e-4])\n\nprint(f\"Simulated conditions: α: {alpha * 100:.1f}% Kd: {k[1]/k[0]:.3f}nM\")\nsimulated_binding = equations.simulate_one_ligand_one_receptor_binding(k, m, tspan, alpha)\n\nresult = {\n 'time': simulated_binding.t.tolist(),\n 'available_receptor': simulated_binding.y[0,:].tolist(),\n 'labeled_ligand': simulated_binding.y[1,:].tolist(),\n 'labeled_receptor_complexes': simulated_binding.y[2,:].tolist(),\n 'label_kinetic_parameters': k.tolist(),\n 'initial_conditions': m.tolist(),\n 'units': 'nM',\n 'alpha': alpha\n }\n\nwith open(data_output, 'w') as fd:\n fd.write(json.dumps(result))\nfd.close()\n","sub_path":"one_ligand_simulated_binding.py","file_name":"one_ligand_simulated_binding.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
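A minimal companion sketch for the simulation record above (not part of the dataset): it reloads the JSON that one_ligand_simulated_binding.py writes and turns the traces back into numpy arrays. The path and key names come from the record itself; everything else is an assumption.

import json
import numpy as np

# Read back the simulation results written by the script above.
with open('data/simulations/single_ligand.json') as fd:
    result = json.load(fd)

time_points = np.array(result['time'])                      # seconds (tspan was 0..120*60)
complexes = np.array(result['labeled_receptor_complexes'])  # in result['units'] (nM)
print(f"{len(time_points)} samples, final complex concentration: {complexes[-1]:.2f} nM")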
+{"seq_id":"268767994","text":"###################################################\n######## Transformace hlasek\n###################################################\n\ndef kraceni(vokala):\n \"\"\"\n program slouží ke zkrácení vokál\n s ohledem na historický vývoj\n jsou zde tedy pro ilustraci zaznamenány vztahy ů x o; ou x u\n !!! jedná se o kostru k rozšíření !!!\n Funkce: vstupem je řetězec - vokála a pokud je validní, výstupem je její krátká varianta\n \"\"\"\n if vokala == \"á\":\n vokala = \"a\"\n elif vokala == \"ů\":\n vokala = \"o\"\n elif vokala == \"ou\":\n vokala = \"u\"\n elif vokala == \"é\":\n vokala == \"e\"\n return vokala\n\ndef dlouzeni(vokala):\n \"\"\"\n program slouží k prodloužení vokál\n jedná se protiklad ke krácení\n !!! jedná se o kostru k rozšíření !!!\n \"\"\"\n if vokala == \"a\":\n vokala = \"á\"\n elif vokala == \"o\":\n vokala = \"ů\"\n elif vokala == \"u\":\n vokala = \"ou\"\n elif vokala == \"e\":\n vokala == \"é\"\n return vokala\n\ndef palatalizace1(k):\n \"\"\"\n program zajišťuje změkčování vybraných konzonantů\n podle historického vývoje první palatalizace\n + poslední stupeň palatalizace alveolár v češtině (r=>ř)\n \"\"\"\n if k == 'k': # \"k\" změním na \"č\"\n k = 'č'\n elif k == 'g': # \"g\" změním na \"ž\" (nemělo by k tomu docházet, g se vyvinulo ve spisovném jazyce na \"h\", ponechávám pro možnost program rozšířit)\n k = 'ž'\n elif k == 'h': # \"h\" změním na \"ž\"\n k = 'ž'\n elif k == \"ch\": # spřežku \"ch\" změním na \"š\"\n k = 'š'\n elif k == 'r': # \"r\" změním na \"ř\"\n k = 'ř'\n return k # vracím výsledek\n\ndef palatalizace2(k):\n \"\"\"\n program zajišťuje změkčování vybraných konzonantů\n podle historického vývoje\n funkce je téměř totožná s první palatalizací,\n druhá palatalizace však proběhla jinak a má jiné výsledky\n \"\"\"\n if k == 'k': # \"k\" změním na \"c\"\n k = 'c'\n elif k == 'g': # \"g\" změním na \"z\"\n k = 'z'\n elif k == 'h': # \"h\" změním na \"z\"\n k = 'z'\n elif k == \"ch\": # spřežku \"ch\" změním na s\n k = 's'\n return n # vracím výsledek, pokud nebyl palatizovaný, je týž\n\n\ndef stupnovani(slovo, kat):\n \"\"\"\n program slouží ke stupňování\n vstupem je nominativ a kategorie\n kategorie jsou Adj a Adv\n Tzn. ze vstupu adekvátnímu zápisu v korpusu jako tag A...1....1.*\n vytvoří výstup A...1....2.* a při volání prefixace i A...1....3.*\n V budoucnu by měla umět také pro vstup ekvivalentní D........1.*\n vytvořit D........2.* a D........3.*\n \"\"\"\n if kat == \"Adj\":\n nepravidelnost = [\"dobrý\"] # prostor pro další\n \"\"\"\n Pro nepravidelné stupňování bude nutné utvořit slovník (slovo - 1. stupeň)\n (Prozatím seznam, se slovníkem příliš neumím zacházet)\n \"\"\"\n if slovo in nepravidelnost:\n print (\"!! nepravidelné\") #prozatímní řešení, vytvoří i tak hypotetický 2. stupeň\n pass\n zmena = slovo[-2] #která hláska se bude měnit\n slovo = slovo[0:-2] # zkrátíme slovo tak, aby končilo před hláskou změna\n vysledek1 = \"\"\n vysledek2 = \"\" # pro 3. 
stupeň - bude se nejspíš volat derivace prefixů\n seznam = [\"l\", \"r\"]\n seznam2 = [\"n\", \"m\", \"v\", \"t\"]\n seznam3 = [\"d\", \"p\", \"h\"]\n if zmena in seznam:\n vysledek1 = slovo + palatalizace1(zmena)+ \"ejší\"\n elif zmena in seznam2:\n vysledek1 = slovo + zmena + \"ější\"\n elif zmena in seznam3:\n if slovo[-1] == \"c\":\n zmena = \"ch\"\n slovo = slovo[0:-1]\n vysledek1 = slovo + palatalizace1(zmena) + \"ší\"\n else:\n vysledek1 = slovo + palatalizace1(zmena) + \"í\"\n if kat == \"Adv\": # předpokládáme, že tahle funkce bude moci zahrnout i adverbia\n pass\n print (vysledek1)\n\n # pro třetí stupeň by se volal skript prefixace\n\"\"\" Pozor na slovo krátký - kratší - nutné zjistit, jak častý je to jev, zda\nvhodný do seznamu nebo pro vlastní pravidlo \"\"\"\n\nstupnovani(\"mladý\", \"Adj\")\nstupnovani(\"hezký\", \"Adj\")\nstupnovani(\"plachý\", \"Adj\")\nstupnovani(\"drahý\", \"Adj\")\nstupnovani(\"smutný\", \"Adj\")\nstupnovani(\"znamenitý\", \"Adj\")\nstupnovani(\"hebký\", \"Adj\") #netvoří hebčejší, ale jen hebčí!\nstupnovani(\"přímý\", \"Adj\")\nstupnovani(\"vzácný\", \"Adj\")\nstupnovani(\"dobrý\", \"Adj\")\nstupnovani(\"nový\", \"Adj\")\nstupnovani(\"veselý\", \"Adj\")\n\n","sub_path":"transformace_hlasek.py","file_name":"transformace_hlasek.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"139720281","text":"\"\"\"This is module oop_takehome.partD. We define a class named Person.\n \n The file stories/storyD.txt shows what behavior we want from our Rectangle class.\n\n\"\"\"\n\n__course__ = 'CS131A:Wostner:Fall2016'\n__author__ = 'mvarga2'\n\n\nclass Person():\n\n persons = list()\n\n def __init__(self, first, last):\n self.firstname = first\n self.lastname = last\n\n # REPLACE THE pass BELOW WITH YOUR OWN CODE\n # Just one line that adds this Person instance to \n # the persons class attribute that you see above.\n #ass\n self.__class__.persons.append(self)\n\n # Give Person instances a nice representation.\n\n def __repr__(self):\n template = 'Person({},{})'\n return template.format(self.firstname, self.lastname)\n\n # Control what Person instances look like when cast as a str object.\n\n def __str__(self):\n template = '{}, {}'\n return template.format(self.lastname, self.firstname)\n \n\n def initials(self):\n \"\"\"Method that returns the initials of the Person.\n\n >>> p = Person('Albert', 'Einstein')\n >>> p.initials()\n 'A.E.'\n \"\"\"\n #print(self.firstname[0])\n\n # REPLACE THE pass BELOW WITH YOUR OWN CODE\n return self.firstname[0] + '.' 
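A small sketch prompted by the comment in stupnovani() above, which notes that irregular comparison will need a word-to-form dictionary (not part of the dataset; the entries are standard Czech comparatives, the names are invented):

# One possible shape for the irregular-comparison lookup the comments call for.
NEPRAVIDELNE = {
    "dobrý": "lepší",   # good -> better
    "zlý": "horší",     # bad -> worse
    "malý": "menší",    # small -> smaller
    "velký": "větší",   # big -> bigger
}

def stupnovani_nepravidelne(slovo):
    # Returns the irregular comparative, or None when the word follows the regular rules.
    return NEPRAVIDELNE.get(slovo)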
+ self.lastname[0] + '.'\n\n\n\n\n\nif __name__ == '__main__':\n import doctest\n # doctest.testmod()\n doctest.testmod(optionflags=12)\n doctest.testfile('stories/storyD.txt') \n\n\n\n\n\n","sub_path":"oop_takehome/partD.py","file_name":"partD.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"259533825","text":"import pandas as pd\nimport json\n\n#CHANGE THE INPUT FILE NAME\ncsv = pd.read_csv('combined_BK_AUS_metaphlan.species_with_temps.csv')\n\ntaxonomy = {\n 'k': 'kingdom',\n 'p': 'phylum',\n 'c': 'class',\n 'o': 'order',\n 'f': 'family',\n 'g': 'genus',\n 's': 'species',\n 't': 'subspecies',\n}\n\n#CHANGE THE HEADER NAMES\nlocations = ['MEL 1', 'MEL 3', 'SYD 1', 'SYD 3']\n\noutput = {'name': 'Taxonomical Data', 'children': []} \n\nfor location in locations:\n output['children'].append({\n 'name': location,\n 'children': [],\n })\n\ndef parse_location(row):\n loc = row['Location']\n return loc\n\ndef parse_row(row):\n\n hierarchy = row['ID'].split('|')\n for location_idx, location in enumerate(locations):\n if row[location] > 0:\n location_data = output['children'][location_idx]\n\n d = location_data['children']\n for h in hierarchy:\n k, v = h.split('__') \n child_idx = -1\n for j in xrange(len(d)):\n if d[j]['name'] == v:\n child_idx = j\n child_data = d[j]\n\n if child_idx < 0:\n if v.endswith('_unclassified'):\n v = v[:-len('_unclassified')]\n child_data = {\n 'name': v,\n 'children': [],\n 'size': row[location],\n }\n d.append(child_data)\n if h == hierarchy[-1] and k == 's':\n pass\n else:\n d = child_data['children']\n\n\ncsv.apply(parse_row, axis=1)\n\n#CHANGE THE INDEX NUMBER TO CORRESPOND TO HEADER HIVE\n#EXAMPLE:\n#0 = Mel 1, 1 = Mel 3, 2 = Syd 1, 3 = Syd 2\noutput = output['children'][3] \n\noutput['children'] = [child for child in output['children'] if len(child['children']) > 0]\n\n#CHANGE THE FILE NAME TO CORRESPOND TO CORRECT HEADER/HIVE\njson.dump(output, open('meta_file_Syd2.json', 'w'), indent=2, separators=(',', ': '))\n\n","sub_path":"all_hives_radial_graph_for_future_use.py","file_name":"all_hives_radial_graph_for_future_use.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"399685215","text":"\"\"\"\nLoRaWAN Specification v1.0.2\nTest Case Group: Functionality\nTest Name: FUN_06\n\"\"\"\n#################################################################################\n# MIT License\n#\n# Copyright (c) 2018, Pablo D. Modernell, Universitat Oberta de Catalunya (UOC).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#################################################################################\nimport conformance_testing.test_step_sequence\nimport lorawan.lorawan_conformance.functionality.fun_steps as fun_steps\n\n\nclass TestAppManager(conformance_testing.test_step_sequence.TestManager):\n \"\"\"\n The TestAppManager (Test Application Manager) is a TestManager defined in each test, it specifies the\n different steps that the test performs.\n\n LoRaWAN Test FUN 06: Test the handling of the ACK bit in the confirmed message exchange, verifying the uplink\n retransmission of a message when the server does not send the acknowledgement.\n\n PRECONDITION: DUT (Device Under Test) is in TEST MODE.\n \"\"\"\n def __init__(self, test_session_coordinator):\n super().__init__(test_name=__name__.split(\".\")[-1],\n ctx_test_session_coordinator=test_session_coordinator)\n\n # ------------------------------------------------------------------------------------------------\n self.s2_check_retransmissions = fun_steps.CountCheckFCntUp(ctx_test_manager=self,\n step_name=\"S2CheckRetransmissions\",\n next_step=None,\n default_rx1_window=True,\n count_limit=2,\n sequence_increment=0)\n self.add_step_description(step_name=\"Step 2: S2CheckRetransmissions\",\n description=(\n \"The TAS don’t send the acknowledgment of the uplink messages to vefify the \"\n \"node retransmissions. After checking 2 retransmissions the UNCONFIRMED \"\n \"messages are configured again for all subsequent uplink communication.\\n\"\n \"- Reception from DUT: TAOK message with the downlink counter.\\n\"\n \"- TAS sends: ACK the messages and configures the UNCONFIRMED uplink frames \"\n \"(plain text FRMPaylod=0x03).\\n\"))\n\n # ------------------------------------------------------------------------------------------------\n self.s1_actok_to_setconfirmed = fun_steps.ActOkToSetConfirmed(ctx_test_manager=self,\n step_name=\"S1ActOKToSetConfirmed\",\n next_step=self.s2_check_retransmissions,\n default_rx1_window=True)\n self.add_step_description(step_name=\"Step 1: S1ActOKToSetConfirmed\",\n description=(\n \"Checks the downlink counter of the TAOK message and configures the node \"\n \"to use CONFIRMED uplink frames sending a test ID 2.\\n\"\n \"- Reception from DUT: TAOK message with the downlink counter.\\n\"\n \"- TAS sends: Triggers the usage of CONFIRMED uplink frames \"\n \"(plain text FRMPaylod=0x02).\\n\"))\n\n # ------------------------------------------------------------------------------------------------\n # Set Initial Step\n self.current_step = self.s1_actok_to_setconfirmed\n self.add_step_description(step_name=\"Test ID: TD_LoRaWAN_\",\n description=(\n \"Objective: Test the handling of the ACK bit in the confirmed message exchange, \"\n \"verifying the uplink retransmission of a message when the server does \"\n \"not send the acknowledgement.\\n\"\n \"References: LoRaWAN Specification v1.0.2.\\n\"\n \"Pre-test conditions: The DUT has an active session with the TAS and \"\n \"is in Test Mode.\\n\"))\n\n\n","sub_path":"lorawan/lorawan_conformance/functionality/td_lorawan_fun_06.py","file_name":"td_lorawan_fun_06.py","file_ext":"py","file_size_in_byte":5612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
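A toy illustration of the step-wiring pattern used by TestAppManager above (not part of the test suite): steps are constructed last-to-first so each one can hold a reference to its successor via next_step. The Step class and attribute names here are invented.

class Step:
    def __init__(self, name, next_step=None):
        self.name = name
        self.next_step = next_step

# Built in reverse, exactly like s2_check_retransmissions before s1_actok_to_setconfirmed.
s2 = Step("S2CheckRetransmissions")
s1 = Step("S1ActOKToSetConfirmed", next_step=s2)

step = s1
while step is not None:  # walk the chain in execution order
    print(step.name)
    step = step.next_step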
+{"seq_id":"437606828","text":"# # -*- coding: utf-8 -*-\nimport os\nfrom app.models import Gene\nfrom target_site_finder.talen import seek_model\n\n\ndef run(*args):\n path, uuid_file = args\n if not os.path.exists(path):\n os.mkdir(path)\n with open(uuid_file) as handle:\n for line in handle:\n uuid = line.rstrip()\n print(uuid)\n gene = Gene.objects.get(uuid=uuid)\n gid = '{}_{}_{}'.format(gene.symbol, gene.gi, gene.rank)\n\n url = os.path.join(path, gid)\n os.mkdir(url)\n\n lr_path = os.path.join(url, 'lr')\n spacer_path = os.path.join(url, 'spacer')\n os.mkdir(lr_path)\n os.mkdir(spacer_path)\n\n [target_site.save(lr_path, spacer_path) for target_site in seek_model(gene)]\n\n\n\"\"\"\n./manage.py runscript seek_talen --script-args=/data/talen /data/protein_coding_gene_uuid.csv\n\"\"\"\n","sub_path":"app/scripts/seek_talen.py","file_name":"seek_talen.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"25477883","text":"from setuptools import setup\n\n\nVERSION = \"0.1.8\"\n\nsetup(\n name='Xlsxcursor',\n description=\"Xlsxcursor for xlsxwriter.\",\n version=VERSION,\n url='https://github.com/KokocGroup/xslxcursor',\n download_url='https://github.com/KokocGroup/xslxcursor/tarball/v{}'.format(VERSION),\n packages=['xlsxcursor'],\n install_requires=[\n 'xlsxwriter',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"321753080","text":"from django.shortcuts import render\nfrom BarraPuntoApp.models import Page\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views.decorators.csrf import csrf_exempt\n\n#update\nfrom xml.sax import make_parser\nfrom urllib import request, error\n\n# Create your views here.\n\ncontent_bp = {}\n\nfrom xml.sax.handler import ContentHandler\n\nclass myContentHandler(ContentHandler):\n\n def __init__ (self):\n self.inItem = False\n self.inContent = False\n self.theContent = \"\"\n self.titles = []\n self.links = []\n\n def startElement (self, name, attrs):\n if name == 'item':\n self.inItem = True\n elif self.inItem:\n if name == 'title':\n self.inContent = True\n elif name == 'link':\n self.inContent = True\n\n def endElement (self, name):\n if name == 'item':\n self.inItem = False\n elif self.inItem:\n if name == 'title':\n self.titles.append(self.theContent)\n self.inContent = False\n self.theContent = \"\"\n elif name == 'link':\n self.links.append(self.theContent)\n self.inContent = False\n self.theContent = \"\"\n\n def characters (self, chars):\n if self.inContent:\n self.theContent = self.theContent + chars\n\n\ndef show_bp():\n content = \"\"\n for k,v in content_bp.items():\n content += \"
  • \" + k + \"
  • \"\n return content\n\n@csrf_exempt\ndef cms_put(request, rec):\n if request.method == \"GET\":\n try:\n page = Page.objects.get(name=rec)\n resp = (\"
    \" + page.page +\n \"
      \" + show_bp() + \"
    \")\n return HttpResponse(resp)\n except ObjectDoesNotExist:\n return HttpResponse(\"Content not found\", status=404)\n elif request.method == \"PUT\":\n page = Page(name=rec, page=request.body)\n page.save()\n return HttpResponse(\"Succesfully added page: \" + rec)\n else:\n return HttpResponse(\"Method not allowed\", status=405)\n\ndef update(req):\n theParser = make_parser()\n theHandler = myContentHandler()\n theParser.setContentHandler(theHandler)\n\n url = \"http://barrapunto.com/index.rss\"\n xmlStream = request.urlopen(url)\n theParser.parse(xmlStream)\n\n for i in range(len(theHandler.titles)):\n content_bp[theHandler.titles[i]] = theHandler.links[i]\n\n return HttpResponse(\"

    Content succesfully updated!

    \" +\n \"

    Pages are:

      \" + show_bp() + \"
    \")\n","sub_path":"ContentBarraPunto/BarraPuntoApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"166275253","text":"import json\nimport time\n\nfrom PIL import Image\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\n\nfrom chaojiying_Python.chaojiying import Chaojiying_Client\nfrom utils import ua_pond, sleep_time, ip_pool, ip_port\nfrom values import account_number, card_number, card_password, choice_sever_SOHU\n\n# account_number = account_number() # 账号\n# card_number = card_number()\n# card_password = card_password()\n\n\nclass SoHu(object):\n '''搜狐充值入口'''\n def __init__(self, choice):\n # 驱动参数\n self.chao_ji_ing = Chaojiying_Client('nap2017', 'qweasdzxc', '909537') # 用户中心>>软件ID 生成一个替换 96001\n self.options = webdriver.ChromeOptions()\n self.options.add_argument('--user-agent={}'.format(ua_pond))\n self.options.add_argument('window-size=1920x1080') # 指定浏览器分辨率\n self.options.add_argument('--headless') # 无界面模式\n self.options.add_argument('--disable-gpu') # 隐藏gpu界面\n # self.options.add_argument('--proxy-server=http://{}'.format(ip_port())) # ip 代理\n self.driver = webdriver.Chrome('/usr/local/bin/chromedriver',chrome_options=self.options)\n # 基本参数\n self.url = 'http://chong.changyou.com/'\n self.choice_game = choice_sever_SOHU(choice) # 选择游戏\n\n def parse(self, an, cn, cp):\n # # xss注入参数\n self.driver.find_element_by_xpath('//a[@class=\"normal fast_payBtn\"]').click() # 点击快速充值\n # self.driver.find_element_by_xpath('//input[@name=\"cn\"]').send_keys(account_number) # 账号\n # self.driver.find_element_by_xpath('//input[@name=\"cn2\"]').send_keys(account_number) # 确认账号\n # self.driver.find_element_by_xpath('//input[@name=\"cardNo\"]').send_keys(card_number) # 卡号\n # self.driver.find_element_by_xpath('//input[@name=\"cardPwd\"]').send_keys(card_password) # 密码\n self.driver.find_element_by_xpath('//input[@name=\"cn\"]').send_keys(an) # 账号\n self.driver.find_element_by_xpath('//input[@name=\"cn2\"]').send_keys(an) # 确认账号\n self.driver.find_element_by_xpath('//input[@name=\"cardNo\"]').send_keys(cn) # 卡号\n self.driver.find_element_by_xpath('//input[@name=\"cardPwd\"]').send_keys(cp) # 密码\n time.sleep(sleep_time())\n self.driver.find_element_by_xpath('//*[@id=\"fast_pay_ul\"]/li[4]/div').click() # 1\n self.driver.find_element_by_xpath(self.choice_game).click() # 2\n # 选择大区\n try:\n self.driver.find_element_by_xpath('//*[@id=\"dlgame_select_rid\"]/div').click() # 选择大区1\n if self.choice_game == '//a[text()=\"幻想神域\"]': # 31\n self.driver.find_element_by_xpath('//a[text()=\"一区-启源大陆\"]').click() # 选择大区2\n elif self.choice_game == '//a[text()=\"海战世界\"]': # 55\n self.driver.find_element_by_xpath('//a[text()=\"中途岛(双线区)\"]').click() # 选择大区2\n elif self.choice_game == '//a[text()=\"星际战甲\"]': # 42\n self.driver.find_element_by_xpath('//*[@id=\"dlgame_region_id_box\"]/a').click() # 选择大区2\n except:\n print('你选的是前五个')\n time.sleep(sleep_time())\n\n def fight(self):\n # 验证码截取参数\n self.driver.save_screenshot('/www/wwwroot/www.stargame.com/media/sohu_code.png')\n imgelement = self.driver.find_element_by_id('checkcodeId') # 定位验证码\n location_dict = imgelement.location # 获取验证码x,y轴坐标\n print('lo: ', location_dict)\n\n if self.choice_game == '//a[text()=\"幻想神域\"]':\n print('幻想神域')\n lo_location = {'x': 1128, 'y': 722}\n # lo_location = {'x': 616, 'y': 722}\n elif self.choice_game == '//a[text()=\"海战世界\"]':\n print('海战世界')\n lo_location = {'x': 1128, 'y': 722}\n 
# lo_location = {'x': 616, 'y': 722}\n elif self.choice_game == '//a[text()=\"星际战甲\"]':\n print('星际战甲')\n lo_location = {'x': 1128, 'y': 722}\n # lo_location = {'x': 616, 'y': 722}\n else:\n print('前五个')\n lo_location = {'x': 1128, 'y': 680}\n # lo_location = {'x': 618, 'y': 679}\n\n size_dict = imgelement.size # 获取验证码的长宽\n print(size_dict)\n size = {'height': 33, 'width': 91}\n\n rangle = (int(lo_location['x']), int(lo_location['y']), int(lo_location['x'] + size['width']),\n int(lo_location['y'] + size['height'])) # 写成我们需要截取的位置坐标\n print(rangle)\n i = Image.open(\"/www/wwwroot/www.stargame.com/media/sohu_code.png\") # 打开截图\n frame4 = i.crop(rangle) # 使用Image的crop函数,从截图中再次截取我们需要的区域\n rgb_im = frame4.convert('RGB')\n rgb_im.save('/www/wwwroot/www.stargame.com/media/sohu_save.png') # 保存我们接下来的验证码图片 进行打码\n\n # 打码\n im = open('/www/wwwroot/www.stargame.com/media/sohu_save.png', 'rb').read()\n data = self.chao_ji_ing.PostPic(im, 1005)\n print(data)\n if data['pic_str'] == '':\n statue = {'STATUE': 404, 'MSG': '系统超时'}\n self.chao_ji_ing.ReportError(data['pic_id'])\n # print(statue)\n return statue\n\n # data_dict = {'err_no': 0, 'err_str': 'OK', 'pic_id': '9122415085089600039', 'pic_str': 'RNCZM', 'md5': '22fda2e7d516a448c271486dc35f32f0'}\n # code = data_dict['pic_str']\n # if data_dict['pic_str'] == '':\n # statue = {'STATUE': 404, 'MSG': '系统超时'}\n # chao_ji_ing.ReportError(data['pic_id'])\n\n code = data['pic_str']\n self.driver.find_element_by_id('annexcode').send_keys(code) # 验证码\n time.sleep(sleep_time())\n self.driver.save_screenshot('/www/wwwroot/www.stargame.com/media/sohu_yc_code.png') # 保存验证码后截图\n self.driver.find_element_by_xpath('//a[text()=\"充值\"]').click() # 立即充值\n st = self.driver.find_element_by_xpath('//p[@id=\"error\"]').text\n if st == '呦!您输入的验证码不正确呀':\n statue = {'STATUE': 400, 'MESSAGE': '验证码错误, 打码失败'}\n self.chao_ji_ing.ReportError(data['pic_id'])\n # print(statue)\n return statue\n else:\n statue = {'STATUE': 200, 'MESSAGE': 'SUCCESS'}\n # 确认订单信息\n # 获取充值结果\n # print(statue)\n return statue\n\n def save(self, statue):\n # 获取充值结果\n print(statue)\n print(self.driver.title)\n time.sleep(5)\n self.driver.quit()\n return statue\n \n def run(self, an, cn, cp):\n time.sleep(sleep_time())\n self.driver.get(self.url) # 打开网页\n self.parse(an, cn, cp)\n data = self.fight()\n a = self.save(data)\n return a\n\n# if __name__ == '__main__':\n# choice = '3'\n# R = SoHu(choice)\n# R.run()","sub_path":"sohu_worm.py","file_name":"sohu_worm.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"378409149","text":"from django.conf.urls import patterns, url\n\nfrom rango.controllers import user, about, category, page, search\n\nurlpatterns = patterns('',\n\t\t\t\t\t\turl(r'^$', category.index, name='index'),\n\t\t\t\t\t\turl(r'^add_category/$', category.add, name='add_category'),\n\t\t\t\t\t\turl(r'^category/(?P\\w+)/$', category.show, name='category'),\n\t\t\t\t\t\turl(r'^category/(?P\\w+)/add_page/$', page.add, name='add_page'),\n\t\t\t\t\t\turl(r'^goto/$', page.track_url, name='track_url'),\n\t\t\t\t\t\turl(r'^register/$', user.register, name='register'),\n\t\t\t\t\t\turl(r'^login/', user.attempt_login, name='login'),\n\t\t\t\t\t\turl(r'^logout/', user.attempt_logout, name='logout'),\n\t\t\t\t\t\turl(r'^profile/', user.profile, name='profile'),\n\t\t\t\t\t\turl(r'^other_profiles/', user.other_profiles, name='other_profiles'),\n\t\t\t\t\t\turl(r'^about/', about.show, 
name='about'),\n\t\t\t\t\t\turl(r'^search/', search.find, name='search'),\n\t\t\t\t\t\turl(r'^like_category/', category.increase_like, name='increase_like'),\n\t\t\t\t\t\turl(r'^suggest_category/$', category.suggest_category, name='suggest_category'),\n\t\t\t\t\t\turl(r'^add_auto_page/$', category.add_auto_page, name='add_auto_page'),\n\t\t\t\t\t )\n","sub_path":"rango/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"8992188","text":"from http.client import RemoteDisconnected\nfrom xmlrpc.client import Fault\n\nfrom fastapi import APIRouter, HTTPException, Path\nfrom XenGarden.GuestMetrics import GuestMetrics\nfrom XenGarden.Host import Host\nfrom XenGarden.session import create_session\n\nfrom API.v1.GuestMetrics.serialize import serialize\nfrom app.settings import Settings\n\nrouter = APIRouter()\n\n\n@router.get(\"/{cluster_id}/guest/{guest_uuid}\")\nasync def guest_get_by_uuid(\n cluster_id: str = Path(\n default=None, title=\"cluster_id\", description=\"Cluster ID\"\n ),\n guest_uuid: str = Path(\n default=None, title=\"guest_uuid\", description=\"Guest UUID\"\n ),\n):\n \"\"\" Get GuestMetrics by UUID \"\"\"\n try:\n # KeyError Handling\n try:\n session = create_session(\n _id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()\n )\n except KeyError as key_error:\n raise HTTPException(\n status_code=400, detail=f\"{key_error} is not a valid path\"\n )\n\n guest: GuestMetrics = Host.get_by_uuid(\n session=session, uuid=guest_uuid\n )\n\n if guest is not None:\n ret = dict(success=True, data=serialize(guest))\n else:\n ret = dict(success=False)\n\n session.xenapi.session.logout()\n return ret\n except Fault as xml_rpc_error:\n raise HTTPException(\n status_code=int(xml_rpc_error.faultCode),\n detail=xml_rpc_error.faultString,\n )\n except RemoteDisconnected as rd_error:\n raise HTTPException(status_code=500, detail=rd_error.strerror)\n","sub_path":"API/v1/GuestMetrics/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"395518812","text":"# this converts a for loop iterating through a range to java/c++/javascript syntax\nimport sys, re\n\nargs = sys.argv[1:]\nsyntax = 'python convert_for_loop.py lang \"python_for_loop\" [**params]'\n\nif args[0] == \"help\":\n print(\"NOTE: This only works with python for loops iterating through a range\", \"\\n\\n\\tproper syntax: \" + syntax, '\\n')\n sys.exit(0)\n\n# making sure the user supplied enough and correct arguments\nassert len(args) >= 2, \"insufficient argruments, proper syntax: \" + syntax\nassert args[0].lower() in {'java', 'javascript', 'js', 'cpp', 'c++'}, \"first argument must be the language you are converting too\"\n\nif args[0] in {'java', 'cpp', 'c++'}:\n identifier = 'int'\nelse:\n identifier = 'let'\n\ndef configure_len(var):\n if var is not None:\n if var.startswith('len('):\n if args[0] in {'cpp', 'c++'}:\n return f'size({var[4:-1]})'\n else:\n return f'{var[4:-1]}.length'\n return var\n\ndef check_syntax():\n if items is None:\n raise SyntaxError(f\"the syntax for the provided for loop ( \\\"{args[1]}\\\" ) is incorrect\\n\\tNote: Make sure that all necessary parameters are provided\")\n\ndef check_numbers_syntax():\n if end is None and step is None: # i.e. 
if the range is only provided one number\n assert int(start) >= 0, f\"python's range function does not support range({start})\"\n elif end is not None and step is None: # i.e. if the range is provided 2 numbers\n assert int(start) < int(end), f\"python's range function does not support range({start}, {end})\"\n else: # i.e. if the range is provided 3 numbers\n assert int(start) < int(end) and int(step) >= 0, f\"python's range function does not support range({start}, {end}, {step})\"\n\n\nif len(args) >= 3:\n if '-var' in args[2:]:\n items = re.search(r\"for\\s+?([\\w_]+)\\s+in\\s+range\\((-?[\\w_()\\[\\]'\\\"]+),?\\s*(-?[\\w_()\\[\\]'\\\"]+)?,?\\s*(-?[\\w_()\\[\\]'\\\"]+)?\\):?\", args[1])\n check_syntax()\n var, start, end, step = map(configure_len, items.groups())\nelse:\n items = re.search(r\"for\\s+?([\\w_]+)\\s+in\\s+range\\((-?[\\d]+),?\\s*(-?[\\d]+)?,?\\s*(-?[\\d]+)?\\):?\", args[1])\n check_syntax()\n var, start, end, step = items.groups()\n check_numbers_syntax()\n\nif end is None and step is None: # i.e. if the range is only provided one number\n # start becomes the end with the starting number as 0\n print(f'for ({identifier} {var} = 0; {var} < {start}; {var}++) {\"{\"}\\n\\t// code goes here\\n{\"}\"}')\nelif end is not None and step is None: # i.e. if the range is provided 2 numbers\n print(f'for ({identifier} {var} = {start}; {var} < {end}; {var}++) {\"{\"}\\n\\t// code goes here\\n{\"}\"}')\nelse: # i.e. if the range is provided 3 numbers\n print(f'for ({identifier} {var} = {start}; {var} < {end}; {var} += {step}) {\"{\"}\\n\\t// code goes here\\n{\"}\"}')","sub_path":"not_school/random_stuff/convert_for_loop.py","file_name":"convert_for_loop.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"154718616","text":"'''\nCanvas Model\n'''\n\nclass Canvas:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.canvas = {}\n\n def saveData(self, x, y, data):\n self.canvas[(x, y)] = data\n\n def createLine(self, x1, y1, x2, y2):\n for i in range(x1, x2+1):\n for j in range(y1, y2+1):\n self.saveData(i, j, 'x')\n return\n\n def createRectangle(self, x1, y1, x2, y2):\n for i in range(x1, x2+1):\n self.saveData(i, y1, 'x')\n self.saveData(i, y2, 'x')\n\n for j in range(y1+1, y2):\n self.saveData(x1, j, 'x')\n self.saveData(x2, j, 'x')\n return\n\n def fillArea(self, x, y, colour):\n if self.canvas.get((x, y)) == 'x':\n print(color.RED + 'Error: line found' + color.END)\n elif (x, y) in self.canvas:\n self.fill(x, y, colour, self.canvas.get((x, y)))\n else:\n self.fill(x, y, colour)\n\n def fill(self, x, y, colour, old=None):\n if self.canvas.get((x, y), None) == old:\n self.saveData(x, y, colour)\n if (x > 1):\n self.fill(x-1, y, colour, old)\n if (y > 1):\n self.fill(x, y-1, colour, old)\n if (x < self.width):\n self.fill(x+1, y, colour, old)\n if (y < self.height):\n self.fill(x, y+1, colour, old)\n return\n\n def drawCanvas(self):\n print(color.CYAN + ' -'*(self.width+2) + color.END)\n for j in range(1, self.height+1):\n print(color.CYAN + ' |' + color.END, end='')\n for i in range(1, self.width+1):\n print(' ' + str(self.canvas.get((i, j), ' ')), end='')\n print(color.CYAN + ' |' + color.END)\n print(color.CYAN + ' -'*(self.width+2) + color.END)\n return\n\n\nclass color:\n CYAN = '\\033[96m'\n RED = '\\033[91m'\n END = 
'\\033[0m'\n","sub_path":"src/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"590688607","text":"from sanic import Sanic\nfrom sanic import response\nimport sys\nimport time\nfrom game import *\nfrom logger import *\n\nimport logging\n\nlogger = logging.getLogger(\"SERVER\")\n\napp = Sanic()\n\nPLAYER_NAME = None\n\n@app.post(\"/start\")\nasync def start(req):\n logger.info(\"比赛开始, req: %s\", req.json)\n return response.json({})\n\n\n@app.post(\"/end\")\nasync def end(req):\n logger.info(\"比赛结束, req: %s\", req.json)\n return response.json({})\n\n\n@app.post(\"/step\")\nasync def step(req):\n start_time = time.time()\n data = req.json\n\n player1 = get_player(data['player1'])\n player2 = get_player(data['player2'])\n walls = get_walls(data['walls'])\n jobs = get_jobs(data['jobs'])\n\n game.player1 = player1\n game.player2 = player2\n game.walls = walls\n game.jobs = jobs\n\n game.refresh(PLAYER_NAME)\n action = game.step()\n logger.info(\"action: %s\", action)\n\n logger.info(\"耗时:%ss\", time.time() - start_time)\n return response.json({'action': action})\n\n\ndef get_player(p):\n player = Player(p['x'], p['y'])\n player.name = p['name']\n player.home = Point(p['home_x'], p['home_y'])\n player.nJobs = p['n_jobs']\n player.value = p['value']\n player.score = p['score']\n return player\n\n\ndef get_walls(walls_data):\n return [Wall(w['x'], w['y']) for w in walls_data]\n\n\ndef get_jobs(jobs_data):\n return [Job(j['x'], j['y'], j['value']) for j in jobs_data]\n\n\n@app.listener('after_server_start')\nasync def notify_server_started(app, loop):\n logger.info(\"started\")\n\nif __name__ == \"__main__\":\n if sys.argv[1].isdigit():\n port = int(sys.argv[1])\n PLAYER_NAME = sys.argv[2]\n init_logger('SERVER', 'DEBUG')\n app.run(host=\"0.0.0.0\", port=port)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"55176059","text":"import math # 计算最大公约数\r\nimport numpy as np # 计算行列式的值\r\n\r\n\r\nclass MyError(ValueError):\r\n pass\r\n\r\n\r\n# 检查矩阵格式是否合法以及是否可逆\r\ndef check_matrix(A, M):\r\n if (not isinstance(A, list)) or (not isinstance(A[0], list)) or (not isinstance(A[0][0], int)):\r\n raise MyError('Invalid matrix format.')\r\n mat = np.array(A)\r\n D = int(np.linalg.det(A)) % M\r\n if math.gcd(D, M) > 1:\r\n return False\r\n return True\r\n # raise MyError('This matrix does not have a modular inversion matrix.')\r\n # print('Valid Matrix.')\r\n\r\n\r\n\r\n# 矩阵的第一类初等变换:交换矩阵第i行与第j行\r\ndef swap(A, i, j):\r\n temp = A[j]\r\n A[j] = A[i]\r\n A[i] = temp\r\n\r\n\r\n# 矩阵的第二类初等变换:将矩阵第i行乘以n\r\ndef mul_n(A, i, n, M):\r\n a = A[i]\r\n A[i] = [a[x] * n % M for x in range(len(a))]\r\n\r\n\r\n# 矩阵的第三类初等变换:矩阵第i行减去n倍的j行\r\ndef sub(A, i, j, n, M):\r\n a = A[i]\r\n b = A[j]\r\n A[i] = [(a[x] - n * b[x]) % M for x in range(len(a))]\r\n\r\n\r\n# 找到符合要求的第i行\r\ndef find_row(A, i, M):\r\n start = i\r\n while A[start][i] == 0 or math.gcd(A[start][i], M) > 1:\r\n start = start + 1\r\n return start\r\n\r\n\r\n# 返回一个整数的模逆元素\r\ndef mod_rev(num, mod):\r\n if(num == 0 or math.gcd(num, mod) > 1):\r\n raise MyError('modular inversion does not exists.')\r\n else:\r\n i = 1\r\n while i * num % mod != 1:\r\n i = i + 1\r\n return i\r\n\r\n\r\ndef disp(mat):\r\n print('')\r\n for i in range(len(mat)):\r\n for j in range(len(mat[i])):\r\n print(mat[i][j], 
end='\\t')\r\n print('')\r\n print('')\r\n\r\n\r\ndef matrix_rev(A, M):\r\n try:\r\n dim = len(A)\r\n # concatenate with a unit matrix\r\n for i in range(dim):\r\n for j in range(dim):\r\n if j == i:\r\n A[i].append(1)\r\n else:\r\n A[i].append(0)\r\n # transform\r\n for i in range(dim):\r\n target_row = find_row(A, i, M)\r\n swap(A, i, target_row)\r\n n = mod_rev(A[i][i], M)\r\n mul_n(A, i, n, M)\r\n for j in range(dim):\r\n if j != i:\r\n sub(A, j, i, A[j][i], M)\r\n # get result\r\n A_rev = [A[i][dim:] for i in range(dim)]\r\n return A_rev\r\n except Exception as e:\r\n print(e)\r\n\r\n# print(matrix_rev([[9, 4], [5, 7]], 26))\r\n","sub_path":"古典密码/Hill/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"152322522","text":"import copy\nimport os\nimport pickle\nfrom collections import deque\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\nimport torch.nn.functional as F\nfrom guacamol.distribution_matching_generator import DistributionMatchingGenerator\nfrom rdkit import Chem\nfrom torch.utils.data import DataLoader, Dataset\nfrom tqdm import tqdm\n\nfrom data.gen_targets import get_symbol_list\nfrom src.data.loader import SizeSampler\nfrom src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k\n\nif int(tf.__version__.split('.')[0]) <= 1:\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n\nclass MockGenerator(DistributionMatchingGenerator):\n def __init__(self, smiles_list, num_samples_to_generate, train_smiles_list=None, remove_non_novel=False):\n self.smiles_list = smiles_list\n if remove_non_novel is True:\n self.smiles_list = [s for s in self.smiles_list if s not in train_smiles_list]\n self.smiles_list = self.smiles_list[:num_samples_to_generate]\n\n def generate(self, number_samples):\n smiles_to_return = self.smiles_list[:number_samples]\n self.smiles_list = self.smiles_list[number_samples:] + self.smiles_list[:number_samples]\n return smiles_to_return\n\nclass GenDataset(Dataset):\n def __init__(self, dataset, number_samples):\n self.dataset = dataset\n self.mol_nodeinds = self.dataset.mol_nodeinds # for SizeSampler\n self.number_samples = number_samples\n\n def __getitem__(self, index):\n return self.dataset[index]\n\n def __len__(self):\n return self.number_samples\n\nclass GraphGenerator(DistributionMatchingGenerator):\n def __init__(self, train_data, model, generation_algorithm, random_init, num_iters, num_sampling_iters, batch_size,\n edges_per_batch=-1, retrieve_train_graphs=False, local_cpu=False, cp_save_dir=None,\n set_seed_at_load_iter=False, graph_type='QM9', sample_uniformly=False, mask_comp_to_predict=False,\n maintain_minority_proportion=False, no_edge_present_type='learned', mask_independently=False,\n one_property_per_loop=False, checkpointing_period=1, save_period=1, evaluation_period=1,\n evaluate_finegrained=False, save_finegrained=False, variables_per_gibbs_iteration=1, top_k=-1,\n save_init=False):\n super().__init__()\n self.model = model\n self.generation_algorithm = generation_algorithm\n self.random_init = random_init\n self.sample_uniformly = sample_uniformly\n self.num_iters = num_iters\n self.num_sampling_iters = num_sampling_iters\n self.num_argmax_iters = self.num_iters - self.num_sampling_iters\n self.train_data = train_data\n self.batch_size = batch_size\n self.edges_per_batch = edges_per_batch\n self.local_cpu = local_cpu\n 
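# local_cpu=True keeps every tensor on the CPU; when it is False,
# carry_out_iteration() moves each batch to CUDA before sampling.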
self.cp_save_dir = cp_save_dir\n self.calculate_length_dist()\n self.get_special_inds()\n self.set_seed_at_load_iter = set_seed_at_load_iter\n self.symbol_list = get_symbol_list(graph_type)[:self.train_data.num_node_types]\n self.retrieve_train_graphs = retrieve_train_graphs\n self.mask_comp_to_predict = mask_comp_to_predict\n self.maintain_minority_proportion = maintain_minority_proportion\n self.no_edge_present_type = no_edge_present_type\n self.mask_independently = mask_independently\n self.one_property_per_loop = one_property_per_loop\n self.index_method = get_index_method()\n self.checkpointing_period = checkpointing_period\n self.save_period = save_period\n self.evaluation_period = evaluation_period\n self.evaluate_finegrained = evaluate_finegrained\n self.save_finegrained = save_finegrained\n self.variables_per_gibbs_iteration = variables_per_gibbs_iteration\n self.top_k = top_k\n self.save_init = save_init\n self.model_forward = self.model_forward_cgvae if self.model.__class__.__name__ == 'CGVAE' \\\n else self.model_forward_mgm\n\n self.node_int = 1\n self.edge_int = 2\n if self.one_property_per_loop is True:\n self.hydrogen_int, self.charge_int, self.is_in_ring_int, self.is_aromatic_int, self.chirality_int = \\\n tuple(range(3, 8))\n else:\n self.hydrogen_int = self.charge_int = self.is_in_ring_int = self.is_aromatic_int = self.chirality_int = \\\n self.node_int\n\n def generate(self, number_samples):\n load_path, load_iters = get_load_path(self.num_sampling_iters, self.num_argmax_iters, self.cp_save_dir)\n all_init_nodes, all_init_edges, all_node_masks, all_edge_masks, all_init_hydrogens, all_init_charge, \\\n all_init_is_in_ring, all_init_is_aromatic, all_init_chirality = self.get_all_init_variables(load_path,\n number_samples)\n\n if self.set_seed_at_load_iter is True:\n set_seed_if(load_iters)\n\n retrieve_train_graphs = self.retrieve_train_graphs\n for j in range(load_iters, self.num_iters):\n if j > 0:\n retrieve_train_graphs = False\n if self.generation_algorithm == 'Gibbs':\n self.train_data.do_not_corrupt = True\n loader = self.get_dataloader(all_init_nodes, all_init_edges, all_node_masks,\n all_init_hydrogens, all_init_charge, all_init_is_in_ring, all_init_is_aromatic,\n all_init_chirality, number_samples, retrieve_train_graphs)\n\n use_argmax = (j >= self.num_sampling_iters)\n all_init_nodes, all_init_edges, all_init_hydrogens, all_init_charge, all_init_is_in_ring,\\\n all_init_is_aromatic, all_init_chirality, all_node_masks,\\\n smiles_list = self.carry_out_iteration(loader, use_argmax)\n\n return smiles_list\n\n def generate_with_evaluation(self, num_samples_to_generate, smiles_dataset_path, output_dir,\n num_samples_to_evaluate, evaluate_connected_only=False):\n\n load_path, load_iters = get_load_path(self.num_sampling_iters, self.num_argmax_iters, self.cp_save_dir)\n all_init_nodes, all_init_edges, all_node_masks, all_edge_masks, all_init_hydrogens, all_init_charge, \\\n all_init_is_in_ring, all_init_is_aromatic, all_init_chirality = self.get_all_init_variables(load_path,\n num_samples_to_generate)\n\n if self.save_init is True and self.random_init is True and load_iters == 0:\n # Save smiles representations of initialised molecules\n smiles_list = []\n num_nodes = all_node_masks.sum(-1)\n for i in range(len(all_init_nodes)):\n mol = graph_to_mol(all_init_nodes[i, :int(num_nodes[i])].astype(int),\n all_init_edges[i, :int(num_nodes[i]), :int(num_nodes[i])].astype(int),\n all_init_charge[i, :int(num_nodes[i])].astype(int),\n all_init_chirality[i, 
:int(num_nodes[i])].astype(int),\n min_charge=self.min_charge, symbol_list=self.symbol_list)\n smiles_list.append(Chem.MolToSmiles(mol))\n save_smiles_list(smiles_list, os.path.join(output_dir, 'smiles_0_0.txt'))\n del smiles_list, mol, num_nodes\n\n if self.set_seed_at_load_iter is True:\n set_seed_if(load_iters)\n\n retrieve_train_graphs = self.retrieve_train_graphs\n for j in tqdm(range(load_iters, self.num_iters)):\n if j > 0:\n retrieve_train_graphs = False\n if self.generation_algorithm == 'Gibbs':\n self.train_data.do_not_corrupt = True\n loader = self.get_dataloader(all_init_nodes, all_init_edges, all_node_masks,\n all_init_hydrogens, all_init_charge, all_init_is_in_ring, all_init_is_aromatic,\n all_init_chirality, num_samples_to_generate, retrieve_train_graphs)\n\n use_argmax = (j >= self.num_sampling_iters)\n all_init_nodes, all_init_edges, all_init_hydrogens, all_init_charge, all_init_is_in_ring,\\\n all_init_is_aromatic, all_init_chirality, all_node_masks,\\\n smiles_list = self.carry_out_iteration(loader, use_argmax)\n\n sampling_iters_completed = min(j + 1, self.num_sampling_iters)\n argmax_iters_completed = max(0, j + 1 - self.num_sampling_iters)\n if (j + 1 - load_iters) % self.checkpointing_period == 0:\n self.save_checkpoints(all_init_nodes, all_init_edges, all_init_hydrogens, all_init_charge,\n all_init_is_in_ring, all_init_is_aromatic, all_init_chirality,\n sampling_iters_completed, argmax_iters_completed)\n\n if (j + 1 - load_iters) % self.save_period == 0 or (self.save_finegrained is True and (j + 1) <= 10):\n smiles_output_path = os.path.join(output_dir, 'smiles_{}_{}.txt'.format(\n sampling_iters_completed, argmax_iters_completed))\n save_smiles_list(smiles_list, smiles_output_path)\n\n if (j + 1 - load_iters) % self.evaluation_period == 0 or \\\n (self.evaluate_finegrained is True and (j + 1) <= 10):\n json_output_path = os.path.join(output_dir, 'distribution_results_{}_{}.json'.format(\n sampling_iters_completed, argmax_iters_completed))\n evaluate_uncond_generation(MockGenerator(smiles_list, num_samples_to_generate),\n smiles_dataset_path, json_output_path, num_samples_to_evaluate,\n evaluate_connected_only)\n\n\n def carry_out_iteration(self, loader, use_argmax):\n mols, smiles_list = [], []\n all_final_nodes, all_final_edges, all_final_hydrogens, all_final_charge, all_final_is_in_ring, \\\n all_final_is_aromatic, all_final_chirality, all_final_node_masks = [], [], [], [], [], [], [], []\n print('Generator length: {}'.format(len(loader)), flush=True)\n for init_nodes, init_edges, orig_nodes, orig_edges, node_masks, edge_masks, node_target_types,\\\n edge_target_types, init_hydrogens, orig_hydrogens, init_charge, orig_charge, init_is_in_ring,\\\n orig_is_in_ring, init_is_aromatic, orig_is_aromatic, init_chirality, orig_chirality, \\\n hydrogen_target_types, charge_target_types, is_in_ring_target_types, is_aromatic_target_types, \\\n chirality_target_types in tqdm(loader):\n if self.local_cpu is False:\n init_nodes = init_nodes.cuda(); init_edges = init_edges.cuda(); init_hydrogens = init_hydrogens.cuda()\n init_charge = init_charge.cuda(); init_is_in_ring = init_is_in_ring.cuda()\n init_is_aromatic = init_is_aromatic.cuda(); init_chirality = init_chirality.cuda()\n node_masks = node_masks.cuda(); edge_masks = edge_masks.cuda()\n\n if self.generation_algorithm == 'gibbs':\n init_nodes, init_edges, init_hydrogens, init_charge, init_is_in_ring, init_is_aromatic, init_chirality \\\n = self.carry_out_gibbs_sampling_sweeps(init_nodes, init_edges,\n init_hydrogens, 
init_charge, init_is_in_ring, init_is_aromatic,\n init_chirality, node_masks, edge_masks, use_argmax)\n elif self.generation_algorithm == 'simultaneous':\n init_nodes, init_edges, init_hydrogens, init_charge, init_is_in_ring, init_is_aromatic, init_chirality \\\n = self.sample_simultaneously(init_nodes, init_edges,\n init_hydrogens, init_charge, init_is_in_ring, init_is_aromatic,\n init_chirality, node_masks, edge_masks, node_target_types,\n edge_target_types, hydrogen_target_types, charge_target_types,\n is_in_ring_target_types, is_aromatic_target_types,\n chirality_target_types, use_argmax)\n\n init_nodes = init_nodes.cpu(); init_edges = init_edges.cpu(); init_hydrogens = init_hydrogens.cpu()\n init_charge = init_charge.cpu(); init_is_in_ring = init_is_in_ring.cpu()\n init_is_aromatic = init_is_aromatic.cpu(); init_chirality = init_chirality.cpu()\n node_masks = node_masks.cpu(); edge_masks = edge_masks.cpu()\n\n self.append_and_convert_graphs(init_nodes, init_edges, init_hydrogens, init_charge, init_is_in_ring,\n init_is_aromatic, init_chirality, node_masks, all_final_nodes, all_final_edges,\n all_final_hydrogens, all_final_charge, all_final_is_in_ring, all_final_is_aromatic,\n all_final_chirality, all_final_node_masks, mols, smiles_list)\n \n return all_final_nodes, all_final_edges, all_final_hydrogens, all_final_charge, all_final_is_in_ring, \\\n all_final_is_aromatic, all_final_chirality, all_final_node_masks, smiles_list\n\n def get_all_init_variables(self, load_path, number_samples):\n if load_path is not None:\n with open(load_path, 'rb') as f:\n load_info = pickle.load(f)\n if self.model.embed_hs is True:\n all_init_nodes, all_init_edges, all_init_hydrogens, all_init_charge, \\\n all_init_is_in_ring, all_init_is_aromatic, all_init_chirality = load_info\n else:\n all_init_nodes, all_init_edges = load_info\n all_node_masks = [(init_nodes != self.node_empty_index) for init_nodes in all_init_nodes]\n all_edge_masks = [(init_edges != self.edge_empty_index) for init_edges in all_init_edges]\n else:\n lengths = self.sample_lengths(number_samples)\n all_init_nodes, all_init_edges, all_node_masks, all_edge_masks, all_init_hydrogens, all_init_charge,\\\n all_init_is_in_ring, all_init_is_aromatic, all_init_chirality = self.get_masked_variables(lengths,\n number_samples, self.train_data.num_node_types, self.train_data.num_edge_types, self.edges_per_batch<=0)\n return all_init_nodes, all_init_edges, all_node_masks, all_edge_masks, all_init_hydrogens, all_init_charge,\\\n all_init_is_in_ring, all_init_is_aromatic, all_init_chirality\n\n def get_dataloader(self, all_init_nodes, all_init_edges, all_node_masks, all_init_hydrogens,\n all_init_charge, all_init_is_in_ring, all_init_is_aromatic, all_init_chirality, number_samples,\n retrieve_train_graphs):\n gen_dataset = GenDataset(self.train_data, number_samples)\n if retrieve_train_graphs is False:\n gen_dataset.dataset.mol_nodeinds = [init_nodes[:int(all_node_masks[i].sum())]\n for i, init_nodes in enumerate(all_init_nodes)]\n gen_dataset.dataset.num_hs = [init_hydrogens[:int(all_node_masks[i].sum())]\n for i, init_hydrogens in enumerate(all_init_hydrogens)]\n gen_dataset.dataset.charge = [init_charge[:int(all_node_masks[i].sum())] - abs(self.min_charge)\n for i, init_charge in enumerate(all_init_charge)]\n gen_dataset.dataset.is_in_ring = [init_is_in_ring[:int(all_node_masks[i].sum())]\n for i, init_is_in_ring in enumerate(all_init_is_in_ring)]\n gen_dataset.dataset.is_aromatic = [init_is_aromatic[:int(all_node_masks[i].sum())]\n for i, 
init_is_aromatic in enumerate(all_init_is_aromatic)]\n gen_dataset.dataset.chirality = [init_chirality[:int(all_node_masks[i].sum())]\n for i, init_chirality in enumerate(all_init_chirality)]\n gen_dataset.dataset.adj_mats = [init_edges[:int(all_node_masks[i].sum()), :int(all_node_masks[i].sum())]\n for i, init_edges in enumerate(all_init_edges)]\n if self.edges_per_batch > 0:\n batch_sampler = SizeSampler(gen_dataset, self.edges_per_batch)\n batch_sampler.batches.reverse()\n loader = DataLoader(gen_dataset, batch_sampler=batch_sampler)\n else:\n loader = DataLoader(gen_dataset, batch_size=self.batch_size)\n return loader\n\n def carry_out_gibbs_sampling_sweeps(self, init_nodes, init_edges, init_hydrogens, init_charge, init_is_in_ring,\n init_is_aromatic, init_chirality, node_masks, edge_masks, use_argmax):\n init_nodes_copy = copy.deepcopy(init_nodes.cpu())\n if self.edges_per_batch > 0:\n max_nodes = len(init_nodes[0])\n max_edges = int(max_nodes * (max_nodes - 1) / 2)\n else:\n max_nodes, max_edges = self.max_nodes, self.max_edges\n num_nodes = node_masks.sum(-1)\n unique_edge_coords, num_unique_edges, generation_arrays, nodes_arrays, edges_arrays = \\\n self.get_unshuffled_update_order_arrays(len(init_nodes), num_nodes)\n\n max_num_components = max_nodes + max_edges\n if self.one_property_per_loop is True:\n num_properties = 5\n max_num_components += num_properties * max_nodes\n generation_queue = deque(get_shuffled_array(generation_arrays, max_num_components).transpose())\n nodes_queues = self.get_shuffled_queues(nodes_arrays, max_nodes)\n edges_queues = self.get_shuffled_queues(edges_arrays, max_edges)\n if self.mask_independently is True:\n hydrogen_queues = self.get_shuffled_queues(nodes_arrays, max_nodes)\n charge_queues = self.get_shuffled_queues(nodes_arrays, max_nodes)\n is_in_ring_queues = self.get_shuffled_queues(nodes_arrays, max_nodes)\n is_aromatic_queues = self.get_shuffled_queues(nodes_arrays, max_nodes)\n chirality_queues = self.get_shuffled_queues(nodes_arrays, max_nodes)\n with torch.no_grad():\n while len(generation_queue) > 0:\n next_target_types = [generation_queue.popleft() \\\n for _ in range(min(len(generation_queue), self.variables_per_gibbs_iteration))]\n next_target_types = np.vstack(next_target_types)\n\n node_update_graphs = np.where(next_target_types == self.node_int)[1]\n edge_update_graphs = np.where(next_target_types == self.edge_int)[1]\n\n # replace nodes, node properties and edges\n nodes_to_update = [nodes_queues[ind].popleft() for ind in node_update_graphs]\n edges_to_update = [edges_queues[ind].popleft() for ind in edge_update_graphs]\n if self.mask_independently is True:\n hydrogen_update_graphs = np.where(next_target_types == self.hydrogen_int)[1]\n charge_update_graphs = np.where(next_target_types == self.charge_int)[1]\n is_in_ring_update_graphs = np.where(next_target_types == self.is_in_ring_int)[1]\n is_aromatic_update_graphs = np.where(next_target_types == self.is_aromatic_int)[1]\n chirality_update_graphs = np.where(next_target_types == self.chirality_int)[1]\n hydrogens_to_update = [hydrogen_queues[ind].popleft() for ind in hydrogen_update_graphs]\n charge_to_update = [charge_queues[ind].popleft() for ind in charge_update_graphs]\n is_in_ring_to_update = [is_in_ring_queues[ind].popleft() for ind in is_in_ring_update_graphs]\n is_aromatic_to_update = [is_aromatic_queues[ind].popleft() for ind in is_aromatic_update_graphs]\n chirality_to_update = [chirality_queues[ind].popleft() for ind in chirality_update_graphs]\n else:\n 
hydrogens_to_update = charge_to_update = is_in_ring_to_update = is_aromatic_to_update = \\\n chirality_to_update = nodes_to_update\n\n if self.mask_comp_to_predict is True:\n self.mask_one_entry_per_graph(init_nodes, init_edges, init_hydrogens,\n init_charge, init_is_in_ring, init_is_aromatic, init_chirality,\n node_update_graphs, edge_update_graphs,\n hydrogen_update_graphs, charge_update_graphs,\n is_in_ring_update_graphs, is_aromatic_update_graphs,\n chirality_update_graphs,\n nodes_to_update, edges_to_update,\n hydrogens_to_update, charge_to_update, is_in_ring_to_update,\n is_aromatic_to_update, chirality_to_update,\n node_masks, edge_masks)\n\n node_scores, edge_scores, hydrogen_scores, charge_scores, is_in_ring_scores, is_aromatic_scores, \\\n chirality_scores = self.model_forward(init_nodes, init_edges,\n node_masks, edge_masks, init_hydrogens, init_charge,\n init_is_in_ring, init_is_aromatic, init_chirality)\n\n if self.maintain_minority_proportion is True:\n self.drop_minority_loc_majority_scores(init_nodes_copy, node_scores)\n\n node_preds, edge_preds, hydrogen_preds, charge_preds, is_in_ring_preds, is_aromatic_preds,\\\n chirality_preds = self.predict_from_scores(node_scores, edge_scores, hydrogen_scores,\n charge_scores, is_in_ring_scores, is_aromatic_scores, chirality_scores,\n use_argmax)\n\n init_nodes, init_edges, init_hydrogens, \\\n init_charge, init_is_in_ring, init_is_aromatic, init_chirality, = self.update_components(\n nodes_to_update, edges_to_update, hydrogens_to_update, charge_to_update,\n is_in_ring_to_update, is_aromatic_to_update, chirality_to_update,\n init_nodes, init_edges, init_hydrogens,\n init_charge, init_is_in_ring, init_is_aromatic, init_chirality,\n node_update_graphs, edge_update_graphs, hydrogen_update_graphs, charge_update_graphs,\n is_in_ring_update_graphs, is_aromatic_update_graphs, chirality_update_graphs,\n node_preds, edge_preds, hydrogen_preds,\n charge_preds, is_in_ring_preds, is_aromatic_preds, chirality_preds)\n\n return init_nodes, init_edges, init_hydrogens, init_charge, init_is_in_ring, init_is_aromatic, init_chirality\n\n def sample_simultaneously(self, init_nodes, init_edges, init_hydrogens, init_charge, init_is_in_ring,\n init_is_aromatic, init_chirality, node_masks, edge_masks, node_target_types,\n edge_target_types, hydrogen_target_types, charge_target_types, is_in_ring_target_types,\n is_aromatic_target_types, chirality_target_types, use_argmax):\n with torch.no_grad():\n node_scores, edge_scores, hydrogen_scores, charge_scores, is_in_ring_scores, is_aromatic_scores,\\\n chirality_scores, = self.model_forward(init_nodes, init_edges, node_masks,\n edge_masks, init_hydrogens, init_charge, init_is_in_ring, init_is_aromatic,\n init_chirality)\n node_preds, edge_preds, hydrogen_preds, charge_preds, is_in_ring_preds, is_aromatic_preds,\\\n chirality_preds = self.predict_from_scores(node_scores, edge_scores, hydrogen_scores,\n charge_scores, is_in_ring_scores, is_aromatic_scores, chirality_scores, use_argmax)\n\n init_nodes[node_target_types != 0] = node_preds[node_target_types != 0]\n edge_target_coords = np.where(edge_target_types != 0)\n init_edges[edge_target_coords] = edge_preds[edge_target_coords]\n init_edges[edge_target_coords[0], edge_target_coords[2], edge_target_coords[1]] = edge_preds[edge_target_coords]\n init_hydrogens[hydrogen_target_types != 0] = hydrogen_preds[hydrogen_target_types != 0]\n init_charge[charge_target_types != 0] = charge_preds[charge_target_types != 0]\n init_is_in_ring[is_in_ring_target_types != 0] = 
is_in_ring_preds[is_in_ring_target_types != 0]\n            init_is_aromatic[is_aromatic_target_types != 0] = is_aromatic_preds[is_aromatic_target_types != 0]\n            init_chirality[chirality_target_types != 0] = chirality_preds[chirality_target_types != 0]\n\n        return init_nodes, init_edges, init_hydrogens, init_charge, init_is_in_ring, init_is_aromatic, init_chirality\n\n    def get_unshuffled_update_order_arrays(self, batch_size, num_nodes):\n        unique_edge_coords, num_unique_edges, generation_arrays, nodes_arrays, edges_arrays = [], [], [], [], []\n        for i in range(batch_size):\n            unique_edge_coords.append(list(zip(*np.triu_indices(num_nodes[i], k=1))))\n            num_unique_edges.append(len(unique_edge_coords[i]))\n            if self.one_property_per_loop is True:\n                generation_array = np.array([self.node_int] * int(num_nodes[i]) +\n                                            [self.edge_int] * int(num_unique_edges[i]) +\n                                            [self.hydrogen_int] * int(num_nodes[i]) + [self.charge_int] * int(num_nodes[i]) +\n                                            [self.is_in_ring_int] * int(num_nodes[i]) + [self.is_aromatic_int] * int(num_nodes[i]) +\n                                            [self.chirality_int] * int(num_nodes[i]))\n            else:\n                generation_array = np.array([self.node_int] * int(num_nodes[i]) +\n                                            [self.edge_int] * int(num_unique_edges[i]))\n            generation_arrays.append(generation_array)\n            nodes_arrays.append(np.arange(int(num_nodes[i])))\n            edges_arrays.append(unique_edge_coords[i])\n        return unique_edge_coords, num_unique_edges, generation_arrays, nodes_arrays, edges_arrays\n\n    def get_shuffled_queues(self, array_to_shuffle, max_num_components):\n        shuffled_array = get_shuffled_array(array_to_shuffle, max_num_components)\n        queues = [deque(array) for array in shuffled_array]\n        return queues\n\n    def mask_one_entry_per_graph(self, init_nodes, init_edges, init_hydrogens,\n                                 init_charge, init_is_in_ring, init_is_aromatic, init_chirality,\n                                 node_update_graphs, edge_update_graphs, hydrogen_update_graphs, charge_update_graphs,\n                                 is_in_ring_update_graphs, is_aromatic_update_graphs, chirality_update_graphs,\n                                 nodes_to_update, edges_to_update, hydrogens_to_update, charge_to_update,\n                                 is_in_ring_to_update, is_aromatic_to_update, chirality_to_update,\n                                 node_masks, edge_masks):\n        if nodes_to_update:\n            init_nodes[node_update_graphs, nodes_to_update] = self.node_mask_index\n            if self.no_edge_present_type == 'zeros':\n                node_masks[node_update_graphs, nodes_to_update] = 1\n        if hydrogens_to_update:\n            init_hydrogens[hydrogen_update_graphs, hydrogens_to_update] = self.h_mask_index\n        if charge_to_update:\n            init_charge[charge_update_graphs, charge_to_update] = self.charge_mask_index\n        if is_in_ring_to_update:\n            init_is_in_ring[is_in_ring_update_graphs, is_in_ring_to_update] = self.is_in_ring_mask_index\n        if is_aromatic_to_update:\n            init_is_aromatic[is_aromatic_update_graphs, is_aromatic_to_update] = self.is_aromatic_mask_index\n        if chirality_to_update:\n            init_chirality[chirality_update_graphs, chirality_to_update] = self.chirality_mask_index\n        if edges_to_update:\n            coords_array = np.array(edges_to_update).transpose()\n            # mask both (i, j) and (j, i) so the adjacency matrix stays symmetric\n            init_edges[edge_update_graphs, coords_array[0], coords_array[1]] = \\\n                init_edges[edge_update_graphs, coords_array[1], coords_array[0]] = self.edge_mask_index\n            if self.no_edge_present_type == 'zeros':\n                # the transposed coordinates (j, i) must be unmasked as well, not (i, j) twice\n                edge_masks[edge_update_graphs, coords_array[0], coords_array[1]] = \\\n                    edge_masks[edge_update_graphs, coords_array[1], coords_array[0]] = 1\n\n    
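# Thin adapters that give the CGVAE and MGM models one calling convention, so\n    # the sampler can always go through self.model_forward (chosen in __init__)\n    # without caring which model class is loaded.\n    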
def model_forward_mgm(self, init_nodes, init_edges, node_masks, edge_masks, init_hydrogens, init_charge,\n                          init_is_in_ring, init_is_aromatic, init_chirality):\n        node_scores, edge_scores, hydrogen_scores, charge_scores, is_in_ring_scores, is_aromatic_scores, \\\n            chirality_scores = self.model(init_nodes, init_edges, node_masks, edge_masks, init_hydrogens,\n                                          init_charge, init_is_in_ring, init_is_aromatic, init_chirality)\n        return node_scores, edge_scores, hydrogen_scores, charge_scores, is_in_ring_scores, is_aromatic_scores, \\\n            chirality_scores\n\n    def model_forward_cgvae(self, init_nodes, init_edges, node_masks, edge_masks, init_hydrogens, init_charge=None,\n                            init_is_in_ring=None, init_is_aromatic=None, init_chirality=None):\n        # CGVAE only scores nodes, edges and hydrogens. The remaining property\n        # tensors are accepted (and ignored) and the property scores are returned\n        # as None, so this adapter matches the nine-argument, seven-result\n        # calling convention of model_forward_mgm and callers can unpack\n        # uniformly either way.\n        node_target_inds_vector = getattr(init_nodes == self.node_mask_index, self.index_method)()\n        edge_target_coords_matrix = getattr(init_edges == self.edge_mask_index, self.index_method)()\n        node_scores, edge_scores, hydrogen_scores, _, _, _, _ = self.model.prior_forward(\n            init_nodes, init_edges, node_masks, edge_masks, init_hydrogens,\n            node_target_inds_vector, edge_target_coords_matrix\n        )\n        return node_scores, edge_scores, hydrogen_scores, None, None, None, None\n\n    def drop_minority_loc_majority_scores(self, init_nodes, node_scores, majority_node_index=1):\n        # suppress the majority node type wherever the initial graph held a minority type\n        minority_node_locs = np.where(init_nodes != majority_node_index)\n        node_scores[list(minority_node_locs) +\n                    [np.ones_like(minority_node_locs[0]) * majority_node_index]] = -9999\n\n    def predict_from_scores(self, node_scores, edge_scores, hydrogen_scores, charge_scores, is_in_ring_scores,\n                            is_aromatic_scores, chirality_scores, use_argmax=False):\n        if use_argmax is True:\n            node_preds = torch.argmax(F.softmax(node_scores, -1), dim=-1)\n            edge_preds = torch.argmax(F.softmax(edge_scores, -1), dim=-1)\n            hydrogen_preds = torch.argmax(F.softmax(hydrogen_scores, -1), dim=-1)\n            charge_preds = torch.argmax(F.softmax(charge_scores, -1), dim=-1)\n            is_in_ring_preds = torch.argmax(F.softmax(is_in_ring_scores, -1), dim=-1)\n            is_aromatic_preds = torch.argmax(F.softmax(is_aromatic_scores, -1), dim=-1)\n            chirality_preds = torch.argmax(F.softmax(chirality_scores, -1), dim=-1)\n        else:\n            if self.top_k > 0:\n                node_scores = filter_top_k(node_scores, self.top_k)\n                edge_scores = filter_top_k(edge_scores, self.top_k)\n                hydrogen_scores = filter_top_k(hydrogen_scores, self.top_k)\n                charge_scores = filter_top_k(charge_scores, self.top_k)\n                is_in_ring_scores = filter_top_k(is_in_ring_scores, self.top_k)\n                is_aromatic_scores = filter_top_k(is_aromatic_scores, self.top_k)\n                chirality_scores = filter_top_k(chirality_scores, self.top_k)\n\n            node_preds = torch.distributions.Categorical(F.softmax(node_scores, -1)).sample()\n            edge_preds = torch.distributions.Categorical(F.softmax(edge_scores, -1)).sample()\n            hydrogen_preds = torch.distributions.Categorical(F.softmax(hydrogen_scores, -1)).sample()\n            charge_preds = torch.distributions.Categorical(F.softmax(charge_scores, -1)).sample()\n            is_in_ring_preds = torch.distributions.Categorical(F.softmax(is_in_ring_scores, -1)).sample()\n            is_aromatic_preds = torch.distributions.Categorical(F.softmax(is_aromatic_scores, -1)).sample()\n            chirality_preds = torch.distributions.Categorical(F.softmax(chirality_scores, -1)).sample()\n        return node_preds, edge_preds, hydrogen_preds, charge_preds, is_in_ring_preds, is_aromatic_preds, \\\n            chirality_preds\n\n    def update_components(self, nodes_to_update, edges_to_update, hydrogens_to_update,\n                          charge_to_update, is_in_ring_to_update, is_aromatic_to_update, chirality_to_update,\n                          init_nodes, init_edges, init_hydrogens,\n                          init_charge, init_is_in_ring, init_is_aromatic, init_chirality,\n                          node_update_graphs, edge_update_graphs, hydrogen_update_graphs, charge_update_graphs,\n                          is_in_ring_update_graphs, is_aromatic_update_graphs, chirality_update_graphs,\n                          node_preds, edge_preds, hydrogen_preds,\n                          
charge_preds, is_in_ring_preds, is_aromatic_preds, chirality_preds):\n if nodes_to_update:\n init_nodes[node_update_graphs, nodes_to_update] = node_preds[node_update_graphs, nodes_to_update]\n if hydrogens_to_update:\n init_hydrogens[hydrogen_update_graphs, hydrogens_to_update] = \\\n hydrogen_preds[hydrogen_update_graphs, hydrogens_to_update]\n if charge_to_update:\n init_charge[charge_update_graphs, charge_to_update] = charge_preds[charge_update_graphs, charge_to_update]\n if is_in_ring_to_update:\n init_is_in_ring[is_in_ring_update_graphs, is_in_ring_to_update] = is_in_ring_preds[\n is_in_ring_update_graphs, is_in_ring_to_update]\n if is_aromatic_to_update:\n init_is_aromatic[is_aromatic_update_graphs, is_aromatic_to_update] = is_aromatic_preds[\n is_aromatic_update_graphs, is_aromatic_to_update]\n if chirality_to_update:\n init_chirality[chirality_update_graphs, chirality_to_update] = chirality_preds[\n chirality_update_graphs, chirality_to_update]\n if edges_to_update:\n coords_array = np.array(edges_to_update).transpose()\n init_edges[edge_update_graphs, coords_array[0], coords_array[1]] = init_edges[\n edge_update_graphs,\n coords_array[1], coords_array[0]] = edge_preds[\n edge_update_graphs, coords_array[0], coords_array[1]]\n return init_nodes, init_edges, init_hydrogens, init_charge, init_is_in_ring, init_is_aromatic, init_chirality\n\n def append_and_convert_graphs(self, init_nodes, init_edges, init_hydrogens, init_charge, init_is_in_ring,\n init_is_aromatic, init_chirality, node_masks, all_final_nodes, all_final_edges,\n all_final_hydrogens, all_final_charge, all_final_is_in_ring, all_final_is_aromatic,\n all_final_chirality, all_final_node_masks, mols, smiles_list):\n init_nodes, init_edges, init_hydrogens = init_nodes.numpy(), init_edges.numpy(), init_hydrogens.numpy()\n num_nodes = node_masks.sum(-1)\n for i in range(len(init_nodes)):\n all_final_nodes.append(init_nodes[i])\n all_final_edges.append(init_edges[i])\n all_final_node_masks.append(node_masks[i])\n if self.model.embed_hs is True:\n all_final_hydrogens.append(init_hydrogens[i])\n all_final_charge.append(init_charge[i])\n all_final_is_in_ring.append(init_is_in_ring[i])\n all_final_is_aromatic.append(init_is_aromatic[i])\n all_final_chirality.append(init_chirality[i])\n mol = graph_to_mol(init_nodes[i, :int(num_nodes[i])], init_edges[i, :int(num_nodes[i]), :int(num_nodes[i])],\n init_charge[i, :int(num_nodes[i])], init_chirality[i, :int(num_nodes[i])],\n min_charge=self.min_charge, symbol_list=self.symbol_list)\n mols.append(mol)\n smiles_list.append(Chem.MolToSmiles(mol))\n\n def save_checkpoints(self, all_final_nodes, all_final_edges, all_final_hydrogens, all_final_charge,\n all_final_is_in_ring, all_final_is_aromatic, all_final_chirality, num_sampling_iters,\n num_argmax_iters):\n if self.cp_save_dir is not None:\n to_save = [all_final_nodes, all_final_edges]\n if self.model.embed_hs is True:\n to_save.extend([all_final_hydrogens, all_final_charge, all_final_is_in_ring, all_final_is_aromatic,\n all_final_chirality])\n save_path = os.path.join(self.cp_save_dir, 'gen_checkpoint_{}_{}.p'.format(\n num_sampling_iters, num_argmax_iters))\n with open(save_path, 'wb') as f:\n pickle.dump(to_save, f)\n\n def calculate_length_dist(self):\n lengths_dict = {}\n for mol_nodeind in self.train_data.mol_nodeinds:\n length = len(mol_nodeind)\n if length not in lengths_dict:\n lengths_dict[length] = 1\n else:\n lengths_dict[length] += 1\n # Normalise\n for key in lengths_dict:\n lengths_dict[key] /= len(self.train_data)\n 
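# the normalised counts form the empirical distribution over molecule sizes\n        # that sample_lengths() draws from when initialising generation\n        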
self.length_dist = lengths_dict\n\n def get_special_inds(self):\n self.node_empty_index = self.train_data.node_empty_index\n self.edge_empty_index = self.train_data.edge_empty_index\n self.node_mask_index = self.train_data.node_mask_index\n self.edge_mask_index = self.train_data.edge_mask_index\n self.max_nodes = self.train_data.max_nodes\n self.max_edges = int(self.max_nodes * (self.max_nodes-1)/2)\n if self.train_data.num_hs is not None:\n self.h_mask_index = self.train_data.h_mask_index\n self.h_empty_index = self.train_data.h_empty_index\n self.charge_mask_index = self.train_data.charge_mask_index\n self.is_in_ring_mask_index = self.train_data.is_in_ring_mask_index\n self.is_aromatic_mask_index = self.train_data.is_aromatic_mask_index\n self.chirality_mask_index = self.train_data.chirality_mask_index\n self.min_charge = self.train_data.min_charge\n else:\n self.h_mask_index = self.h_empty_index = 0\n\n def sample_lengths(self, number_samples=1):\n lengths = np.array(list(self.length_dist.keys()))\n probs = np.array(list(self.length_dist.values()))\n samples = np.random.choice(lengths, number_samples, p=probs)\n return samples\n\n def get_masked_variables(self, lengths, number_samples, num_node_types, num_edge_types, pad=True):\n if pad is True:\n init_nodes = np.ones((number_samples, self.max_nodes)) * self.node_empty_index\n node_mask = np.zeros((number_samples, self.max_nodes))\n init_edges = np.ones((number_samples, self.max_nodes, self.max_nodes)) * self.edge_empty_index\n edge_mask = np.zeros((number_samples, self.max_nodes, self.max_nodes))\n hydrogens = np.ones((number_samples, self.max_nodes)) * self.h_empty_index\n init_charge = np.ones((number_samples, self.max_nodes)) * abs(self.min_charge)\n init_is_in_ring = np.zeros((number_samples, self.max_nodes))\n init_is_aromatic = np.zeros((number_samples, self.max_nodes))\n init_chirality = np.zeros((number_samples, self.max_nodes))\n else:\n init_nodes, node_mask, init_edges, edge_mask, hydrogens, init_charge, init_is_in_ring, init_is_aromatic, \\\n init_chirality = [], [], [], [], [], [], [], [], []\n for sample_num, length in enumerate(lengths):\n if pad is False:\n init_nodes.append(np.ones(length) * self.node_empty_index)\n node_mask.append(np.zeros(length))\n init_edges.append(np.ones((length, length)) * self.edge_empty_index)\n edge_mask.append(np.zeros((length, length)))\n hydrogens.append(np.ones(length) * self.h_empty_index)\n init_charge.append(np.ones(length) * abs(self.min_charge))\n init_is_in_ring.append(np.zeros(length))\n init_is_aromatic.append(np.zeros(length))\n init_chirality.append(np.zeros(length))\n if self.random_init:\n if self.sample_uniformly is True:\n node_samples = np.random.randint(0, num_node_types, size=length)\n edge_samples = np.random.randint(0, num_edge_types, size=int(length * (length - 1) / 2))\n hydrogen_samples = np.random.randint(0, self.h_mask_index, size=length)\n charge_samples = np.random.randint(0, self.charge_mask_index, size=length)\n is_in_ring_samples = np.random.randint(0, self.is_in_ring_mask_index, size=length)\n is_aromatic_samples = np.random.randint(0, self.is_aromatic_mask_index, size=length)\n chirality_samples = np.random.randint(0, self.chirality_mask_index, size=length)\n else:\n node_samples = torch.distributions.Categorical(1/self.train_data.node_weights).sample(\n [length]).numpy()\n edge_samples = torch.distributions.Categorical(1/self.train_data.edge_weights).sample(\n [int(length * (length - 1) / 2)]).numpy()\n hydrogen_samples = 
torch.distributions.Categorical(1/self.train_data.h_weights).sample(\n [length]).numpy()\n charge_samples = torch.distributions.Categorical(1/self.train_data.charge_weights).sample(\n [length]).numpy()\n is_in_ring_samples = torch.distributions.Categorical(1/self.train_data.is_in_ring_weights).sample(\n [length]).numpy()\n is_aromatic_samples = torch.distributions.Categorical(1/self.train_data.is_aromatic_weights).sample(\n [length]).numpy()\n chirality_samples = torch.distributions.Categorical(1/self.train_data.chirality_weights).sample(\n [length]).numpy()\n init_nodes[sample_num][:length] = node_samples\n rand_edges = deque(edge_samples)\n for i in range(length):\n init_edges[sample_num][i, i] = 0\n for j in range(i, length):\n if i != j:\n init_edges[sample_num][i, j] = init_edges[sample_num][j, i] = rand_edges.pop()\n hydrogens[sample_num][:length] = hydrogen_samples\n init_charge[sample_num][:length] = charge_samples\n init_is_in_ring[sample_num][:length] = is_in_ring_samples\n init_is_aromatic[sample_num][:length] = is_aromatic_samples\n init_chirality[sample_num][:length] = chirality_samples\n else:\n init_nodes[sample_num][:length] = self.node_mask_index\n init_edges[sample_num][:length, :length] = self.edge_mask_index\n hydrogens[sample_num][:length] = self.h_mask_index\n init_charge[sample_num][:length] = self.charge_mask_index\n init_is_in_ring[sample_num][:length] = self.is_in_ring_mask_index\n init_is_aromatic[sample_num][:length] = self.is_aromatic_mask_index\n init_chirality[sample_num][:length] = self.chirality_mask_index\n node_mask[sample_num][:length] = 1\n edge_mask[sample_num][:length, :length] = 1\n return init_nodes, init_edges, node_mask, edge_mask, hydrogens, init_charge, init_is_in_ring,\\\n init_is_aromatic, init_chirality\n\ndef get_load_path(num_sampling_iters, num_argmax_iters, cp_save_dir):\n all_cp_iters = {}\n for fname in os.listdir(cp_save_dir):\n if 'gen_checkpoint' not in fname: continue\n split_fname = os.path.splitext(fname)[0].split('_')\n cp_sampling_iters, cp_argmax_iters = int(split_fname[2]), int(split_fname[3])\n if cp_sampling_iters in all_cp_iters.keys():\n all_cp_iters[cp_sampling_iters].append(cp_argmax_iters)\n else:\n all_cp_iters[cp_sampling_iters] = [cp_argmax_iters]\n\n if len(all_cp_iters) == 0:\n return None, 0\n\n cp_max_sampling_iters = max(all_cp_iters.keys())\n sampling_iters_to_load = min(cp_max_sampling_iters, num_sampling_iters)\n if sampling_iters_to_load == num_sampling_iters and sampling_iters_to_load in all_cp_iters.keys():\n argmax_iters_to_load = min(max(all_cp_iters[sampling_iters_to_load]), num_argmax_iters)\n else:\n argmax_iters_to_load = 0\n if sampling_iters_to_load == argmax_iters_to_load == 0:\n return None, 0\n\n load_path = os.path.join(cp_save_dir,\n 'gen_checkpoint_{}_{}.p'.format(sampling_iters_to_load, argmax_iters_to_load))\n return load_path, sampling_iters_to_load + argmax_iters_to_load\n\n\ndef get_shuffled_array(arrays, length=None):\n \"\"\"\n :arg\n arrays: list of generation_arrays\n length: length of an output generation array with padding\n :returns\n shuffled_arrays: padded matrix of shape (number of generation arrays, length)\n \"\"\"\n if type(arrays[0][0]) == tuple:\n shuffled_arrays = np.ones((len(arrays), length), dtype=(int, 2)) * -1\n else:\n shuffled_arrays = np.ones((len(arrays), length)) * -1\n for i, array in enumerate(arrays):\n array = np.random.permutation(array)\n shuffled_arrays[i, :len(array)] = array\n return shuffled_arrays\n\ndef save_smiles_list(smiles_list, 
smiles_output_path):\n    with open(smiles_output_path, 'w') as f:\n        for smiles in smiles_list:\n            f.write(smiles + '\\n')\n\ndef evaluate_uncond_generation(mock_generator, smiles_dataset_path,\n                               json_output_path, num_samples_to_evaluate, evaluate_connected_only=False):\n    from guacamol.assess_distribution_learning import _assess_distribution_learning\n    if evaluate_connected_only is True:\n        mock_generator.smiles_list = [s for s in mock_generator.smiles_list if '.' not in s]\n    _assess_distribution_learning(mock_generator, smiles_dataset_path, json_output_file=json_output_path,\n                                  benchmark_version='v1', number_samples=num_samples_to_evaluate)","sub_path":"src/model/graph_generator.py","file_name":"graph_generator.py","file_ext":"py","file_size_in_byte":48164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"347338124","text":"from typing import List\n\nimport psycopg2\n\nfrom daos.note_dao import NoteDao\nfrom exceptions.resource_not_found import ResourceNotFound\nfrom models.note import Note\n\n\nclass NoteDAOImpl(NoteDao):\n\n    @staticmethod\n    def add_note(cursor, note: Note) -> Note:\n        \"\"\"Creates a note for an Associate on a given week for a Batch\"\"\"\n        if note.week_number < 0:\n            raise ValueError()\n        try:\n            sql = \"insert into notes (batch_id, cont, associate_id, week_number) values(%s, %s, %s, %s) returning id\"\n            cursor.execute(sql, (note.batch_id, note.content, note.associate_id,\n                                 note.week_number))\n            n_id = cursor.fetchone()[0]\n            note.note_id = n_id\n            return note\n        except psycopg2.Error as e:\n            # pgcode is a string SQLSTATE (foreign_key_violation / invalid_foreign_key)\n            if e.pgcode in ('23503', '42830'):\n                raise ResourceNotFound(\"The foreign keys provided do not exist\")\n            raise  # unexpected database errors must not be swallowed silently\n\n    @staticmethod\n    def get_single_note(cursor, note_id: int) -> Note:\n        \"\"\"Takes in an id for a note record and returns a Note object\"\"\"\n        sql = \"select * from notes where id = %s\"\n        cursor.execute(sql, [note_id])\n        records = cursor.fetchall()\n        if records:\n            record = records[0]\n            note = Note(note_id=record[0],\n                        batch_id=record[1],\n                        content=record[2],\n                        associate_id=record[3],\n                        week_number=record[4])\n            return note\n        else:\n            raise ResourceNotFound(\"No note could be found with the given id\")\n\n    @staticmethod\n    def get_all_notes(cursor) -> List[Note]:\n        \"\"\"Returns all the notes\"\"\"\n        sql = \"select * from notes\"\n        cursor.execute(sql)\n        records = cursor.fetchall()\n        notes = []\n        for note in records:\n            notes.append(\n                Note(note_id=note[0],\n                     batch_id=note[1],\n                     content=note[2],\n                     associate_id=note[3],\n                     week_number=note[4]))\n        return notes\n\n    @staticmethod\n    def update_note(cursor, updated: Note) -> Note:\n        \"\"\"Updates note\"\"\"\n        if updated.week_number < 0:\n            raise ValueError()\n        try:\n            sql = \"update notes set batch_id = %s, cont = %s, associate_id = %s, week_number = %s where id = %s returning id\"\n            cursor.execute(\n                sql, (updated.batch_id, updated.content, updated.associate_id,\n                      updated.week_number, updated.note_id))\n            n_id = cursor.fetchone()\n            if n_id is not None:\n                return updated\n            else:\n                raise ResourceNotFound(\"Note could not be found\")\n        except psycopg2.Error as e:\n            if e.pgcode in ('23503', '42830'):\n                raise ResourceNotFound(\"The foreign keys provided do not exist\")\n            raise\n\n    @staticmethod\n    def delete_note(cursor, note_id: int) -> bool:\n        \"\"\"Deletes a note and returns True if successful\"\"\"\n        sql = \"delete from notes where id = %s returning id\"\n        cursor.execute(sql, [note_id])\n        n_id = cursor.fetchone()\n        if n_id is not None:\n            return True\n        else:\n            raise 
ResourceNotFound(\"No note could be found with the given id\")\n","sub_path":"daos/daos_impl/note_dao_impl.py","file_name":"note_dao_impl.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"587497451","text":"\"\"\"\nSituation: you start a game of dominion with 7 coppers and 3 estates in your\ndeck. In the first 2 turns, you buy silver and moneylender. On turns\n3 and 4, you want to figure out what the possible coinages are. What are the\npossible coinages?\n\"\"\"\n\nfrom MoneyLender import SingleGame\n\nNUM_RUNS = 100000\n\n# possible coinages on turns 3 and 4\n# [1 coin, 2 coins, 3 coins, 4 coins, 5 coins, 6 coins, 7 coins]\noutcomesA = [0, 0, 0, 0, 0, 0, 0]\noutcomesB = [0, 0, 0, 0, 0, 0, 0]\n\nprint(\"Moneylender + silver simulated \" + str(NUM_RUNS) + \" times:\")\n\nfor number in range(NUM_RUNS):\n game = SingleGame()\n game.simulateRun()\n\n # update possible coinages\n outcomesA[game.a - 1] += 1\n outcomesB[game.b - 1] += 1\n\nprint(\"Possible coinage on turn 3: \" + str(outcomesA))\nprint(\"Possible coinage on turn 4: \" + str(outcomesB))\n\n","sub_path":"dominion.py","file_name":"dominion.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"386742487","text":"import json\n\ndef main():\n with open('county_demographics.json') as demographics_data:\n counties = json.load(demographics_data)\n print(alphabetically_first_county(counties))\n print(county_most_under_18(counties))\n print(percent_most_under_18(counties))\n print(most_under_18(counties))\n print(state_with_most_counties(counties))\n print(county_with_highest_income(counties))\n\ndef alphabetically_first_county(counties):\n \"\"\"Return the county with the name that comes first alphabetically.\"\"\"\n first_county = counties[0][\"County\"]\n for x in counties:\n if x[\"County\"] < first_county:\n first_county = x[\"County\"]\n return first_county\n\n\ndef county_most_under_18(counties):\n \"\"\"Return the name and state of a county (\", \") with the highest percent of under 18 year olds.\"\"\"\n most_under = counties[0][\"County\"]\n state = counties[0][\"State\"]\n percent = counties[0][\"Age\"][\"Percent Under 18 Years\"]\n for x in counties:\n if x[\"Age\"][\"Percent Under 18 Years\"] > percent:\n most_under = x[\"County\"]\n state = x[\"State\"]\n percent = x[\"Age\"][\"Percent Under 18 Years\"]\n return most_under + \", \" + state\n\n \ndef percent_most_under_18(counties):\n \"\"\"Return the highest percent of under 18 year olds.\"\"\"\n most_under = counties[0][\"County\"]\n percent = counties[0][\"Age\"][\"Percent Under 18 Years\"]\n for x in counties:\n if x[\"Age\"][\"Percent Under 18 Years\"] > percent:\n most_under = x[\"County\"]\n percent = x[\"Age\"][\"Percent Under 18 Years\"]\n return str(percent)\n \ndef most_under_18(counties):\n \"\"\"Return a list with the name and state of a county (\", \") and the percent of under 18 year olds for a county with the highest percent of under 18 year olds.\"\"\"\n return county_most_under_18(counties) + \": \" + percent_most_under_18(counties) + \" are under 18 years old.\"\n \ndef state_with_most_counties(counties):\n \"\"\"Return a state that has the most counties.\"\"\"\n #Make a dictionary that has a key for each state and the values keep track of the number of counties in each state\n \n #Find the state in the dictionary with the most counties\n \n #Return the state with the 
most counties\n\n    counts = {}\n\n    for x in counties:\n        state = x[\"State\"]\n        # count every county; starting a newly seen state at 0 would undercount it by one\n        counts[state] = counts.get(state, 0) + 1\n\n    most_state = max(counts, key=counts.get)\n\n    return most_state + \": \" + str(counts[most_state]) + \" counties.\"\n\ndef county_with_highest_income(counties):\n    \"\"\"Return the state, county and median household income of the county with the highest median household income, formatted as \"STATE, County: income\".\"\"\"\n    richest_county = counties[0][\"County\"]\n    richest_state = counties[0][\"State\"]\n    income = counties[0][\"Income\"][\"Median Household Income\"]\n    for x in counties:\n        if x[\"Income\"][\"Median Household Income\"] > income:\n            richest_county = x[\"County\"]\n            richest_state = x[\"State\"]\n            income = x[\"Income\"][\"Median Household Income\"]\n    return richest_state + \", \" + richest_county + \": \" + str(income)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"countyDemographicsUnfinished.py","file_name":"countyDemographicsUnfinished.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"218425161","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom fractions import Fraction\n\ntry:\n    a = Fraction(input('Enter a fraction: '))\n    print(\"OK\", a)\nexcept Exception as ex:\n    print(ex)\n    print(\"Error 2\")\n\ndef factors(b):\n    if b <= 0 or not isinstance(b, int):\n        print('Please enter a positive integer')\n        return\n\n    for i in range(1, b+1):\n        if b % i == 0:\n            print(i)\n\n\n'''\nQuadratic equation root calculator\n'''\ndef roots(a, b, c):\n    D = (b*b - 4*a*c)**0.5\n    x_1 = (-b + D)/(2*a)\n    x_2 = (-b - D)/(2*a)\n\n    print('x1: {0}'.format(x_1))\n    print('x2: {0}'.format(x_2))\n\n\nif __name__ == '__main__':\n    factors(25)\n    factors(3.4)\n\n    a = input('Enter a:')\n    b = input('Enter b:')\n    c = input('Enter c:')\n    roots(float(a), float(b), float(c))\n","sub_path":"python_3_projects/doing_math/doing_math.py","file_name":"doing_math.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"379834302","text":"import sys\r\nfrom math import sin, cos, radians\r\n\r\nimport pygame\r\n\r\nfrom classes.Vector3d import Vector3d\r\nfrom classes.Point import Point\r\nfrom utils.graph import draw_cube, draw_lines\r\nfrom utils.utils import init_cube\r\n\r\npygame.init()\r\nFPS = 30\r\n\r\nBLUE = (44, 174, 229)\r\nBLACK = (0, 0, 0)\r\nRED = (255, 0, 0)\r\n\r\nclock = pygame.time.Clock()\r\nsc = pygame.display.set_mode((800, 800))\r\n\r\n\r\ndef get_quaternion(p0, point):\r\n    # a quaternion is stored as [scalar part, vector part]\r\n    return [p0, point]\r\n\r\n\r\ndef get_conj_quaternion(q):\r\n    return get_quaternion(q[0], q[1].reverse())\r\n\r\n\r\ndef quaternions_multiplication(q1, q2):\r\n    # Hamilton product of q1 = (p0, v1) and q2 = (q0, v2)\r\n    p0 = q1[0] * q2[0] - q1[1].x * q2[1].x - q1[1].y * q2[1].y - q1[1].z * q2[1].z\r\n    p1 = q1[0] * q2[1].x + q1[1].x * q2[0] + q1[1].y * q2[1].z - q1[1].z * q2[1].y\r\n    p2 = q1[0] * q2[1].y + q1[1].y * q2[0] + q1[1].z * q2[1].x - q1[1].x * q2[1].z\r\n    p3 = q1[0] * q2[1].z + q1[1].z * q2[0] + q1[1].x * q2[1].y - q1[1].y * q2[1].x\r\n    return get_quaternion(p0, Point(p1, p2, p3))\r\n\r\n\r\ndef rotation(point, n, angle):\r\n    # rotate the point about the unit axis n by the given angle,\r\n    # via the conjugation n_q * p_q * n_q_conj\r\n    s = sin(angle / 2)\r\n    n_q = get_quaternion(cos(angle / 2), Point(n.x * s, n.y * s, n.z * s))\r\n    n_q_conj = get_conj_quaternion(n_q)\r\n    p_q = get_quaternion(0, point)\r\n    return quaternions_multiplication(quaternions_multiplication(n_q, p_q), n_q_conj)[1]\r\n\r\n\r\n
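# Two ways to flatten the 3D scene onto the screen: an orthogonal projection\r\n# (re-express each point in a new basis and keep only x, y) and a central\r\n# projection that scales x and y by the point's depth relative to the centre.\r\n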
def orthogonal_projection(points):\r\n    new_basis = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\r\n    new_origin = [-100, 0, 0]\r\n    projection = []\r\n    for p in points:\r\n        new_x = new_basis[0][0] * (p.x - new_origin[0]) + new_basis[1][0] * (p.y - new_origin[1]) + new_basis[2][0] * (\r\n                p.z - new_origin[2])\r\n        new_y = new_basis[0][1] * (p.x - new_origin[0]) + new_basis[1][1] * (p.y - new_origin[1]) + new_basis[2][1] * (\r\n                p.z - new_origin[2])\r\n        projection.append(Point(new_x, new_y))\r\n    return projection\r\n\r\n\r\ndef center_projection(points, center):\r\n    projection = []\r\n    for p in points:\r\n        x = p.x * (center.z / (center.z - p.z)) + center.x * (p.z / (center.z - p.z))\r\n        y = p.y * (center.z / (center.z - p.z)) + center.y * (p.z / (center.z - p.z))\r\n        projection.append(Point(x, y))\r\n    return projection\r\n\r\n\r\ndef main():\r\n    # rotation angle per frame\r\n    angle = 0.05\r\n\r\n    cube = init_cube()\r\n\r\n    # unit vector along the rotation axis\r\n    angle_dir = 35\r\n    dir_p1 = Point(0, 0, 0)\r\n    dir_p2 = Point(cos(radians(angle_dir)), sin(radians(angle_dir)), 0)\r\n    axis = Vector3d(dir_p1, dir_p2, 0)\r\n\r\n    line = [Point(dir_p1.x + 100, dir_p1.y + 100), Point(dir_p2.x * 800, dir_p2.y * 800)]\r\n\r\n    while True:\r\n        for i in pygame.event.get():\r\n            if i.type == pygame.QUIT:\r\n                sys.exit()\r\n        sc.fill(BLUE)\r\n\r\n        # projection_cube = orthogonal_projection(cube)\r\n        # projection_line = orthogonal_projection(line)\r\n        center = Point(0, 0, 600)\r\n        projection_cube = center_projection(cube, center)\r\n        projection_line = center_projection(line, center)\r\n\r\n        draw_cube(sc, list(map(lambda point: [point.x, point.y], projection_cube)))\r\n        draw_lines(sc, list(map(lambda point: [point.x, point.y], projection_line)), RED)\r\n\r\n        for i in range(0, len(cube)):\r\n            cube[i] = rotation(cube[i], axis, angle)\r\n\r\n        pygame.display.update()\r\n        clock.tick(FPS)\r\n\r\n\r\nmain()\r\n","sub_path":"Lab11_M/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"444597886","text":"import requests\nfrom requests.auth import HTTPBasicAuth\nimport re\nfrom geopy.geocoders import Nominatim\nfrom geopy.distance import geodesic\nfrom country_list import countries_for_language\nfrom .nutriscore_calculation import simplified_nutriscore\n\nimport requests_cache\n\nrequests_cache.install_cache('demo_cache')\n\nRETRY_MAX = 5\n\nclass Logic:\n    def __init__(self):\n        self.geolocator = Nominatim(user_agent=\"xxx\")\n\n        countries = dict(countries_for_language('de'))\n\n        country_names_german = list(countries.values())\n\n        country_names_german.append('EU')\n        country_names_german.append('Europäische Union')\n        self.country_names_german = country_names_german\n\n    # param user_weights example\n    # user_weights = {\n    #     'price': 0.4,\n    #     'sustainability': 0.4,\n    #     'nutri_score': 0.2,\n    # }\n    def compare_products(self, original_gtin, user_weights):\n        # get product details\n        retry = 0\n        while retry < RETRY_MAX:\n            try:\n                retry += 1\n                original_product_id = self.product_id_from_gtin(gtin=original_gtin)\n                break  # stop retrying once the lookup succeeds\n            except Exception:\n                print(f\"api error on {original_gtin}\")\n\n        print(original_product_id)\n        original_product_details = self.product_details_from_id(product_id=original_product_id)\n\n        print(original_product_details)\n\n        # get related products from group\n        related_product_ids = self.get_group_products(category_code=original_product_details['product_category'])\n\n        print(related_product_ids)\n\n        # get related product information\n        all_product_details = 
[original_product_details]\n for product_id in related_product_ids:\n try:\n product_detail = self.product_details_from_id(product_id=product_id)\n all_product_details.append(product_detail)\n except Exception:\n print(f\"process of {product_id} failed\")\n\n # compute scores\n # add 'score' to dicts\n all_product_details = list(map(lambda x: dict(x, **{'score': 0.0}), all_product_details))\n\n # compute price score\n all_prices_abs = list(map(lambda x: x['base_price'], all_product_details))\n\n max_price = float(max(all_prices_abs))\n min_price = float(min(all_prices_abs))\n\n print(all_product_details)\n print(user_weights['price'])\n\n all_product_details = list(map(lambda x: dict(\n x,\n **{'score': x['score'] + user_weights['price'] * (1-(\n x['base_price'] - min_price) / (\n max_price - min_price))}\n ), all_product_details))\n\n all_product_details = list(map(lambda x: dict(\n x,\n **{'score_price': 1 - (x['base_price'] - min_price) / (\n max_price - min_price),\n 'score_price_color': self.value_color(1-(x['base_price'] - min_price) / (\n max_price - min_price))}\n ), all_product_details))\n\n # compute sustainability score\n def get_distance_score(origin_distance_km):\n green = 300\n yellow = 4000\n orange = 11000\n # max_distance = 18000\n if (origin_distance_km <= green):\n return 1\n elif (origin_distance_km <= yellow):\n return 0.66\n elif (origin_distance_km <= orange):\n return 0.33\n else:\n return 0\n\n all_product_details = list(map(lambda x: dict(\n x,\n **{'score': x['score'] + user_weights['sustainability'] * get_distance_score(x['origin_distance_km'])}\n ), all_product_details))\n\n all_product_details = list(map(lambda x: dict(\n x,\n **{'score_sustainability': get_distance_score(x['origin_distance_km'])*0.8+x['has_label']*0.2,\n 'score_sustainability_color': self.value_color(get_distance_score(x['origin_distance_km'])*0.8+x['has_label']*0.2)}\n ), all_product_details))\n\n # compute nutrition score\n nutri_score_weight = {\n 'A': 1.00,\n 'B': 0.75,\n 'C': 0.50,\n 'D': 0.25,\n 'E': 0.00,\n }\n\n all_product_details = list(map(lambda x: dict(\n x,\n **{'score': x['score'] + user_weights['nutri_score'] * nutri_score_weight[x['nutri_score']]}\n ), all_product_details))\n\n all_product_details = list(map(lambda x: dict(\n x,\n **{'score_nutri_score': nutri_score_weight[x['nutri_score']],\n 'score_nutri_score_color': self.value_color(nutri_score_weight[x['nutri_score']])}\n ), all_product_details))\n\n return sorted(all_product_details, key=lambda product: product['score'], reverse=True)\n\n def product_id_from_gtin(self, gtin):\n query = {\n 'gtins': gtin,\n 'verbosity': 'id'\n }\n\n response = requests.get(\n 'https://hackzurich-api.migros.ch/products',\n params=query,\n auth=HTTPBasicAuth('hackzurich2020', 'uhSyJ08KexKn4ZFS'))\n\n return response.json()['ids'][0]\n\n def product_details_from_id(self, product_id):\n query = {\n 'verbosity': 'full'\n }\n\n response = requests.get(\n f'https://hackzurich-api.migros.ch/products/{product_id}',\n params=query,\n auth=HTTPBasicAuth('hackzurich2020', 'uhSyJ08KexKn4ZFS'))\n\n original_product = response.json()\n\n # product name\n print(original_product)\n original_product_name = original_product['name']\n\n # get nutrients\n nutrients = {}\n\n for nutrient in original_product['nutrition_facts']['standard']['nutrients']:\n if nutrient['name'] == 'Energie':\n nutrients['energy'] = nutrient['quantity']\n elif nutrient['name'] == 'davon Zucker':\n nutrients['sugars'] = nutrient['quantity']\n elif nutrient['name'] == 'davon 
gesättigte Fettsäuren':\n                nutrients['saturated_fat'] = nutrient['quantity']\n            elif nutrient['name'] == 'Salz':\n                nutrients['sodium'] = nutrient['quantity']\n\n        original_product_nutri_score = simplified_nutriscore(nutrients)\n\n        # origin: take the first known country name appearing in the origin string\n        origin_string = '-'.join(original_product['origins'].values())\n\n        location_origin_str = None\n        for country_name in self.country_names_german:\n            if country_name.lower() in origin_string.lower():\n                location_origin_str = country_name\n                break\n        if location_origin_str is None:\n            # no known country matched; geocode the raw origin string instead of\n            # failing on an undefined variable below\n            location_origin_str = origin_string\n\n        location_origin = self.geolocator.geocode(location_origin_str)\n        location_ch = self.geolocator.geocode(\"Schweiz\")\n\n        original_product_origin_distance = geodesic(\n            (location_origin.latitude, location_origin.longitude),\n            (location_ch.latitude, location_ch.longitude)\n        ).kilometers\n\n        # category\n        original_product_category = original_product['categories'][0]['code']\n\n        # rating\n        original_product_rating = original_product['ratings']['average_all']\n\n        # price\n        original_product_price = original_product['price']['item']['price']\n        # original_product_base_price = original_product['price']['base']['price']\n\n        # quantity/unit\n        original_product_quantity = original_product['price']['item']['quantity']\n        original_product_unit = original_product['price']['item']['unit']\n        quantity_string = original_product['price']['item']['display_quantity']\n        original_product_display_quantity = self.parse_quantity(quantity_string)\n\n        # label available? (every label is checked, not only the first one)\n        original_product_label = False\n        if 'labels' in original_product:\n            sustainability_labels = [\"CO2\", \"L02\", \"L03\", \"L04\", \"L05\", \"L06\", \"L07\",\n                                     \"L09\", \"L10\", \"L14\", \"L16\", \"L17\", \"L28\", \"L29\",\n                                     \"L33\", \"L34\", \"L35\", \"L36\", \"L38\", \"L41\", \"L42\",\n                                     \"L43\", \"L44\", \"L45\", \"L46\", \"L55\", \"L56\", \"L57\",\n                                     \"L59\", \"L60\", \"L62\", \"L64\", \"L65\", \"L67\", \"L68\",\n                                     \"L69\", \"L71\", \"TIW\"]\n            original_product_label = any(label in sustainability_labels\n                                         for label in original_product['labels'])\n\n        # product picture URL\n        original_product_picture_url = original_product['image']['original']\n\n        return {\n            'product_id': product_id,\n            'product_name': original_product_name,\n            'origin_distance_km': original_product_origin_distance,\n            'has_label': original_product_label,\n            'product_category': original_product_category,\n            'customer_rating': original_product_rating,\n            'nutri_score': original_product_nutri_score,\n            'price': original_product_price,\n            'picture_url': original_product_picture_url,\n            'quantity': original_product_quantity,\n            'display_quantity': quantity_string,\n            'base_quantity': original_product_display_quantity['quantity'],\n            'base_unit': original_product_display_quantity['unit'],\n            'base_price': original_product_price / original_product_display_quantity['quantity'],\n            'unit': original_product_unit,\n        }\n\n    def get_group_products(self, category_code):\n        # TODO: INCREASE LIMIT\n        LIMIT = 10\n\n        query_category = {\n            'limit': LIMIT,\n            'facets[category][]': category_code\n        }\n\n        similar_products = requests.get(\n            'https://hackzurich-api.migros.ch/products',\n            params=query_category,\n            auth=HTTPBasicAuth('hackzurich2020', 'uhSyJ08KexKn4ZFS'))\n\n        similar_products = similar_products.json()['products']\n        related_product_ids = []\n        for similar_product in similar_products:\n            related_product_ids.append(similar_product['id'])\n\n        return related_product_ids\n\n    
# param: normalized_base_price, float. A value between 0 and 1\n    def value_color(self, value: float):\n        if value < 0.3:\n            return \"red\"\n        elif value >= 0.3 and value < 0.6:\n            return \"orange\"\n        else:\n            return \"green\"\n\n    def parse_quantity(self, quantity_string: str):\n        # raw pattern strings avoid invalid-escape warnings; the match object is\n        # reused instead of running the same pattern through match() and search()\n        multipack = re.match(r\"(\\d+) x (\\d+)\\s*(\\w+)\", quantity_string)\n        if multipack:\n            quantity = float(multipack.group(1)) * float(multipack.group(2))\n            return self.map_quantitiy(quantity, multipack.group(3))\n        single = re.match(r\"(\\d+)\\s*(\\w+)\", quantity_string)\n        if single:\n            return self.map_quantitiy(float(single.group(1)), single.group(2))\n        return quantity_string\n\n    def map_quantitiy(self, quantity: float, unit: str):\n        # normalise every supported unit to litres or kilograms\n        mapper = {\n            'l': {'quantity': quantity, 'unit': 'l'},\n            'dl': {'quantity': quantity / 10, 'unit': 'l'},\n            'cl': {'quantity': quantity / 100, 'unit': 'l'},\n            'ml': {'quantity': quantity / 1000, 'unit': 'l'},\n            'kg': {'quantity': quantity, 'unit': 'kg'},\n            'g': {'quantity': quantity / 1000, 'unit': 'kg'},\n        }\n        return mapper[unit]\n","sub_path":"server/routes/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":11486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"97678407","text":"import serial\nimport sys\n\nif len(sys.argv) < 2:\n    print(\"Not enough arguments, please execute like this:\")\n    print(\"python3 mp_reset.py <port>\")\n    sys.exit(1)  # exit with an error status when no port is given\nelse:\n    port = sys.argv[1]\n    ser = serial.serial_for_url(port, 115200, rtscts=False, dsrdtr=False, do_not_open=True)\n    ser.rts = 0\n    ser.dtr = 0\n    ser.open()\n    ser.close()","sub_path":"mp_reset.py","file_name":"mp_reset.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"36193903","text":"from imgurpython import ImgurClient\nfrom classes.Classes import *\n\nclass imgurAPI:\n\n    def __init__(self):\n        self.imgur = ImgurClient(client_id=\"d77a3439b35d2ea\", client_secret=\"b5ae47b0c54923367978870f8a945f6e09e82dec\")\n\n    def getGallery(self, search):\n\n        gallery = []\n        pictureGallery = self.imgur.gallery_tag(search)\n\n        # iterate over the items actually returned: total_items counts the whole\n        # (paginated) gallery and can exceed len(items), and indexing from 1\n        # skipped the first item\n        for item in pictureGallery.items:\n            if len(gallery) >= 3:\n                break\n            if item.nsfw:\n                continue\n            l = Image(item.id, item.link)\n            if not l.url[-4:] == \".jpg\":\n                continue\n            gallery.append(l)\n\n        return gallery\n","sub_path":"apis/Imgurapi.py","file_name":"Imgurapi.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"379413183","text":"#!/usr/bin/env python3\n\"\"\" doc \"\"\"\n\nimport tensorflow as tf\n\n\ndef l2_reg_create_layer(prev, n, activation, lambtha):\n    \"\"\" doc \"\"\"\n    initialize = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n    regulize = tf.contrib.layers.l2_regularizer(lambtha)\n    layer = tf.layers.Dense(units=n, kernel_initializer=initialize,\n                            kernel_regularizer=regulize,\n                            activation=activation, name=\"layer\")\n    return layer(prev)\n","sub_path":"supervised_learning/0x05-regularization/3-l2_reg_create_layer.py","file_name":"3-l2_reg_create_layer.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"494751257","text":"from urllib import urlencode\nimport requests\n\nfrom django.conf import settings\n\n\n
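# Geocode a street address via the Google Maps endpoints configured in the\n# Django settings; (0, 0) is returned when the response carries no usable\n# geometry for the address.\n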
def get_latitude_and_longitude(address, http_client=None):\n    http_client = http_client or requests\n\n    key = settings.GOOGLE_MAPS_API_KEY\n\n    params = {\n        \"address\": \"{} ontario\".format(address),  # HACK\n        \"key\": key,\n    }\n    response = http_client.get(settings.GOOGLE_MAPS_API_URL_BASE, params=params).json()\n\n    try:\n        results = response[\"results\"][0][\"geometry\"][\"location\"]\n    except (KeyError, IndexError):\n        # the results list may be missing or empty\n        return 0, 0\n    return results[\"lat\"], results[\"lng\"]\n\n\ndef get_google_maps_link(address, postal_code):\n    query = urlencode({\n        \"q\": \"{} {}\".format(address, postal_code),\n    })\n\n    return settings.GOOGLE_MAPS_WEB_URL_BASE + \"?\" + query\n","sub_path":"geo/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"265088890","text":"import collections\n\nimport numpy as np\n\nfrom Wheel import *\n\n\nclass OutcomeStatistics(object):\n    # Statistics over a list of wheel numbers. Example: for the outcomes 0, 32\n    # and 15 the mean is 32, because the mean is taken along the wheel ordering,\n    # not numerically. The variance is computed the same way: if all the numbers\n    # lie close to one another on the wheel, the variance will be low and the\n    # prediction will be accurate.\n    @staticmethod\n    def create(outcome_numbers):\n        counter = collections.Counter(outcome_numbers)\n\n        # Pick the wheel number that minimises the residual sum of squares (RSS)\n        # of the wheel distances to all outcomes.\n        rss_list = np.zeros(len(Wheel.NUMBERS))\n        for idx_mean in range(len(Wheel.NUMBERS)):\n            rss = 0.0\n            for outcome in outcome_numbers:\n                rss += Wheel.distance_between_numbers(outcome, Wheel.NUMBERS[idx_mean]) ** 2\n            rss_list[idx_mean] = rss\n        mean_number = Wheel.NUMBERS[rss_list.argmin()]\n\n        variance = 0.0\n        for outcome in outcome_numbers:\n            variance += Wheel.distance_between_numbers(outcome, mean_number) ** 2\n        # mean of the squared distances, so the square root below is a standard deviation\n        variance /= len(outcome_numbers)\n\n        most_probable_number = counter.most_common()\n\n        if not most_probable_number:\n            raise Exception('Most probable number should not be null')\n\n        return {'mean_number': mean_number,\n                'std_deviation': np.sqrt(variance),\n                'most_common': most_probable_number}\n","sub_path":"computations/comp_utils/OutcomeStatistics.py","file_name":"OutcomeStatistics.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"393791117","text":"# logging.py\n# implements Server Logger class\nimport os\nfrom datetime import datetime\n\nclass Logger:\n    def __init__(self, caller):\n        self.caller = caller.replace(\".\", \"_\").replace(\":\", \"__\")\n        self.cache = []\n        self.maxCacheSize = 100\n        self.defaultFolder = \".log_entry\"\n        if not os.path.exists(self.defaultFolder):\n            os.makedirs(self.defaultFolder)\n\n    def info(self, message):\n        \"\"\"\n        Log information level message\n        \"\"\"\n        logMessage = \"[{} {} {}] {}\".format(\"INFO\", datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"), self.caller, message)\n        print(logMessage)\n        self.cache.append(logMessage)\n        self._update()\n\n    def warn(self, message):\n        \"\"\"\n        Log warning level message\n        \"\"\"\n        warnMessage = \"[{} {} {}] {}\".format(\"WARN\", datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"), self.caller, message)\n        print(warnMessage)\n        self.cache.append(warnMessage)\n        self._update()\n\n    def error(self, message):\n        \"\"\"\n        Log error level message\n        \"\"\"\n        
errorMessage = \"[{} {} {}] {}\".format(\"ERROR\", datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"), self.caller, message)\n        print(errorMessage)\n        self.cache.append(errorMessage)\n        self._update()\n\n    def close(self):\n        \"\"\"\n        Close logger by saving cached messages to the log file\n        \"\"\"\n        self._save()\n\n    def _update(self):\n        \"\"\"\n        Flush the cache to disk once it grows past the maximum size\n        \"\"\"\n        if len(self.cache) > self.maxCacheSize:\n            self._save()\n            self.cache = []\n\n    def _save(self):\n        \"\"\"\n        Append the cached messages to the local log file\n        \"\"\"\n        if not self.cache:\n            return\n        with open(os.path.join(self.defaultFolder, \"{}.log\".format(self.caller)), \"a\") as logFile:\n            for message in self.cache:\n                print(message, file=logFile)","sub_path":"Pr0j3ct/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"317163536","text":"# coding:utf-8\nimport json\nimport requests\n\nparams = {\"version\": \"1\", \"city\": \"부산\", \"county\": \"수영구\", \"village\": \"광안동\"}\nheaders = {\"appKey\": \"1a9892f6-585f-3520-b651-9e7ee4472831\"}\nr = requests.get(\"http://apis.skplanetx.com/weather/current/hourly\", params=params, headers=headers)\n\ndata = json.loads(r.text)\nweather = data[\"weather\"][\"hourly\"]\ncTime = weather[0][\"timeRelease\"]\ncSky = weather[0][\"sky\"][\"name\"]\ncWind = weather[0][\"wind\"][\"wspd\"]\ncTemp = weather[0][\"temperature\"][\"tc\"]\n\ncWeather = \"Today's weather as of \" + cTime + \": the sky is \" + cSky + \", the wind speed is \" + cWind + \" and the temperature is \" + cTemp + \".\"\nprint(cWeather)\n#print(r.json())\n\n\n# JSON encoding\njsonString = json.dumps(data)\nprint(jsonString)\nprint(type(jsonString))\n\n# JSON decoding\ndecoded = json.loads(jsonString)\nprint(decoded)\n","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"539169131","text":"\n# the function sorts the list in place and returns nothing;\n# it modifies the given instance directly\ndef mergeSort(lista: list):\n    # only lists with at least\n    # two elements need work\n    if len(lista) > 1:\n        # split the list into two halves\n        esquerda, direita = divide(lista)\n\n        # sort each half by calling the\n        # function recursively\n        mergeSort(direita)\n        mergeSort(esquerda)\n        # cursors into the left half, the right half\n        # and the original list, respectively\n        i = j = k = 0\n\n        # while both cursors are inside their halves, write the smaller of the\n        # two front elements back into the original list and advance the cursors\n        while i < len(esquerda) and j < len(direita):\n            if esquerda[i] < direita[j]:\n                lista[k] = esquerda[i]\n                i += 1\n            else:\n                lista[k] = direita[j]\n                j += 1\n            k += 1\n\n        
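# at this point one of the halves is exhausted and lista[:k] already holds\n        # the k smallest elements in sorted order\n        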
 That is why the\n        # remainder of the other half is appended to the\n        # original list below (if anything is left)\n        while i < len(esquerda):\n            lista[k] = esquerda[i]\n            i += 1\n            k += 1\n        while j < len(direita):\n            lista[k] = direita[j]\n            j += 1\n            k += 1\n\n# helper function: takes a list and\n# splits it into two halves\ndef divide(lista: list):\n    metade = len(lista) // 2\n    parte_A = lista[:metade]\n    parte_B = lista[metade:]\n\n    return parte_A, parte_B\n\n\n# Test code\nif __name__ == \"__main__\":\n    lista = [3, 4, 2, 1, 7, 5, 8, 9, 0, 6]\n    mergeSort(lista)\n    print(lista) # Output: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n","sub_path":"all/070-Merge-Sort.py","file_name":"070-Merge-Sort.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"29026540","text":"class Solution:\n    def countSubstrings(self, s: str) -> int:\n        L = len(s)\n        cnt = 0\n        for center in range(L):\n            left = right = center\n            while left >= 0 and right < L and s[left] == s[right]:\n                cnt += 1\n                left -= 1\n                right += 1\n        for left in range(L - 1):\n            right = left + 1\n            while left >= 0 and right < L and s[left] == s[right]:\n                cnt += 1\n                left -= 1\n                right += 1\n        return cnt","sub_path":"Week_06/palindromic-substrings.py","file_name":"palindromic-substrings.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"498573191","text":"import smtplib\nimport ssl\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\nimport logging\n\nport = 465 # for SSL\nsmtp_server = \"smtp.gmail.com\"\nsender_email = \"bpgappdev@gmail.com\"\nsender_password = \"EnterYourOwn\"\nlacey_email = \"EnterYourOwn\"\ntest_email = \"EnterYourOwn\"\nreceiver_email = lacey_email\n\nmessage = MIMEMultipart(\"alternative\")\nmessage[\"Subject\"] = \"HURRY, A PLANT IS IN STOCK!\"\nmessage[\"From\"] = sender_email\nmessage[\"To\"] = receiver_email\nmessage[\"X-Priority\"] = \"2\"\n\n\nthreeIncherInStock = \"\"\"\\\nHi,\nView the 3\" Philodendron Rio here: \nhttps://www.gabriellaplants.com/collections/philodendron/products/3-philodendron-rio?_pos=1&_sid=5f26d33de&_ss=r \nBetter hurry, these suckers are flying off the shelf!\n\"\"\"\nthreeIncherInStockHtml = \"\"\"\\\n<html>\n  <body>\n    <p>\n      Hi,<br>\n      <a href=\"https://www.gabriellaplants.com/collections/philodendron/products/3-philodendron-rio?_pos=1&_sid=5f26d33de&_ss=r\">View the 3\" Philodendron Rio Here</a><br>\n      Better hurry, these suckers are flying off the shelf!\n    </p>\n  </body>\n</html>\n\"\"\"\nfourIncherInStock = \"\"\"\\\nHi,\nView the 4\" Philodendron Rio here: \nhttps://www.gabriellaplants.com/collections/philodendron/products/rio-philodendron-4-original-consistent-collectors-version-of-brasil-philodendron-silver-variegation?_pos=2&_sid=5f26d33de&_ss=r \nBetter hurry, these suckers are flying off the shelf!\n\"\"\"
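\n# HTML twin of the plain-text messages; MIMEMultipart(\"alternative\") lets\n# the receiving client pick the richest part it can render.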
\nfourIncherInStockHtml = \"\"\"\\\n<html>\n  <body>\n    <p>\n      Hi,<br>\n      <a href=\"https://www.gabriellaplants.com/collections/philodendron/products/rio-philodendron-4-original-consistent-collectors-version-of-brasil-philodendron-silver-variegation?_pos=2&_sid=5f26d33de&_ss=r\">View the 4\" Philodendron Rio Here</a><br>\n      Better hurry, these suckers are flying off the shelf!\n    </p>\n  </body>\n</html>\n\"\"\"\n\npart1 = MIMEText(threeIncherInStock, \"plain\")\npart2 = MIMEText(threeIncherInStockHtml, \"html\")\npart3 = MIMEText(fourIncherInStock, \"plain\")\npart4 = MIMEText(fourIncherInStockHtml, \"html\")\n\n\ndef threeinchstockcheck():\n    data = requests.get('https://www.gabriellaplants.com/collections/philodendron/products/3-philodendron-rio?_pos=1&_sid=2efee07e7&_ss=r')\n    soup = BeautifulSoup(data.text, 'html.parser')\n    for script in soup.find_all('script', {'id': 'stockify-json-data'}):\n        strippedstring = repr(script.string)\n        indexof = strippedstring.find('\"available\":')\n        print(\"The availability of the 3 incher has status: \" + strippedstring[indexof+12:indexof+17])\n        if strippedstring[indexof + 12:indexof + 17] != \"false\":\n            message.attach(part1)\n            message.attach(part2)\n            return 1\n\n\ndef fourinchstockcheck():\n    data2 = requests.get('https://www.gabriellaplants.com/collections/philodendron/products/rio-philodendron-4-original-consistent-collectors-version-of-brasil-philodendron-silver-variegation?_pos=2&_sid=5f26d33de&_ss=r')\n    soup2 = BeautifulSoup(data2.text, 'html.parser')\n    script = soup2.find('span', {'class': 'available value'})\n    scriptiwant = script.find_next('span', {'class': 'js_in_stock'})\n    scriptiwant2 = scriptiwant.find_next('script', {'type': 'application/ld+json'})\n    strippedstring2 = repr(scriptiwant2.string)\n    indexof2 = strippedstring2.find('\"availability\" :')\n    print(\"The availability of the 4 incher has status: \" + strippedstring2[indexof2+36:indexof2+46])\n    if strippedstring2[indexof2 + 18:indexof2+46] != \"http://schema.org/OutOfStock\":\n        message.attach(part3)\n        message.attach(part4)\n        return 1\n\n\ndef main():\n    sentfirst = False\n    sentsecond = False\n    while True:\n        if threeinchstockcheck() == 1:\n            context = ssl.create_default_context()\n            with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n                server.login(sender_email, sender_password)\n                server.sendmail(sender_email, receiver_email, message.as_string())\n                sentfirst = True\n                if sentsecond:\n                    break\n                if fourinchstockcheck() == 1:\n                    server.sendmail(sender_email, receiver_email, message.as_string())\n                    break\n        if fourinchstockcheck() == 1:\n            context = ssl.create_default_context()\n            with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n                server.login(sender_email, sender_password)\n                server.sendmail(sender_email, receiver_email, message.as_string())\n                sentsecond = True\n                if sentfirst:\n                    break\n                if threeinchstockcheck() == 1:\n                    server.sendmail(sender_email, receiver_email, message.as_string())\n                    break\n        time.sleep(60)\n\n\nmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"194071481","text":"from django.shortcuts import render, redirect\r\nfrom django.views.generic.base import ContextMixin, TemplateView\r\nfrom django.views.generic.list import ListView\r\nfrom generic.mixins import CaregoryListMixin, PageNumberMixin\r\nfrom goods.models import Good, GoodImage\r\nfrom categories.models import Category\r\nfrom generic.controllers import PageNumberView\r\nfrom django.views.generic.detail import DetailView\r\nfrom django.forms.models import inlineformset_factory\r\nfrom goods.forms import GoodForm\r\nfrom django.contrib import messages\r\nfrom django.urls import reverse\r\nfrom django.views.generic.edit import DeleteView\r\n\r\n# Create your views here.
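\r\n# SortMixin exposes the requested sort column (\"sort\") and direction\r\n# (\"order\") to templates so list and pagination links can preserve them.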
\r\nclass SortMixin(ContextMixin):\r\n    sort = \"0\"\r\n    order = \"A\"\r\n\r\n    def get_context_data(self, **kwargs):\r\n        context = super(SortMixin, self).get_context_data(**kwargs)\r\n        context[\"sort\"] = self.sort\r\n        context[\"order\"] = self.order\r\n        return context\r\n\r\nclass GoodListView(PageNumberView, ListView, SortMixin, CaregoryListMixin):\r\n    template_name = \"goods_index.html\"\r\n    model = Good\r\n    paginate_by = 10\r\n    cat = None\r\n\r\n    def get(self, request, *args, **kwargs):\r\n        if self.kwargs[\"pk\"] == None:\r\n            self.cat = Category.objects.first()\r\n        else:\r\n            self.cat = Category.objects.get(pk = self.kwargs[\"pk\"])\r\n        return super(GoodListView, self).get(request, *args, **kwargs)\r\n\r\n    def get_context_data(self, **kwargs):\r\n        context = super(GoodListView, self).get_context_data(**kwargs)\r\n        context[\"category\"] = self.cat\r\n        return context\r\n\r\n    def get_queryset(self):\r\n        goods = Good.objects.filter(category=self.cat)\r\n        if self.sort == \"2\":\r\n            if self.order == \"D\":\r\n                goods = goods.order_by(\"-in_stock\", \"name\")\r\n            else:\r\n                goods = goods.order_by(\"in_stock\", \"name\")\r\n        elif self.sort == \"1\":\r\n            if self.order == \"D\":\r\n                goods = goods.order_by(\"-price\", \"name\")\r\n            else:\r\n                goods = goods.order_by(\"price\", \"name\")\r\n        else:\r\n            if self.order == \"D\":\r\n                goods = goods.order_by(\"-name\")\r\n            else:\r\n                goods = goods.order_by(\"name\")\r\n        return goods\r\n\r\nclass GoodDetailView(PageNumberView, DetailView, SortMixin, PageNumberMixin):\r\n    model = Good\r\n    template_name = \"good.html\"\r\n\r\n\r\nGoodImagesFormset = inlineformset_factory(Good, GoodImage, can_order=True, fields='__all__')\r\n\r\nclass GoodCreate(PageNumberView, TemplateView, SortMixin, PageNumberMixin):\r\n    template_name = \"good_add.html\"\r\n    cat = None\r\n    form = None\r\n    formset = None\r\n\r\n    def get(self, request, *args, **kwargs):\r\n        if self.kwargs[\"pk\"] == None:\r\n            self.cat = Category.objects.first()\r\n        else:\r\n            self.cat = Category.objects.get(pk = self.kwargs[\"pk\"])\r\n        self.form = GoodForm(initial= {\"category\": self.cat})\r\n        self.formset = GoodImagesFormset()\r\n        return super(GoodCreate, self).get(request, *args, **kwargs)\r\n\r\n    def get_context_data(self, **kwargs):\r\n        context = super(GoodCreate, self).get_context_data(**kwargs)\r\n        context[\"category\"] = self.cat\r\n        context[\"form\"] = self.form\r\n        context[\"formset\"] = self.formset\r\n        return context\r\n\r\n    def post(self, request, *args, **kwargs):\r\n        self.form = GoodForm(request.POST, request.FILES)\r\n        if self.form.is_valid():\r\n            new_good = self.form.save()\r\n            self.formset = GoodImagesFormset(request.POST, request.FILES, instance=new_good)\r\n\r\n            if self.formset.is_valid():\r\n                self.formset.save()\r\n                messages.add_message(request, messages.SUCCESS, \"A good was added\")\r\n                return redirect(reverse(\"goods_index\", kwargs = {\"pk\": new_good.category.pk}) + \"?page=\" +\r\n                    self.request.GET[\"page\"] + \"&sort=\" + self.request.GET[\"sort\"] + \"&order=\" + self.request.GET[\"order\"])\r\n\r\n        if self.kwargs['pk'] == None:\r\n            self.cat = Category.objects.first()\r\n        else:\r\n            self.cat = Category.objects.get(pk = self.kwargs[\"pk\"])\r\n\r\n        self.formset = GoodImagesFormset(request.POST, request.FILES)\r\n        return super(GoodCreate, self).get(request, *args, **kwargs)\r\n\r\nclass GoodUpdate(PageNumberView, TemplateView, SortMixin, PageNumberMixin):\r\n    good = None\r\n    template_name = \"good_edit.html\"\r\n    form = None\r\n    formset = None\r\n\r\n    def get(self, request, *args, **kwargs):\r\n        self.good = Good.objects.get(pk = self.kwargs[\"pk\"])\r\n        self.form = GoodForm(instance = self.good)\r\n        self.formset = GoodImagesFormset(instance=self.good)\r\n        return super(GoodUpdate, self).get(request, *args, **kwargs)\r\n\r\n    def get_context_data(self, **kwargs):\r\n        context = super(GoodUpdate, self).get_context_data(**kwargs)\r\n        context[\"good\"] = self.good\r\n        context[\"form\"] = self.form\r\n        context[\"formset\"] = self.formset\r\n        return context\r\n\r\n    def post(self, request, *args, **kwargs):\r\n        self.good = Good.objects.get(pk = self.kwargs[\"pk\"])\r\n        self.form = GoodForm(request.POST, request.FILES, instance=self.good)\r\n        self.formset = GoodImagesFormset(request.POST, request.FILES, instance=self.good)\r\n\r\n        if self.form.is_valid():\r\n            self.form.save()\r\n            if self.formset.is_valid():\r\n                self.formset.save()\r\n                messages.add_message(request, messages.SUCCESS, \"Good was changed\")\r\n                return redirect(reverse(\"goods_index\", kwargs = {\"pk\": self.good.category.pk}) + \"?page=\" +\r\n                    self.request.GET[\"page\"] + \"&sort=\" + self.request.GET[\"sort\"] + \"&order=\" + self.request.GET[\"order\"])\r\n\r\n        return super(GoodUpdate, self).get(request, *args, **kwargs)\r\n\r\nclass GoodDelete(PageNumberView, DeleteView, SortMixin, PageNumberMixin):\r\n    template_name = \"good_delete.html\"\r\n    model = Good\r\n\r\n    def post(self, request, *args, **kwargs):\r\n        self.success_url = reverse(\"goods_index\",\r\n                        kwargs = {\r\n                            \"pk\": Good.objects.get(\r\n                                pk = kwargs[\"pk\"]\r\n                            ).category.pk}) + \"?page=\" + self.request.GET[\"page\"] + \"&sort=\" + self.request.GET[\"sort\"] + \"&order=\" + self.request.GET[\"order\"]\r\n        messages.add_message(request, messages.SUCCESS, \"Good was deleted\")\r\n        return super(GoodDelete, self).post(request, *args, **kwargs)\r\n","sub_path":"goods/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"88602119","text":"#!/usr/bin/env python\n\nimport os\n\nreadme = \"\"\"### LeetCode Solutions:\n\n\"\"\"\n\ndef create_leetcode(filename):\n    webpage_name = []\n    for i in filename.split()[1:]:\n        webpage_name.append(i.lower())\n\n    return \"https://leetcode.com/problems/\" + \"-\".join(webpage_name)\n\ndef create_github(filename):\n    prefix = \"https://github.com/Kevin-Fang/leetcode-solutions/blob/master/\"\n    prefix += \"%20\".join(filename.split())\n    return prefix\n\nfilenames = []\nfor filename in os.listdir(\"./\"):\n    if (filename.endswith(\".py\") or filename.endswith(\".cpp\")) and filename != \"make_links.py\":\n        filenames.append(filename)\n\nfilenames = sorted(filenames, key=lambda x: int(x.split(\".\")[0]))\nreadme += \"Total problems solved: {}\\n\\n\".format(len(filenames))\nfor filename in filenames:\n    no_ext_fn = \"\".join(filename.split(\".\")[0:2])\n    link = create_leetcode(no_ext_fn)\n    readme += \"* [{filename}]({leetcode}): [Solution]({filename_link})\\n\\n\".format(leetcode=link, filename=no_ext_fn, filename_link=create_github(filename))\n\nwith open(\"README.md\", \"w\") as f:\n    f.write(readme)\n","sub_path":"make_links.py","file_name":"make_links.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"278222480","text":"# coding=utf-8\nimport timeit\nimport unittest\n\n\n
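# Benchmarks the serial parser against the parallel helpers on a single\n# thread to isolate synchronization overhead; the setup below silences\n# the loggers so console I/O does not distort the timings.\n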
class TestPerformance(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def test_single_threaded_unlimited(self):\n        setup = \"\"\"\nimport ziniurrss.main\nimport ziniurrss.parallel\nimport logging\n\nziniurrss.main.log.setLevel(logging.ERROR)\nziniurrss.parallel.log.setLevel(logging.ERROR)\n\ndef overriden_parse_episode(*args):\n    return \"episode\"\n\nziniurrss.main.parse_episode = overriden_parse_episode\n\npage = \\\"\\\"\\\"
    \\\"\\\"\\\" * 100\n\n\"\"\"\n\n        iteration_count = 1000\n\n        # unsynchronized\n        serial_unlimited_result = timeit.timeit(\n            \"\"\"ziniurrss.main.parse_episodes(page, limit=None, use_cache=False)\"\"\",\n            setup=setup, number=iteration_count)\n        single_threaded_parallel_unlimited_result = timeit.timeit(\n            \"\"\"ziniurrss.parallel.parse_episodes_parallel_limited(page, limit=None, use_cache=False, threads=1)\"\"\",\n            setup=setup, number=iteration_count)\n        single_threaded_parallel_unlimited_unsynch_result = timeit.timeit(\n            \"\"\"ziniurrss.parallel.parse_episodes_parallel(page, limit=None, use_cache=False, threads=1)\"\"\",\n            setup=setup, number=iteration_count)\n        serial_limited_result = timeit.timeit(\n            \"\"\"ziniurrss.main.parse_episodes(page, limit=100, use_cache=False)\"\"\",\n            setup=setup, number=iteration_count)\n        # synchronization overhead\n        single_threaded_parallel_limited_result = timeit.timeit(\n            \"\"\"ziniurrss.parallel.parse_episodes_parallel_limited(page, limit=100, use_cache=False, threads=1)\"\"\",\n            setup=setup, number=iteration_count)\n\n        print(\"Serial, unlimited in [%s]\" % serial_unlimited_result)\n        print(\"Parallel, single thread, unlimited in [%s]\" % single_threaded_parallel_unlimited_result)\n        print(\n            \"Parallel, single thread, unlimited, unsynced in [%s]\" % single_threaded_parallel_unlimited_unsynch_result)\n        print(\"Serial, limited in [%s]\" % serial_limited_result)\n        print(\"Parallel, single thread, limited in [%s]\" % single_threaded_parallel_limited_result)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"src/ziniurrss/test_perf.py","file_name":"test_perf.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"405146876","text":"import os\nos.environ[\"THEANO_FLAGS\"] = \"device=gpu\"\nfrom sklearn.base import BaseEstimator\nfrom lasagne import layers, nonlinearities\nfrom lasagne.updates import nesterov_momentum\nfrom nolearn.lasagne import NeuralNet, BatchIterator\nimport numpy as np\nfrom nolearn.lasagne.handlers import EarlyStopping\nimport skimage.color\nimport skimage.transform\n\nw = 8\n\nclass MyBatchIterator(BatchIterator):\n    def transform(self, X, y):\n        X = X.transpose((0, 2, 3, 1))\n\n        X_rot, y_rot = self.rotate(X, y)\n        X = np.append(X, X_rot, axis=0)\n        y = np.append(y, y_rot, axis=0)\n\n        X_flip, y_flip = self.flip(X, y)\n        X = np.append(X, X_flip, axis=0)\n        y = np.append(y, y_flip, axis=0)\n\n        X_trans, y_trans = self.translate(X, y, w)\n\n        X_crop = np.zeros((X.shape[0], 64-w, 64-w, 3))\n        for i in np.arange(X.shape[0]):\n            X_crop[i] = skimage.transform.resize(X[i], (64-w, 64-w))\n\n        X = np.append(X_crop, X_trans, axis=0)\n        y = np.append(y, y_trans, axis=0)\n\n        X = X.astype(np.float32)\n        X = X.transpose((0, 3, 1, 2))\n        return X, y\n\n    def translate(self, X, y, w):\n        X_trans = np.zeros((X.shape[0], 64-w, 64-w, 3))\n        for i in np.arange(X.shape[0]):\n            trans_x, trans_y = np.random.choice(w, 2)\n            X_trans[i] = X[i, trans_x:trans_x+64-w, trans_y:trans_y+64-w, :]\n        return X_trans, y\n\n    def rotate(self, X, y):\n        X_rot = np.zeros_like(X)\n        for i in np.arange(X.shape[0]):\n            img_rot, label_rot = self.rotateOne(X[i], y[i])\n            X_rot[i] = img_rot\n        return X_rot, y\n\n    def rotateOne(self, img, label):\n        angle = np.random.choice(360)\n        img_rot = skimage.transform.rotate(img, angle, mode='reflect')\n        return img_rot, label\n\n    def flip(self, X, y):\n        X1 = X[:, ::-1, :, :]\n        X2 = X[:, :, ::-1, :]\n        X3 = X[:, ::-1, ::-1, :]\n        return np.concatenate((X1, X2, X3)), np.concatenate((y, y, y))
\ndef build_model(hyper_parameters):\n    net = NeuralNet(\n        layers=[\n            ('input', layers.InputLayer),\n            ('conv1', layers.Conv2DLayer),\n            ('pool1', layers.MaxPool2DLayer),\n            ('conv2', layers.Conv2DLayer),\n            ('pool2', layers.MaxPool2DLayer),\n            ('conv3', layers.Conv2DLayer),\n            ('pool3', layers.MaxPool2DLayer),\n            ('hidden5', layers.DenseLayer),\n            ('dropout5', layers.DropoutLayer),\n            ('hidden6', layers.DenseLayer),\n            ('dropout6', layers.DropoutLayer),\n            ('output', layers.DenseLayer),\n        ],\n        input_shape=(None, 3, 64-w, 64-w),\n        use_label_encoder=True,\n        verbose=1,\n        **hyper_parameters\n    )\n    return net\n\nhyper_parameters = dict(\n    conv1_num_filters=32, conv1_filter_size=(3, 3),\n    pool1_pool_size=(2, 2),\n\n    conv2_num_filters=64, conv2_filter_size=(3, 3),\n    pool2_pool_size=(2, 2),\n\n    conv3_num_filters=128, conv3_filter_size=(3, 3),\n    pool3_pool_size=(1, 1),\n\n    hidden5_num_units=200,\n    hidden6_num_units=200,\n\n    output_num_units=18, output_nonlinearity=nonlinearities.softmax,\n\n    update_learning_rate=0.01,\n    update=nesterov_momentum,\n    max_epochs=30,\n    on_epoch_finished = [\n        EarlyStopping(patience=20, criterion='valid_accuracy', criterion_smaller_is_better=False)\n    ],\n    batch_iterator_train=MyBatchIterator(batch_size=100),\n)\n\n\nclass Classifier(BaseEstimator):\n\n    def __init__(self):\n        self.net = build_model(hyper_parameters)\n\n    def preprocess(self, X):\n        X = (X / 255.)\n        X = X.astype(np.float32)\n        X = X.transpose((0, 3, 1, 2))\n        return X\n\n    def fit(self, X, y):\n        X = self.preprocess(X)\n        self.net.fit(X, y)\n        return self\n\n    def predict(self, X):\n        X = self.preprocess(X)\n        return self.net.predict(X)\n\n    def predict_proba(self, X):\n        X = self.preprocess(X)\n        return self.net.predict_proba(X)\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"223717251","text":"#-*-coding=utf-8-*-\n\nimport time\n\nfrom scrapy import log\nfrom scrapy.exceptions import CloseSpider\nfrom utils import _default_redis, _default_req_count, _default_tk_alive, \\\n    calibration, one_valid_token, token_status\n\nclass InvalidTokenError(Exception):\n    \"\"\"Token expired or invalid\"\"\"\n    def __init__(self, value=None):\n        self.value = value\n\n    def __str__(self):\n        if self.value:\n            return repr(self.value)\n        else:\n            return 'InvalidTokenError'\n\nclass UnknownResponseError(Exception):\n    \"\"\"Unhandled response error\"\"\"\n    def __init__(self, value=None):\n        self.value = value\n\n    def __str__(self):\n        if self.value:\n            return repr(self.value)\n        else:\n            return 'UnknownResponseError'\n\nclass UserDoesNotExistError(Exception):\n    \"\"\"User info does not exist\"\"\"\n    def __init__(self, value=None):\n        self.value = value\n\n    def __str__(self):\n        if self.value:\n            return repr(self.value)\n        else:\n            return 'UserDoesNotExistError'\n\nclass ShouldNotEmptyError(Exception):\n    \"\"\"Raised in a spider when a response that should not be empty is empty\"\"\"\n    def __init__(self, value=None):\n        self.value = value\n\n    def __str__(self):\n        if self.value:\n            return repr(self.value)\n        else:\n            return 'ShouldNotEmptyError'\n\nclass PerUserPerAppLimitError(Exception):\n    \"\"\"Per-user, per-app request limit reached\"\"\"\n    def __init__(self, value=None):\n        self.value = value\n\n    def __str__(self):\n        if self.value:\n            return repr(self.value)\n        else:\n            return 'PerUserPerAppLimitError'\n\nclass RetryErrorResponseMiddleware(object):\n    def __init__(self, retry_times):\n        self.retry_times = retry_times\n\n    @classmethod\n    def from_crawler(cls, crawler):\n        settings = crawler.settings\n        retry_times = settings.get('RETRY_TIMES', 2)\n        return cls(retry_times)\n\n    def _retry(self, request, reason, spider):\n        retries = request.meta.get('retry_times', 0) + 1\n        if retries <= self.retry_times:\n            log.msg(format=\"Retrying %(request)s (failed %(retries)d times): %(reason)s\",\n                    
level=log.WARNING, spider=spider, request=request, retries=retries, reason=reason)\n            retryreq = request.copy()\n            retryreq.meta['retry_times'] = retries\n            retryreq.dont_filter = True\n            return retryreq\n        else:\n            log.msg(format=\"Gave up retrying %(request)s (failed %(retries)d times): %(reason)s\",\n                    level=log.ERROR, spider=spider, request=request, retries=retries, reason=reason)\n\n    def process_spider_exception(self, response, exception, spider):\n        if 'dont_retry' not in response.request.meta and \\\n                (isinstance(exception, InvalidTokenError) or isinstance(exception, UnknownResponseError)\n                 or isinstance(exception, ShouldNotEmptyError) or isinstance(exception, PerUserPerAppLimitError)):\n            return [self._retry(response.request, exception, spider)]\n\n        if isinstance(exception, UserDoesNotExistError):\n            # give up retrying when the user does not exist\n            pass\n\nclass RequestTokenMiddleware(object):\n    def __init__(self, host, port, api_key, per_token_hours_limit, per_ip_hours_limit, buffer_size):\n        r = _default_redis(host, port)\n        self.req_count = _default_req_count(r, api_key=api_key)\n        self.tk_alive = _default_tk_alive(r, api_key=api_key)\n        self.per_token_hours_limit = per_token_hours_limit\n        self.per_ip_hours_limit = per_ip_hours_limit\n        self.buffer_size = buffer_size\n\n    @classmethod\n    def from_crawler(cls, crawler):\n        settings = crawler.settings\n        host = settings.get('REDIS_HOST')\n        port = settings.get('REDIS_PORT')\n        api_key = settings.get('API_KEY')\n        per_token_hours_limit = settings.get('PER_TOKEN_HOURS_LIMIT')\n        per_ip_hours_limit = settings.get('PER_IP_HOURS_LIMIT')\n        buffer_size = settings.get('BUFFER_SIZE')\n        return cls(host, port, api_key, per_token_hours_limit, per_ip_hours_limit, buffer_size)\n\n    def process_request(self, request, spider):\n        token_and_used = one_valid_token(self.req_count, self.tk_alive)\n        if token_and_used is None:\n            log.msg(format='No token alive',\n                    level=log.INFO, spider=spider)\n\n            raise CloseSpider('No Token Alive')\n        token, used = token_and_used\n\n        if used > self.per_token_hours_limit - self.buffer_size:\n            calibration(self.req_count, self.tk_alive, self.per_token_hours_limit)\n            token, _ = one_valid_token(self.req_count, self.tk_alive)\n        tk_status = token_status(token)\n        reset_time_in, remaining = tk_status\n        if remaining < self.buffer_size:\n            log.msg(format='REACH API REQUEST BUFFER, SLEEP %(reset_time_in)s SECONDS',\n                    level=log.WARNING, spider=spider, reset_time_in=reset_time_in)\n\n            time.sleep(reset_time_in)\n\n        log.msg(format='Request token: %(token)s used: %(used)s',\n                level=log.INFO, spider=spider, token=token, used=used)\n        request.headers['Authorization'] = 'OAuth2 %s' % token\n","sub_path":"scrapy_weibo/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"456471154","text":"from database_singleton import Singleton\nfrom flask import request, jsonify\nfrom project.api.models import Pax\nfrom operator import itemgetter\n\ndb = Singleton().database_connection()\n\n\nclass Utils:\n    def createFailMessage(self, message):\n        response_object = {\n            'status': 'fail',\n            'message': '{}'.format(message)\n        }\n        return response_object\n\n    def createSuccessMessage(self, message):\n        response_object = {\n            'status': 'success',\n            'message': '{}'.format(message)\n        }\n        return response_object\n\n    def createSuccessGet(self, content):\n        response_object = {\n            'status': 'success',\n            'data': {\n                'pax': [pax.to_json() for pax in content]\n            }\n        }\n        return response_object
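\n\n    # 'A' adds a new record, 'M' merges changes into an existing one;\n    # both paths flush and commit immediately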
\n    def commit_to_database(self, type, model):\n        if (type == 'A'):\n            db.session.add(model)\n        elif (type == 'M'):\n            db.session.merge(model)\n\n        db.session.flush()\n        db.session.commit()\n\n    def filter_by_status(self, value, user_type, id):\n        if user_type == 'provider':\n            pax = Pax.query.filter_by(\n                status=value, provider_id=int(id)).all()\n        elif user_type == 'user':\n            pax = Pax.query.filter_by(\n                status=value, user_id=int(id)).all()\n        else:\n            pax = []\n        return pax\n\n    def sqlalchemyobj_to_list(self, data):\n        final_list = []\n        for item in data:\n            final_list.append(item.to_json())\n        return final_list\n\n    def ignore_empty_status(self, data) -> list:\n        query = self.sqlalchemyobj_to_list(data)\n        filtered_pax = []\n        for pax in query:\n            if pax['status'] != '':\n                filtered_pax.append(pax)\n        return filtered_pax\n\n    def reverse_alphabetical_order(self, data: list) -> list:\n        reverse_alphabetical_pax = sorted(data, key=itemgetter('status'), reverse=True)\n        return reverse_alphabetical_pax\n","sub_path":"project/api/utils/creation_utils.py","file_name":"creation_utils.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"136524393","text":"import os\nimport re\nfrom config import Config\nfrom PIL import Image\nimport PIL.ImageOps\n\n\ndef delete_div(string_list, html):\n    html = html.replace('<div>', 'div>')\n    for string in string_list:\n        start = html.find(string)\n        if start == -1:\n            continue\n        num = 1\n        now = start + 1\n        while num != 0:\n            if html[now:now+4] == \"<div\":\n                num += 1\n            if html[now:now+5] == \"</div\":\n                num -= 1\n                if num == 0:\n                    break\n            now += 1\n        html = html.replace(html[start:now+6], '')\n    return html\n\n\ndef find_img_url(html):\n    start = 0\n    url_dict = dict()\n    while True:\n        start += 1\n        start = html.find(\"//images.csmonitor.com/\", start)\n        if start == -1:\n            break\n        end = html.find('\"', start)\n        url_dict[html[start:end].split(' ')[0]] = ''\n    return url_dict\n\n\ndef invert_png(png):\n    image = Image.open(png)\n    if image.mode == 'RGBA':\n        r,g,b,a = image.split()\n        rgb_image = Image.merge('RGB', (r,g,b))\n        inverted_image = PIL.ImageOps.invert(rgb_image)\n        r2,g2,b2 = inverted_image.split()\n        final_transparent_image = Image.merge('RGBA', (r2,g2,b2,a))\n        final_transparent_image.save(png)\n\n\nscript_str = ''''''\ncookie_str = '''
    \n\t
    '\ntools_menu = '''
    \n\t\t\t\t\t
      '''\nezve_inbody = '
      '\nfooter_1 = '